#!/usr/bin/env python3
#
# Sample code for the explainer submodule.
#################################### SOURCE START ###################################
"""
Overview:
Calculation and visualization of feature importance.
Usage:
main_optuna_and_shap_for_boston_housing.py [--model_type <str>] [--n_trials <int>] [--seed <int>]
main_optuna_and_shap_for_boston_housing.py (-h | --help)
Options:
--model_type <str> Model type (rffgpr or rffregression). [default: rffgpr]
--n_trials <int> Number of trials in hyper parameter tuning. [default: 300]
--seed <int> Random seed. [default: 111]
-h, --help Show this message.
"""
import os
import sys
import docopt
import matplotlib.pyplot as mpl
import sklearn.datasets
import sklearn.metrics
import sklearn.model_selection
import sklearn.preprocessing
### Create Boston housing dataset instance.
def generate_boston_housing_dataset():
### Load Boston Housing data from sklearn.
data = sklearn.datasets.load_boston()
    ### Split the data into training and validation sets.
Xs_train, Xs_valid, ys_train, ys_valid \
= sklearn.model_selection.train_test_split(data["data"], data["target"], test_size = 0.2, random_state = 111)
### Data standardization.
scaler = sklearn.preprocessing.StandardScaler().fit(Xs_train)
Xs_train = scaler.transform(Xs_train)
Xs_valid = scaler.transform(Xs_valid)
return (Xs_train, Xs_valid, ys_train, ys_valid, data.feature_names)
### Main procedure
def main(args):
    ### Fix seed for random Fourier feature calculation
rfflearn.seed(int(args["--seed"]))
### Prepare training data
Xs_train, Xs_valid, ys_train, ys_valid, feature_names = generate_boston_housing_dataset()
### Hyper parameter tuning.
### The returned value `study` contains the results of hyper parameter tuning,
### including the best parameters (study.best_params) and best model (= study.user_attrs["best_model"]).
if args["--model_type"] == "rffgpr":
study = rfflearn.RFFGPR_tuner(
train_set = (Xs_train, ys_train),
valid_set = (Xs_valid, ys_valid),
range_dim_kernel = (16, 256),
range_std_kernel = (1.0E-10, 1.0E-3),
range_std_error = (1.0E-5, 1.0E-2),
n_trials = int(args["--n_trials"])
)
elif args["--model_type"] == "rffregression":
study = rfflearn.RFFRegression_tuner(
train_set = (Xs_train, ys_train),
valid_set = (Xs_valid, ys_valid),
range_dim_kernel = (16, 128),
range_std_kernel = (1.0E-10, 1.0E-3),
n_trials = int(args["--n_trials"]),
n_jobs = -1
)
else: raise NotImplementedError("model type should be 'rffgpr' or 'rffregression'.")
### Show the result of the hyper parameter tuning.
print("- study.best_params:", study.best_params)
print("- study.best_value:", study.best_value)
print("- study.best_model:", study.user_attrs["best_model"])
    ### Predict on the validation data
best_model = study.user_attrs["best_model"]
ys_valid_p = best_model.predict(Xs_valid)
score_r2 = sklearn.metrics.r2_score(ys_valid, ys_valid_p)
print("- R2 score of the best model: ", score_r2)
### Calculate feature importance (SHAP and permutation importance).
shap_values = rfflearn.shap_feature_importance(best_model, Xs_valid)
perm_values = rfflearn.permutation_feature_importance(best_model, Xs_valid, ys_valid)
### Draw regression result.
mpl.figure(0)
mpl.scatter(ys_valid_p, ys_valid, alpha = 0.5)
mpl.plot([0, 50], [0, 50], "--", color = "#666666")
mpl.title("Regression of Boston Housing Dataset (R2 = %.4f)" % score_r2)
mpl.xlabel("Predicted price MEDV ($1000s)")
mpl.ylabel("True price MEDV ($1000s)")
mpl.grid()
### Visualize SHAP importance.
mpl.figure(1)
rfflearn.shap_plot(shap_values, Xs_valid, feature_names, show = False)
    ### Visualize permutation importance.
mpl.figure(2)
rfflearn.permutation_plot(perm_values, feature_names, show = False)
### Show all figures.
mpl.show()
if __name__ == "__main__":
### Parse input arguments.
args = docopt.docopt(__doc__)
    ### Add path to the 'rfflearn/' directory.
    ### The following is not necessary if you copied 'rfflearn/' to the current
    ### directory or to another directory that is on the Python path.
current_dir = os.path.dirname(__file__)
module_path = os.path.join(current_dir, "../../")
sys.path.append(module_path)
import rfflearn.cpu as rfflearn
### Run main procedure.
main(args)
#################################### SOURCE FINISH ##################################
# Author: Tetsuya Ishikawa <[email protected]>
# vim: expandtab tabstop=4 shiftwidth=4 fdm=marker
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
#
# Copyright © 2018 Dell Inc. or its subsidiaries. All rights reserved.
# Dell, EMC, and other trademarks are trademarks of Dell Inc. or its subsidiaries.
# Other trademarks may be trademarks of their respective owners.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors: Vaideeswaran Ganesan
#
from omsdk.catalog.pdkcatalog import DellPDKCatalog
from omsdk.catalog.updaterepo import UpdateRepo, RepoComparator
from omsdk.catalog.sdkhttpsrc import DownloadHelper, DownloadProtocolEnum
from omsdk.sdkprint import PrettyPrint
import threading
import os
import glob
import logging
logger = logging.getLogger(__name__)
class UpdateManager(object):
_update_store = None
_update_store_lock = threading.Lock()
@staticmethod
def configure(update_share, site='downloads.dell.com',
protocol=DownloadProtocolEnum.HTTP):
if not update_share.IsValid:
logger.debug("Update Share is not valid")
return False
if UpdateManager._update_store is None:
with UpdateManager._update_store_lock:
if UpdateManager._update_store is None:
UpdateManager._update_store = \
_UpdateCacheManager(update_share, site, protocol)
return (UpdateManager._update_store is not None)
@staticmethod
def update_catalog():
if UpdateManager._update_store:
return UpdateManager._update_store.update_catalog()
return {'Status': 'Failed', 'Message': 'Update Manager is not initialized'}
@staticmethod
def update_cache(catalog='Catalog'):
if UpdateManager._update_store:
            return UpdateManager._update_store.update_cache(catalog=catalog)
return {'Status': 'Failed', 'Message': 'Update Manager is not initialized'}
@staticmethod
def get_instance():
return UpdateManager._update_store
class _UpdateCacheManager(object):
def __init__(self, update_share, site, protocol):
self._update_share = update_share
self._master_share = update_share.makedirs("_master") \
.new_file('Catalog.xml')
self._master = MasterCatalog(self._master_share)
self._inventory_share = update_share.makedirs("_inventory")
self._cache_catalogs = {}
self._initialize()
self._conn = DownloadHelper(site=site, protocol=protocol)
def _initialize(self):
self._master.load()
catalogs_path = os.path.join(self._update_share.local_full_path, '*.xml')
for name in glob.glob(catalogs_path):
fname = os.path.basename(name).replace('.xml', '')
if fname not in self._cache_catalogs:
self._cache_catalogs[fname] = None
self._cache_catalogs['Catalog'] = None
def _randomCatalogScoper(self):
fname = self._update_share.mkstemp(prefix='upd', suffix='.xml').local_full_path
        return self.getCatalogScoper(os.path.basename(fname).replace('.xml', ''))
def getCatalogScoper(self, name='Catalog'):
if name not in self._cache_catalogs:
self._cache_catalogs[name] = None
if not self._cache_catalogs[name]:
cache_share = self._update_share.new_file(name + '.xml')
self._cache_catalogs[name] = (cache_share,
CatalogScoper(self._master, cache_share))
return self._cache_catalogs[name]
def getInventoryShare(self):
return self._inventory_share
def update_catalog(self):
folder = self._master_share.local_folder_path
c = 'catalog/Catalog.gz'
retval = self._conn.download_newerfiles([c], folder)
logger.debug("Download Success = {0}, Failed = {1}"
.format(retval['success'], retval['failed']))
if retval['failed'] == 0 and \
self._conn.unzip_file(os.path.join(folder, c),
os.path.join(folder, 'Catalog.xml')):
retval['Status'] = 'Success'
else:
logger.debug("Unable to download and extract " + c)
retval['Status'] = 'Failed'
self._conn.disconnect()
self._initialize()
return retval
def update_cache(self, catalog='Catalog'):
(cache_share, cache) = self.getCatalogScoper(catalog)
retval = self._conn.download_newerfiles(cache.UpdateFileDetails,
self._update_share.local_full_path)
logger.debug("Download Success = {0}, Failed = {1}". \
format(retval['success'], retval['failed']))
if retval['failed'] == 0:
retval['Status'] = 'Success'
else:
retval['Status'] = 'Failed'
self._conn.disconnect()
return retval
class MasterCatalog(object):
def __init__(self, master_share):
self._master_share = master_share
self.cache_lock = threading.Lock()
logger.debug("master:" + self._master_share.local_full_path)
def load(self):
with self.cache_lock:
self.cmaster = DellPDKCatalog(self._master_share.local_full_path)
class CatalogScoper(object):
def __init__(self, master_catalog, cache_share):
self._cache_share = cache_share
self.cache_lock = threading.Lock()
self._master_catalog = master_catalog
logger.debug("cache:" + self._cache_share.local_folder_path)
logger.debug("cache:" + self._cache_share.local_file_name)
self._rcache = UpdateRepo(self._cache_share.local_folder_path,
catalog=self._cache_share.local_file_name,
source=self._master_catalog.cmaster, mkdirs=True)
@property
def UpdateFilePaths(self):
return self._rcache.UpdateFilePaths
@property
def UpdateFileDetails(self):
return self._rcache.UpdateFileDetails
def add_to_scope(self, model, swidentity=None, *components):
count = 0
with self.cache_lock:
comps = [i for i in components]
if len(comps) > 0 and swidentity is None:
logger.error('Software Identity must be given when scoping updates to components')
if swidentity:
count = self._rcache.filter_by_component(model,
swidentity, compfqdd=comps)
else:
count = self._rcache.filter_by_model(model)
return count
def compare(self, model, swidentity):
compare = RepoComparator(swidentity)
self._rcache.filter_by_component(model, swidentity, compare=compare)
return compare.final()
def save(self):
with self.cache_lock:
self._rcache.store()
def dispose(self):
with self.cache_lock:
if self._cache_share.IsTemp:
logger.debug("Temporary cache")
self._cache_share.dispose()
else:
logger.debug("Not a temporary cache")
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import json
from .fusion_lib import get_fusion_unit
from nn_meter.utils.graph_tool import ModelGraph
class RuleReader:
rules_default = {
"MON": 0,
"FN": True,
}
multiop_blocks = ["se", "hswish", "channelshuffle", "gap"]
def __init__(self, rule_file=None):
self.rules = {}
if rule_file:
with open(rule_file, "r") as fp:
self.rules = json.load(fp)
self._extract_fusible()
self._parse_multiop_block()
def is_fusible(self, node_type, outnode_type):
return (node_type, outnode_type) in self.fusible
def query_rule(self, rule):
if rule not in self.rules or self.rules[rule]["obey"] is None:
return self.rules_default[rule]
else:
return self.rules[rule]["obey"]
def _extract_fusible(self):
def get_name(i):
return f"{ops[i]}_{i}"
self.fusible = []
self.fusion_units = {}
for name, rule in self.rules.items():
if rule["obey"] and name.startswith("BF"):
ops = name.split("_")[1:]
if len(ops) == 2:
self.fusible.append((ops[0], ops[1]))
elif len(ops) > 2:
fusion_unit = {}
for i in range(0, len(ops)):
fusion_unit[get_name(i)] = {
"attr": {
"type": ops[i],
"attr": {},
},
"inbounds": [get_name(i - 1)] if i > 0 else [],
"outbounds": [get_name(i + 1)] if i < len(ops) - 1 else [],
}
self.fusion_units["-".join(ops)] = [ModelGraph(graph=fusion_unit)]
def _parse_multiop_block(self):
for block in self.multiop_blocks:
self.fusion_units[block] = get_fusion_unit(block)
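if __name__ == "__main__":
    # Minimal sketch of the rule format consumed above. The rule name "BF_conv_bn"
    # and the temporary file are illustrative only; running this still requires the
    # nn-Meter package so that get_fusion_unit() can resolve the multi-op blocks.
    import tempfile
    demo_rules = {"BF_conv_bn": {"obey": True}, "MON": {"obey": 0}}
    with tempfile.NamedTemporaryFile("w", suffix=".json", delete=False) as fp:
        json.dump(demo_rules, fp)
        rule_file = fp.name
    reader = RuleReader(rule_file)
    print(reader.is_fusible("conv", "bn"))  # True: conv followed by bn may be fused
    print(reader.query_rule("MON"))         # 0: taken from the rule file, not the default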
#!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
"""
Test that we can generate Visual Studio 10.0 project (.vcxproj) and
solution (.sln) files that contain SCC information and look correct.
"""
import os
import TestSConsMSVS
test = TestSConsMSVS.TestSConsMSVS()
# Make the test infrastructure think we have this version of MSVS installed.
test._msvs_versions = ['10.0']
expected_slnfile = TestSConsMSVS.expected_slnfile_10_0
expected_vcprojfile = TestSConsMSVS.expected_vcprojfile_10_0
SConscript_contents = """\
env=Environment(platform='win32', tools=['msvs'], MSVS_VERSION='10.0',
CPPDEFINES=['DEF1', 'DEF2',('DEF3','1234')],
CPPPATH=['inc1', 'inc2'],
MSVS_SCC_LOCAL_PATH='C:\\MyMsVsProjects',
MSVS_SCC_PROJECT_NAME='Perforce Project')
testsrc = ['test1.cpp', 'test2.cpp']
testincs = ['sdk.h']
testlocalincs = ['test.h']
testresources = ['test.rc']
testmisc = ['readme.txt']
env.MSVSProject(target = 'Test.vcxproj',
srcs = testsrc,
incs = testincs,
localincs = testlocalincs,
resources = testresources,
misc = testmisc,
buildtarget = 'Test.exe',
variant = 'Release')
"""
expected_vcproj_sccinfo = """\
\t\t<SccProjectName>Perforce Project</SccProjectName>
\t\t<SccLocalPath>C:\\MyMsVsProjects</SccLocalPath>
"""
test.write('SConstruct', SConscript_contents)
test.run(arguments="Test.vcxproj")
test.must_exist(test.workpath('Test.vcxproj'))
vcproj = test.read('Test.vcxproj', 'r')
expect = test.msvs_substitute(expected_vcprojfile, '10.0', None, 'SConstruct',
vcproj_sccinfo=expected_vcproj_sccinfo)
# don't compare the pickled data
assert vcproj[:len(expect)] == expect, test.diff_substr(expect, vcproj)
test.must_exist(test.workpath('Test.sln'))
sln = test.read('Test.sln', 'r')
expect = test.msvs_substitute(expected_slnfile, '10.0', None, 'SConstruct')
# don't compare the pickled data
assert sln[:len(expect)] == expect, test.diff_substr(expect, sln)
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
import asyncio
import traceback
from abc import ABC
import aio_pika
import orjson
class Broker(ABC):
def __init__(self, callback=None):
async def _default_callback(*_, **__):
pass
self.callback = callback or _default_callback
async def connect(self, group, *args, **kwargs):
pass
async def subscribe(self, queue, *events):
pass
async def send(self, data):
pass
class AMQPBroker(Broker):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.connection = None
self.channel = None
self.exchange = None
async def connect(self, group, *args, **kwargs):
self.connection = await aio_pika.connect_robust(*args, **kwargs)
self.channel = await self.connection.channel()
self.exchange = await self.channel.declare_exchange(
group, type="direct", durable=True
)
async def subscribe(self, queue, *events):
queue = await self.channel.declare_queue(queue, auto_delete=not queue)
for event in events:
await queue.bind(self.exchange, event.upper())
async with queue.iterator() as queue_iter:
async for msg in queue_iter:
try:
async with msg.process():
data = orjson.loads(msg.body)
await self.callback(msg.routing_key.upper(), data)
except asyncio.CancelledError:
raise
except Exception:
traceback.print_exc()
async def send(self, data):
await self.exchange.publish(
aio_pika.Message(body=orjson.dumps(data)), routing_key="SEND"
)
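if __name__ == "__main__":
    # Minimal usage sketch. The AMQP URL, exchange name and queue name below are
    # placeholders for a local RabbitMQ instance, not values defined by this module.
    async def _demo():
        async def on_event(event, data):
            print("received", event, data)

        broker = AMQPBroker(callback=on_event)
        await broker.connect("gateway", "amqp://guest:guest@localhost/")
        await broker.send({"op": "ping"})
        # broker.subscribe("demo-queue", "send") would block here, dispatching
        # every matching message to on_event.

    asyncio.run(_demo())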
"""
Util module.
General methods to use, no dependencies to other internal modules
"""
import numpy as np
import os
import cv2
import sys
import logging
import multiprocessing as mp
# Alternative implementation in Cython
from brescount import bres_line
def logger():
"""Get the module logger"""
return logging.getLogger("copa_map")
def get_cpu_count():
"""Get the number of CPUs"""
try:
cpu_count = int(os.environ['PBS_NUM_PPN']) # luis cluster PBS
except KeyError:
try:
cpu_count = int(os.environ['SLURM_CPUS_PER_TASK']) # luis cluster SLURM
except KeyError:
cpu_count = mp.cpu_count()
# Return one less core to keep the system responsive
return max([cpu_count - 1, 1])
def _bresenhamline_nslope(slope):
"""Normalize slope for Bresenham's line algorithm."""
scale = np.amax(np.abs(slope), axis=1).reshape(-1, 1)
zeroslope = (scale == 0).all(1)
scale[zeroslope] = np.ones(1)
normalizedslope = np.array(slope, dtype=np.double) / scale
normalizedslope[zeroslope] = np.zeros(slope[0].shape)
return normalizedslope
def _bresenhamlines(start, end, max_iter):
"""Returns npts lines of length max_iter each. (npts x max_iter x dimension)."""
if max_iter == -1:
max_iter = np.amax(np.amax(np.abs(end - start), axis=1))
npts, dim = start.shape
nslope = _bresenhamline_nslope(end - start)
# steps to iterate on
stepseq = np.arange(1, max_iter + 1)
stepmat = np.tile(stepseq, (dim, 1)).T
# some hacks for broadcasting properly
bline = start[:, np.newaxis, :] + nslope[:, np.newaxis, :] * stepmat
# Approximate to nearest int
return np.array(np.rint(bline), dtype=start.dtype)
# def _bresenhamline(start, end):
# return bres_line(start, end)
def min_on_line_coord(start: np.ndarray, end: np.ndarray, matrix: np.ndarray, thresh: float):
"""
Return the coordinate on a line between two points, that has a value smaller than a threshold
Given a start and end point, as coordinates of a matrix, the functions creates a line (bresenham) between the
two points.
Then the values of the matrix corresponding to the line are checked, and the first coord. is returned that has
a value smaller than thresh
Args:
start: start point (2 x 1)
end: end point (2 x 1)
matrix: Array with values
thresh: Threshold value to check
Returns:
(2 x 1) coordinate with first small value, or endpoint if none is found
"""
assert len(start) == 2 and len(end) == 2, "Coordinates must be 2D arrays"
line = bres_line(start, end)
vals_line = matrix[line[:, 0], line[:, 1]]
cond = vals_line < thresh
# If no value is smaller, return endpoint
if np.all(~cond):
return end
# Return first point where value is smaller
return line[np.argmax(cond)]
def bresenhamline(start, end, max_iter=-1):
"""
Returns a list of points from (start, end] by ray tracing a line b/w the points.
Args:
start: An array of start points (number of points x dimension)
end: An end points (1 x dimension)
or An array of end point corresponding to each start point
(number of points x dimension)
max_iter: Max points to traverse. if -1, maximum number of required
points are traversed
Returns:
linevox (n x dimension) A cumulative array of all points traversed by
all the lines so far.
"""
# Return the points as a single array
# return _bresenhamline(start, end)
return _bresenhamlines(start, end, max_iter).reshape(-1, start.shape[-1])
def isPD(matrix):
"""Returns true when input is positive-definite, via Cholesky"""
try:
_ = np.linalg.cholesky(matrix)
return True
except np.linalg.LinAlgError:
return False
def nearestPD(A):
"""Find the nearest positive-definite matrix to input
Taken from: https://gist.github.com/fasiha/fdb5cec2054e6f1c6ae35476045a0bbd
A Python/Numpy port of John D'Errico's `nearestSPD` MATLAB code [1], which
credits [2].
[1] https://www.mathworks.com/matlabcentral/fileexchange/42885-nearestspd
[2] N.J. Higham, "Computing a nearest symmetric positive semidefinite
matrix" (1988): https://doi.org/10.1016/0024-3795(88)90223-6
"""
A = np.array(A)
B = (A + A.T) / 2
_, s, V = np.linalg.svd(B)
H = np.dot(V.T, np.dot(np.diag(s), V))
A2 = (B + H) / 2
A3 = (A2 + A2.T) / 2
if isPD(A3):
return A3
spacing = np.spacing(np.linalg.norm(A))
# The above is different from [1]. It appears that MATLAB's `chol` Cholesky
# decomposition will accept matrixes with exactly 0-eigenvalue, whereas
# Numpy's will not. So where [1] uses `eps(mineig)` (where `eps` is Matlab
# for `np.spacing`), we use the above definition. CAVEAT: our `spacing`
# will be much larger than [1]'s `eps(mineig)`, since `mineig` is usually on
# the order of 1e-16, and `eps(1e-16)` is on the order of 1e-34, whereas
# `spacing` will, for Gaussian random matrixes of small dimension, be on
# othe order of 1e-16. In practice, both ways converge, as the unit test
# below suggests.
I_diag = np.eye(A.shape[0])
k = 1
while not isPD(A3):
mineig = np.min(np.real(np.linalg.eigvals(A3)))
A3 += I_diag * (-mineig * k ** 2 + spacing)
k += 1
return A3
def package_path(*paths, file=__file__):
"""
Helper to receive an absolute path within the package directories
Args:
*paths: (Optional) String to append to package path
file: (Optional) File to get the path from, default parameter this script
Returns:
Absolute path
"""
return os.path.join(os.path.dirname(os.path.abspath(file)), *paths)
def abs_path():
"""Absolute package path"""
return os.path.abspath(os.path.join(sys.modules["copa_map"].__file__, os.pardir))
def remove_small_objects_in_image(img, min_pixel_size):
"""
Removes white objects, which are smaller than minimum size
Args:
img: Input image
min_pixel_size: Minimum pixel size
Returns:
Cleaned image
"""
img = img.astype(np.uint8)
nb_components, output, stats, centroids = cv2.connectedComponentsWithStats(img, connectivity=8)
sizes = stats[1:, -1]
nb_components = nb_components - 1
img2 = np.zeros(output.shape)
for i in range(0, nb_components):
if sizes[i] >= min_pixel_size:
img2[output == i + 1] = 255
return img2.astype(np.uint8)
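if __name__ == "__main__":
    # Tiny sanity check of bresenhamline (a sketch; assumes the module's own
    # dependencies such as cv2 and brescount are importable): the points traversed
    # from (0, 0) towards (3, 3) should be (1, 1), (2, 2), (3, 3).
    start = np.array([[0, 0]])
    end = np.array([[3, 3]])
    print(bresenhamline(start, end))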
from . import db
class PersonalBelongings(db.Model):
__name__ = 'PersonalBelongings'
__tablename__ = 'personal_belongings'
id = db.Column(db.Integer, primary_key=True)
# Personal belongings type
type = db.Column(db.Integer, nullable=False)
# some types have subtype
subtype = db.Column(db.Integer)
def toDict(self):
""" return dict representation of the object """
        return {'id':self.id, 'personal_belongings_type':self.type, 'personal_belongings_subtype':self.subtype}
import numpy as np
import sys
sys.path.append('..')
from chap11.dubins_parameters import dubins_parameters
from message_types.msg_path import msg_path
class path_manager:
def __init__(self):
# message sent to path follower
self.path = msg_path()
# pointers to previous, current, and next waypoints
self.ptr_previous = 0
self.ptr_current = 1
self.ptr_next = 2
# flag that request new waypoints from path planner
self.flag_need_new_waypoints = True
self.num_waypoints = 0
self.halfspace_n = np.inf * np.ones((3,1))
self.halfspace_r = np.inf * np.ones((3,1))
# state of the manager state machine
self.manager_state = 1
# dubins path parameters
self.dubins_path = dubins_parameters()
def update(self, waypoints, radius, state):
# this flag is set for one time step to signal a redraw in the viewer
# if self.path.flag_path_changed == True:
# self.path.flag_path_changed = False
if waypoints.num_waypoints == 0:
waypoints.flag_manager_requests_waypoints = True
else:
if waypoints.type == 'straight_line':
self.line_manager(waypoints, state)
elif waypoints.type == 'fillet':
self.fillet_manager(waypoints, radius, state)
elif waypoints.type == 'dubins':
self.dubins_manager(waypoints, radius, state)
else:
print('Error in Path Manager: Undefined waypoint type.')
return self.path
def line_manager(self, waypoints, state):
# print("waypoints",waypoints.ned)#,waypoints.course)
P = np.array([[state.pn,state.pe,-state.h]]).T
wi_1 = np.array([[waypoints.ned[0][self.ptr_previous],waypoints.ned[1][self.ptr_previous],waypoints.ned[2][self.ptr_previous]]]).T
wi = np.array([[waypoints.ned[0][self.ptr_current],waypoints.ned[1][self.ptr_current],waypoints.ned[2][self.ptr_current]]]).T
wip1 = np.array([[waypoints.ned[0][self.ptr_next],waypoints.ned[1][self.ptr_next],waypoints.ned[2][self.ptr_next]]]).T
ri_1 = wi_1
qi_1 = (wi-wi_1)/np.linalg.norm(wi-wi_1)
qi = (wip1-wi)/np.linalg.norm(wip1-wi)
### Increment path
self.halfspace_r = wi
self.halfspace_n = (qi_1 + qi) / np.linalg.norm(qi_1 + qi)
if self.inHalfSpace(P):
self.increment_pointers()
### Return
self.path.airspeed = waypoints.airspeed.item(self.ptr_previous)
self.path.line_origin = ri_1
self.path.line_direction = qi_1
def fillet_manager(self, waypoints, radius, state):
# print("waypoints",waypoints.ned)#,waypoints.course)
P = np.array([[state.pn,state.pe,-state.h]]).T
R = radius
wi_1 = np.array([[waypoints.ned[0][self.ptr_previous],waypoints.ned[1][self.ptr_previous],waypoints.ned[2][self.ptr_previous]]]).T
wi = np.array([[waypoints.ned[0][self.ptr_current],waypoints.ned[1][self.ptr_current],waypoints.ned[2][self.ptr_current]]]).T
wip1 = np.array([[waypoints.ned[0][self.ptr_next],waypoints.ned[1][self.ptr_next],waypoints.ned[2][self.ptr_next]]]).T
qi_1 = (wi-wi_1)/np.linalg.norm(wi-wi_1)
qi = (wip1-wi)/np.linalg.norm(wip1-wi)
varrho = np.arccos(float(-qi_1.T@qi))
if self.manager_state == 1:
self.path.flag = 'line'
self.path.line_origin = wi_1
self.path.line_direction = qi_1
self.halfspace_r = wi - R/np.tan(varrho/2.)*qi_1
self.halfspace_n = qi_1
if self.inHalfSpace(P):
self.manager_state = 2
elif self.manager_state == 2:
self.path.flag = 'orbit'
self.path.orbit_center = wi - R/np.sin(varrho/2.)*(qi_1-qi)/np.linalg.norm(qi_1-qi)
# self.path.orbit_center.item(2) = -100
self.path.orbit_radius = R
self.path.orbit_direction = np.sign((qi_1.item(0)*qi.item(1))-(qi_1.item(1)*qi.item(0)))
print("orbit center:", self.path.orbit_center)
self.halfspace_r = wi + R/np.tan(varrho/2.)*qi
self.halfspace_n = qi
if self.inHalfSpace(P):
self.increment_pointers()
self.manager_state = 1
### Return
self.path.airspeed = waypoints.airspeed.item(self.ptr_previous)
def dubins_manager(self, waypoints, radius, state):
P = np.array([[state.pn,state.pe,-state.h]]).T
R = radius
wi_1 = np.array([[waypoints.ned[0][self.ptr_previous],waypoints.ned[1][self.ptr_previous],waypoints.ned[2][self.ptr_previous]]]).T
chii_1 = waypoints.course[0][self.ptr_previous]
wi = np.array([[waypoints.ned[0][self.ptr_current],waypoints.ned[1][self.ptr_current],waypoints.ned[2][self.ptr_current]]]).T
chii = waypoints.course[0][self.ptr_current]
# wip1 = np.array([[waypoints.ned[0][self.ptr_next],waypoints.ned[1][self.ptr_next],waypoints.ned[2][self.ptr_next]]]).T
# chip1 = waypoints.course[self.ptr_next]
self.dubins_path.update(wi_1,chii_1,wi,chii,R)
if self.manager_state == 1:
self.path.flag = 'orbit'
self.path.orbit_center = self.dubins_path.center_s
self.path.orbit_radius = self.dubins_path.radius
self.path.orbit_direction = self.dubins_path.dir_s
self.halfspace_r = self.dubins_path.r1 #z1
self.halfspace_n = -self.dubins_path.n1 #-q1
if self.inHalfSpace(P):
self.manager_state = 2
elif self.manager_state == 2:
self.halfspace_r = self.dubins_path.r1 #z1
self.halfspace_n = self.dubins_path.n1 #q1
if self.inHalfSpace(P):
self.manager_state = 3
elif self.manager_state == 3:
self.path.flag = 'line'
self.path.line_origin = self.dubins_path.r1
self.path.line_direction = self.dubins_path.n1
self.halfspace_r = self.dubins_path.r2 #z2
self.halfspace_n = self.dubins_path.n1 #q1
if self.inHalfSpace(P):
self.manager_state = 4
elif self.manager_state == 4:
self.path.flag = 'orbit'
self.path.orbit_center = self.dubins_path.center_e
self.path.orbit_radius = self.dubins_path.radius
self.path.orbit_direction = self.dubins_path.dir_e
self.halfspace_r = self.dubins_path.r3 #z3
self.halfspace_n = -self.dubins_path.n3 #-q3
if self.inHalfSpace(P):
self.manager_state = 5
else: #state 5
if self.inHalfSpace(P):
self.manager_state = 1
self.increment_pointers()
self.dubins_path.update(wi_1,chii_1,wi,chii,R)
def initialize_pointers(self):
print("initialize pointers")
def increment_pointers(self):
print("Increment pointers")
self.ptr_previous+=1
self.ptr_current+=1
self.ptr_next+=1
def inHalfSpace(self, pos):
if (pos-self.halfspace_r).T @ self.halfspace_n >= 0:
return True
else:
return False
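if __name__ == "__main__":
    # Standalone sketch of the half-space test used by the managers above: a
    # position p has crossed the plane through r with normal n when
    # (p - r)^T n >= 0. The numbers below are illustrative only, and running this
    # file directly still assumes the surrounding project layout (chap11,
    # message_types) so that the module imports succeed.
    r = np.array([[10.0], [0.0], [0.0]])
    n = np.array([[1.0], [0.0], [0.0]])
    for p in (np.array([[5.0], [0.0], [0.0]]), np.array([[15.0], [0.0], [0.0]])):
        print(bool((p - r).T @ n >= 0))  # False, then True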
# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
# pylint: disable=g-doc-return-or-yield,line-too-long
"""TEAMS experiments."""
import dataclasses
from official.core import config_definitions as cfg
from official.core import exp_factory
from official.modeling import optimization
from official.nlp.configs import encoders
from official.nlp.data import pretrain_dataloader
from official.nlp.data import question_answering_dataloader
from official.nlp.data import sentence_prediction_dataloader
from official.nlp.tasks import question_answering
from official.nlp.tasks import sentence_prediction
from official.projects.teams import teams
from official.projects.teams import teams_task
AdamWeightDecay = optimization.AdamWeightDecayConfig
PolynomialLr = optimization.PolynomialLrConfig
PolynomialWarmupConfig = optimization.PolynomialWarmupConfig
@dataclasses.dataclass
class TeamsOptimizationConfig(optimization.OptimizationConfig):
"""TEAMS optimization config."""
optimizer: optimization.OptimizerConfig = optimization.OptimizerConfig(
type="adamw",
adamw=AdamWeightDecay(
weight_decay_rate=0.01,
exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"],
epsilon=1e-6))
learning_rate: optimization.LrConfig = optimization.LrConfig(
type="polynomial",
polynomial=PolynomialLr(
initial_learning_rate=1e-4,
decay_steps=1000000,
end_learning_rate=0.0))
warmup: optimization.WarmupConfig = optimization.WarmupConfig(
type="polynomial", polynomial=PolynomialWarmupConfig(warmup_steps=10000))
@exp_factory.register_config_factory("teams/pretraining")
def teams_pretrain() -> cfg.ExperimentConfig:
"""TEAMS pretraining."""
config = cfg.ExperimentConfig(
task=teams_task.TeamsPretrainTaskConfig(
train_data=pretrain_dataloader.BertPretrainDataConfig(),
validation_data=pretrain_dataloader.BertPretrainDataConfig(
is_training=False)),
trainer=cfg.TrainerConfig(
optimizer_config=TeamsOptimizationConfig(), train_steps=1000000),
restrictions=[
"task.train_data.is_training != None",
"task.validation_data.is_training != None"
])
return config
@exp_factory.register_config_factory("teams/sentence_prediction")
def teams_sentence_prediction() -> cfg.ExperimentConfig:
r"""Teams GLUE."""
config = cfg.ExperimentConfig(
task=sentence_prediction.SentencePredictionConfig(
model=sentence_prediction.ModelConfig(
encoder=encoders.EncoderConfig(
type="any", any=teams.TeamsEncoderConfig(num_layers=1))),
train_data=sentence_prediction_dataloader
.SentencePredictionDataConfig(),
validation_data=sentence_prediction_dataloader
.SentencePredictionDataConfig(
is_training=False, drop_remainder=False)),
trainer=cfg.TrainerConfig(optimizer_config=TeamsOptimizationConfig()),
restrictions=[
"task.train_data.is_training != None",
"task.validation_data.is_training != None"
])
return config
@exp_factory.register_config_factory("teams/squad")
def teams_squad() -> cfg.ExperimentConfig:
"""Teams Squad V1/V2."""
config = cfg.ExperimentConfig(
task=question_answering.QuestionAnsweringConfig(
model=question_answering.ModelConfig(
encoder=encoders.EncoderConfig(
type="any", any=teams.TeamsEncoderConfig(num_layers=1))),
train_data=question_answering_dataloader.QADataConfig(),
validation_data=question_answering_dataloader.QADataConfig()),
trainer=cfg.TrainerConfig(optimizer_config=TeamsOptimizationConfig()),
restrictions=[
"task.train_data.is_training != None",
"task.validation_data.is_training != None"
])
return config
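if __name__ == "__main__":
  # Quick sanity check (a sketch): the experiment names registered above can be
  # retrieved through the factory. This assumes the TF Model Garden packages are
  # installed and importable.
  pretrain_cfg = exp_factory.get_exp_config("teams/pretraining")
  print(pretrain_cfg.trainer.train_steps)  # 1000000, as configured above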
# -*- coding: utf-8 -*-
#
# michael a.g. aïvázis
# orthologue
# (c) 1998-2021 all rights reserved
#
# superclass
from .Processor import Processor
# declaration
class Normalizer(Processor):
"""
A record method decorator that registers this method as a normalizer of descriptor values
"""
# meta-methods
def __call__(self, method):
"""
Add {method} as a normalizer to my registered descriptors
"""
# go through the sequence of registered descriptors
for trait in self.traits:
# and register {method} as a normalizer
trait.normalizers.append(method)
# all done
return method
# end of file
import os
import numpy as np
import matplotlib.pyplot as plt
MODULE_DIR = os.path.dirname(os.path.abspath(__file__))
# plots of average magnetisation components
averages_martinez = os.path.join(MODULE_DIR, "m_averages_ref_martinez.txt")
ref_t, ref_mx, ref_my, ref_mz = np.loadtxt(averages_martinez, unpack=True)
plt.plot(ref_t, ref_mx, "r-", label=r"$m_\mathrm{x}\,\mathrm{Martinez\, et\, al.}$")
plt.plot(ref_t, ref_my, "r:", label=r"$m_\mathrm{y}$")
plt.plot(ref_t, ref_mz, "r--", label=r"$m_\mathrm{z}$")
averages_finmag = os.path.join(MODULE_DIR, "dynamics.ndt")
t, mx, my, mz = np.loadtxt(averages_finmag, unpack=True)
t *= 1e9 # convert from s to ns
plt.plot(t, mx, "b-", label=r"$m_\mathrm{x}\,\mathrm{FinMag}$")
plt.plot(t, my, "b:")
plt.plot(t, mz, "b--")
plt.xlabel("$\mathrm{time}\, (\mathrm{ns})$")
plt.ylabel("$<m_i> = <M_i>/M_\mathrm{S}$")
plt.legend()
plt.xlim([0, 2])
plt.savefig(os.path.join(MODULE_DIR, "m_averages.pdf"))
# Copyright 2018 Iguazio
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from contextlib import contextmanager
from os import environ
from subprocess import PIPE, run
from uuid import uuid4
import pytest
from conftest import here, in_docker, wait_for_server
prj_dir = here.parent
is_ci = 'CI' in environ
should_run = (not in_docker) and is_ci
@contextmanager
def clean_docker(cmd, tag):
yield
run(['docker', cmd, '-f', tag])
@pytest.mark.skipif(not should_run, reason='in docker container or not CI')
def test_docker():
tag = f'mlrun/test-{uuid4().hex}'
cid = None
cmd = ['docker', 'build', '-f', 'dockerfiles/mlrun-api/Dockerfile', '-t', tag, '.']
run(cmd, cwd=prj_dir, check=True)
with clean_docker('rmi', tag):
port = 8080
cmd = ['docker', 'run', '--detach', '-p', f'{port}:{port}', tag]
out = run(cmd, stdout=PIPE, check=True)
cid = out.stdout.decode('utf-8').strip()
with clean_docker('rm', cid):
url = f'http://localhost:{port}/api/healthz'
timeout = 30
assert wait_for_server(url, timeout), \
f'server failed to start after {timeout} seconds, url={url}'
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
import copy
import json
import re
from svtplay_dl.error import ServiceError
from svtplay_dl.fetcher.http import HTTP
from svtplay_dl.service import OpenGraphThumbMixin
from svtplay_dl.service import Service
class Sr(Service, OpenGraphThumbMixin):
supported_domains = ["sverigesradio.se"]
def get(self):
data = self.get_urldata()
match = re.search(r'data-audio-id="(\d+)"', data)
match2 = re.search(r'data-audio-type="(\w+)"', data)
if match and match2:
aid = match.group(1)
type = match2.group(1)
else:
yield ServiceError("Can't find audio info")
return
dataurl = "https://sverigesradio.se/sida/playerajax/" "getaudiourl?id={}&type={}&quality=high&format=iis".format(aid, type)
data = self.http.request("get", dataurl).text
playerinfo = json.loads(data)
yield HTTP(copy.copy(self.config), playerinfo["audioUrl"], 128, output=self.output)
import socket
import struct
import pickle
import numpy as np
import gym
class Connection:
def __init__(self, s):
self._socket = s
self._buffer = bytearray()
def receive_object(self):
while len(self._buffer) < 4 or len(self._buffer) < struct.unpack("<L", self._buffer[:4])[0] + 4:
new_bytes = self._socket.recv(16)
if len(new_bytes) == 0:
return None
self._buffer += new_bytes
length = struct.unpack("<L", self._buffer[:4])[0]
header, body = self._buffer[:4], self._buffer[4:length + 4]
obj = pickle.loads(body)
self._buffer = self._buffer[length + 4:]
return obj
def send_object(self, d):
body = pickle.dumps(d, protocol=2)
header = struct.pack("<L", len(body))
msg = header + body
self._socket.send(msg)
class Env(gym.Env):
def __init__(self, addr):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(addr)
s.listen(1)
clientsocket, address = s.accept()
self._socket = clientsocket
self._conn = Connection(clientsocket)
self.action_space = None
self.observation_space = None
def reset(self):
self._conn.send_object("reset")
msg = self._conn.receive_object()
self.action_space = eval(msg["info"]["action_space"])
self.observation_space = eval(msg["info"]["observation_space"])
return msg["observation"]
def step(self, action):
self._conn.send_object(action.tolist())
msg = self._conn.receive_object()
obs = msg["observation"]
rwd = msg["reward"]
done = msg["done"]
info = msg["info"]
return obs, rwd, done, info
def close(self):
self._conn.send_object("close")
self._socket.close()
addr = ("127.0.0.1", 50710)
env = Env(addr)
obs = env.reset()
print(obs, env.action_space, env.observation_space)
for i in range(10):
a = env.action_space.sample()
obs, rwd, done, info = env.step(a)
print(i, obs, rwd, done, info)
if done:
print("resetting")
env.reset()
env.close()
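# Wire format used by Connection above (a sketch): each message is a 4-byte
# little-endian length header followed by the pickled payload, e.g.
#     body = pickle.dumps({"observation": [0, 0]}, protocol=2)
#     frame = struct.pack("<L", len(body)) + body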
# Copyright 2015 PLUMgrid
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import atexit
import ctypes as ct
import fcntl
import json
import os
import re
import struct
import errno
import sys
basestring = (unicode if sys.version_info[0] < 3 else str)
from .libbcc import lib, bcc_symbol, bcc_symbol_option, _SYM_CB_TYPE
from .table import Table, PerfEventArray
from .perf import Perf
from .utils import get_online_cpus, printb, _assert_is_bytes, ArgString
_probe_limit = 1000
_num_open_probes = 0
# for tests
def _get_num_open_probes():
global _num_open_probes
return _num_open_probes
TRACEFS = "/sys/kernel/debug/tracing"
# Debug flags
# Debug output compiled LLVM IR.
DEBUG_LLVM_IR = 0x1
# Debug output loaded BPF bytecode and register state on branches.
DEBUG_BPF = 0x2
# Debug output pre-processor result.
DEBUG_PREPROCESSOR = 0x4
# Debug output ASM instructions embedded with source.
DEBUG_SOURCE = 0x8
#Debug output register state on all instructions in addition to DEBUG_BPF.
DEBUG_BPF_REGISTER_STATE = 0x10
class SymbolCache(object):
def __init__(self, pid):
self.cache = lib.bcc_symcache_new(
pid, ct.cast(None, ct.POINTER(bcc_symbol_option)))
def resolve(self, addr, demangle):
"""
Return a tuple of the symbol (function), its offset from the beginning
of the function, and the module in which it lies. For example:
("start_thread", 0x202, "/usr/lib/.../libpthread-2.24.so")
If the symbol cannot be found but we know which module it is in,
return the module name and the offset from the beginning of the
module. If we don't even know the module, return the absolute
address as the offset.
"""
sym = bcc_symbol()
if demangle:
res = lib.bcc_symcache_resolve(self.cache, addr, ct.byref(sym))
else:
res = lib.bcc_symcache_resolve_no_demangle(self.cache, addr,
ct.byref(sym))
if res < 0:
if sym.module and sym.offset:
return (None, sym.offset,
ct.cast(sym.module, ct.c_char_p).value)
return (None, addr, None)
if demangle:
name_res = sym.demangle_name
lib.bcc_symbol_free_demangle_name(ct.byref(sym))
else:
name_res = sym.name
return (name_res, sym.offset, ct.cast(sym.module, ct.c_char_p).value)
def resolve_name(self, module, name):
module = _assert_is_bytes(module)
name = _assert_is_bytes(name)
addr = ct.c_ulonglong()
if lib.bcc_symcache_resolve_name(self.cache, module, name,
ct.byref(addr)) < 0:
return -1
return addr.value
class PerfType:
# From perf_type_id in uapi/linux/perf_event.h
HARDWARE = 0
SOFTWARE = 1
class PerfHWConfig:
# From perf_hw_id in uapi/linux/perf_event.h
CPU_CYCLES = 0
INSTRUCTIONS = 1
CACHE_REFERENCES = 2
CACHE_MISSES = 3
BRANCH_INSTRUCTIONS = 4
BRANCH_MISSES = 5
BUS_CYCLES = 6
STALLED_CYCLES_FRONTEND = 7
STALLED_CYCLES_BACKEND = 8
REF_CPU_CYCLES = 9
class PerfSWConfig:
# From perf_sw_id in uapi/linux/perf_event.h
CPU_CLOCK = 0
TASK_CLOCK = 1
PAGE_FAULTS = 2
CONTEXT_SWITCHES = 3
CPU_MIGRATIONS = 4
PAGE_FAULTS_MIN = 5
PAGE_FAULTS_MAJ = 6
ALIGNMENT_FAULTS = 7
EMULATION_FAULTS = 8
DUMMY = 9
BPF_OUTPUT = 10
class BPF(object):
# From bpf_prog_type in uapi/linux/bpf.h
SOCKET_FILTER = 1
KPROBE = 2
SCHED_CLS = 3
SCHED_ACT = 4
TRACEPOINT = 5
XDP = 6
PERF_EVENT = 7
CGROUP_SKB = 8
CGROUP_SOCK = 9
LWT_IN = 10
LWT_OUT = 11
LWT_XMIT = 12
SOCK_OPS = 13
SK_SKB = 14
CGROUP_DEVICE = 15
SK_MSG = 16
RAW_TRACEPOINT = 17
CGROUP_SOCK_ADDR = 18
# from xdp_action uapi/linux/bpf.h
XDP_ABORTED = 0
XDP_DROP = 1
XDP_PASS = 2
XDP_TX = 3
_probe_repl = re.compile(b"[^a-zA-Z0-9_]")
_sym_caches = {}
_auto_includes = {
"linux/time.h": ["time"],
"linux/fs.h": ["fs", "file"],
"linux/blkdev.h": ["bio", "request"],
"linux/slab.h": ["alloc"],
"linux/netdevice.h": ["sk_buff", "net_device"]
}
# BPF timestamps come from the monotonic clock. To be able to filter
# and compare them from Python, we need to invoke clock_gettime.
# Adapted from http://stackoverflow.com/a/1205762
CLOCK_MONOTONIC = 1 # see <linux/time.h>
class timespec(ct.Structure):
_fields_ = [('tv_sec', ct.c_long), ('tv_nsec', ct.c_long)]
_librt = ct.CDLL('librt.so.1', use_errno=True)
_clock_gettime = _librt.clock_gettime
_clock_gettime.argtypes = [ct.c_int, ct.POINTER(timespec)]
@classmethod
def monotonic_time(cls):
"""monotonic_time()
Returns the system monotonic time from clock_gettime, using the
CLOCK_MONOTONIC constant. The time returned is in nanoseconds.
"""
t = cls.timespec()
if cls._clock_gettime(cls.CLOCK_MONOTONIC, ct.byref(t)) != 0:
errno = ct.get_errno()
raise OSError(errno, os.strerror(errno))
return t.tv_sec * 1e9 + t.tv_nsec
@classmethod
def generate_auto_includes(cls, program_words):
"""
Generates #include statements automatically based on a set of
recognized types such as sk_buff and bio. The input is all the words
that appear in the BPF program, and the output is a (possibly empty)
string of #include statements, such as "#include <linux/fs.h>".
"""
headers = ""
for header, keywords in cls._auto_includes.items():
for keyword in keywords:
for word in program_words:
if keyword in word and header not in headers:
headers += "#include <%s>\n" % header
return headers
# defined for compatibility reasons, to be removed
Table = Table
class Function(object):
def __init__(self, bpf, name, fd):
self.bpf = bpf
self.name = name
self.fd = fd
@staticmethod
def _find_file(filename):
""" If filename is invalid, search in ./ of argv[0] """
if filename:
if not os.path.isfile(filename):
argv0 = ArgString(sys.argv[0])
t = b"/".join([os.path.abspath(os.path.dirname(argv0)), filename])
if os.path.isfile(t):
filename = t
else:
raise Exception("Could not find file %s" % filename)
return filename
@staticmethod
def find_exe(bin_path):
"""
find_exe(bin_path)
Traverses the PATH environment variable, looking for the first
directory that contains an executable file named bin_path, and
returns the full path to that file, or None if no such file
can be found. This is meant to replace invocations of the
"which" shell utility, which doesn't have portable semantics
for skipping aliases.
"""
# Source: http://stackoverflow.com/a/377028
def is_exe(fpath):
return os.path.isfile(fpath) and \
os.access(fpath, os.X_OK)
fpath, fname = os.path.split(bin_path)
if fpath:
if is_exe(bin_path):
return bin_path
else:
for path in os.environ["PATH"].split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, bin_path)
if is_exe(exe_file):
return exe_file
return None
def __init__(self, src_file=b"", hdr_file=b"", text=None, debug=0,
cflags=[], usdt_contexts=[]):
"""Create a new BPF module with the given source code.
Note:
All fields are marked as optional, but either `src_file` or `text`
must be supplied, and not both.
Args:
src_file (Optional[str]): Path to a source file for the module
hdr_file (Optional[str]): Path to a helper header file for the `src_file`
text (Optional[str]): Contents of a source file for the module
debug (Optional[int]): Flags used for debug prints, can be |'d together
See "Debug flags" for explanation
"""
src_file = _assert_is_bytes(src_file)
hdr_file = _assert_is_bytes(hdr_file)
text = _assert_is_bytes(text)
self.kprobe_fds = {}
self.uprobe_fds = {}
self.tracepoint_fds = {}
self.raw_tracepoint_fds = {}
self.perf_buffers = {}
self.open_perf_events = {}
self.tracefile = None
atexit.register(self.cleanup)
self.debug = debug
self.funcs = {}
self.tables = {}
self.module = None
cflags_array = (ct.c_char_p * len(cflags))()
for i, s in enumerate(cflags): cflags_array[i] = bytes(ArgString(s))
if text:
ctx_array = (ct.c_void_p * len(usdt_contexts))()
for i, usdt in enumerate(usdt_contexts):
ctx_array[i] = ct.c_void_p(usdt.get_context())
usdt_text = lib.bcc_usdt_genargs(ctx_array, len(usdt_contexts))
if usdt_text is None:
raise Exception("can't generate USDT probe arguments; " +
"possible cause is missing pid when a " +
"probe in a shared object has multiple " +
"locations")
text = usdt_text + text
if text:
self.module = lib.bpf_module_create_c_from_string(text,
self.debug, cflags_array, len(cflags_array))
if not self.module:
raise Exception("Failed to compile BPF text:\n%s" % text)
else:
src_file = BPF._find_file(src_file)
hdr_file = BPF._find_file(hdr_file)
if src_file.endswith(b".b"):
self.module = lib.bpf_module_create_b(src_file, hdr_file,
self.debug)
else:
self.module = lib.bpf_module_create_c(src_file, self.debug,
cflags_array, len(cflags_array))
if not self.module:
raise Exception("Failed to compile BPF module %s" % src_file)
for usdt_context in usdt_contexts:
usdt_context.attach_uprobes(self)
# If any "kprobe__" or "tracepoint__" or "raw_tracepoint__"
# prefixed functions were defined,
# they will be loaded and attached here.
self._trace_autoload()
def load_funcs(self, prog_type=KPROBE):
"""load_funcs(prog_type=KPROBE)
Load all functions in this BPF module with the given type.
Returns a list of the function handles."""
fns = []
for i in range(0, lib.bpf_num_functions(self.module)):
func_name = lib.bpf_function_name(self.module, i)
fns.append(self.load_func(func_name, prog_type))
return fns
def load_func(self, func_name, prog_type):
func_name = _assert_is_bytes(func_name)
if func_name in self.funcs:
return self.funcs[func_name]
if not lib.bpf_function_start(self.module, func_name):
raise Exception("Unknown program %s" % func_name)
log_level = 0
if (self.debug & DEBUG_BPF_REGISTER_STATE):
log_level = 2
elif (self.debug & DEBUG_BPF):
log_level = 1
fd = lib.bpf_prog_load(prog_type, func_name,
lib.bpf_function_start(self.module, func_name),
lib.bpf_function_size(self.module, func_name),
lib.bpf_module_license(self.module),
lib.bpf_module_kern_version(self.module),
log_level, None, 0);
if fd < 0:
atexit.register(self.donothing)
if ct.get_errno() == errno.EPERM:
raise Exception("Need super-user privileges to run")
errstr = os.strerror(ct.get_errno())
raise Exception("Failed to load BPF program %s: %s" %
(func_name, errstr))
fn = BPF.Function(self, func_name, fd)
self.funcs[func_name] = fn
return fn
def dump_func(self, func_name):
"""
Return the eBPF bytecodes for the specified function as a string
"""
func_name = _assert_is_bytes(func_name)
if not lib.bpf_function_start(self.module, func_name):
raise Exception("Unknown program %s" % func_name)
        start = lib.bpf_function_start(self.module, func_name)
        size = lib.bpf_function_size(self.module, func_name)
return ct.string_at(start, size)
str2ctype = {
u"_Bool": ct.c_bool,
u"char": ct.c_char,
u"wchar_t": ct.c_wchar,
u"unsigned char": ct.c_ubyte,
u"short": ct.c_short,
u"unsigned short": ct.c_ushort,
u"int": ct.c_int,
u"unsigned int": ct.c_uint,
u"long": ct.c_long,
u"unsigned long": ct.c_ulong,
u"long long": ct.c_longlong,
u"unsigned long long": ct.c_ulonglong,
u"float": ct.c_float,
u"double": ct.c_double,
u"long double": ct.c_longdouble,
u"__int128": ct.c_int64 * 2,
u"unsigned __int128": ct.c_uint64 * 2,
}
@staticmethod
def _decode_table_type(desc):
if isinstance(desc, basestring):
return BPF.str2ctype[desc]
anon = []
fields = []
for t in desc[1]:
if len(t) == 2:
fields.append((t[0], BPF._decode_table_type(t[1])))
elif len(t) == 3:
if isinstance(t[2], list):
fields.append((t[0], BPF._decode_table_type(t[1]) * t[2][0]))
elif isinstance(t[2], int):
fields.append((t[0], BPF._decode_table_type(t[1]), t[2]))
elif isinstance(t[2], basestring) and (
t[2] == u"union" or t[2] == u"struct"):
name = t[0]
if name == "":
name = "__anon%d" % len(anon)
anon.append(name)
fields.append((name, BPF._decode_table_type(t)))
else:
raise Exception("Failed to decode type %s" % str(t))
else:
raise Exception("Failed to decode type %s" % str(t))
base = ct.Structure
if len(desc) > 2:
if desc[2] == u"union":
base = ct.Union
elif desc[2] == u"struct":
base = ct.Structure
cls = type(str(desc[0]), (base,), dict(_anonymous_=anon,
_fields_=fields))
return cls
def get_table(self, name, keytype=None, leaftype=None, reducer=None):
name = _assert_is_bytes(name)
map_id = lib.bpf_table_id(self.module, name)
map_fd = lib.bpf_table_fd(self.module, name)
if map_fd < 0:
raise KeyError
if not keytype:
key_desc = lib.bpf_table_key_desc(self.module, name)
if not key_desc:
raise Exception("Failed to load BPF Table %s key desc" % name)
keytype = BPF._decode_table_type(json.loads(key_desc))
if not leaftype:
leaf_desc = lib.bpf_table_leaf_desc(self.module, name)
if not leaf_desc:
raise Exception("Failed to load BPF Table %s leaf desc" % name)
leaftype = BPF._decode_table_type(json.loads(leaf_desc))
return Table(self, map_id, map_fd, keytype, leaftype, reducer=reducer)
def __getitem__(self, key):
if key not in self.tables:
self.tables[key] = self.get_table(key)
return self.tables[key]
def __setitem__(self, key, leaf):
self.tables[key] = leaf
def __len__(self):
return len(self.tables)
def __delitem__(self, key):
del self.tables[key]
def __iter__(self):
return self.tables.__iter__()
@staticmethod
def attach_raw_socket(fn, dev):
dev = _assert_is_bytes(dev)
if not isinstance(fn, BPF.Function):
raise Exception("arg 1 must be of type BPF.Function")
sock = lib.bpf_open_raw_sock(dev)
if sock < 0:
errstr = os.strerror(ct.get_errno())
raise Exception("Failed to open raw device %s: %s" % (dev, errstr))
res = lib.bpf_attach_socket(sock, fn.fd)
if res < 0:
errstr = os.strerror(ct.get_errno())
raise Exception("Failed to attach BPF to device %s: %s"
% (dev, errstr))
fn.sock = sock
@staticmethod
def get_kprobe_functions(event_re):
with open("%s/../kprobes/blacklist" % TRACEFS, "rb") as blacklist_f:
blacklist = set([line.rstrip().split()[1] for line in blacklist_f])
fns = []
found_stext = False
with open("/proc/kallsyms", "rb") as avail_file:
for line in avail_file:
(_, t, fn) = line.rstrip().split()[:3]
if found_stext is False:
if fn == b'_stext':
found_stext = True
continue
if fn == b'_etext':
break
if (t.lower() in [b't', b'w']) and re.match(event_re, fn) \
and fn not in blacklist:
fns.append(fn)
return set(fns) # Some functions may appear more than once
def _check_probe_quota(self, num_new_probes):
global _num_open_probes
if _num_open_probes + num_new_probes > _probe_limit:
raise Exception("Number of open probes would exceed global quota")
def _add_kprobe_fd(self, name, fd):
global _num_open_probes
self.kprobe_fds[name] = fd
_num_open_probes += 1
def _del_kprobe_fd(self, name):
global _num_open_probes
del self.kprobe_fds[name]
_num_open_probes -= 1
def _add_uprobe_fd(self, name, fd):
global _num_open_probes
self.uprobe_fds[name] = fd
_num_open_probes += 1
def _del_uprobe_fd(self, name):
global _num_open_probes
del self.uprobe_fds[name]
_num_open_probes -= 1
def get_syscall_prefix(self):
# test bpf syscall kernel func name
if self.ksymname("sys_bpf") != -1:
return "sys_"
if self.ksymname("__x64_sys_bpf") != -1:
return "__x64_sys_"
        # none of them matched; just return "sys_", and later API
        # calls will return an error
return "sys_"
def get_syscall_fnname(self, name):
return self.get_syscall_prefix() + name
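    # Illustrative use (matches the bcc tutorials): resolve the per-kernel syscall
    # prefix instead of hard-coding "sys_", e.g.
    #   b = BPF(text=prog)
    #   b.attach_kprobe(event=b.get_syscall_fnname("clone"), fn_name="hello")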
def attach_kprobe(self, event=b"", fn_name=b"", event_re=b""):
event = _assert_is_bytes(event)
fn_name = _assert_is_bytes(fn_name)
event_re = _assert_is_bytes(event_re)
# allow the caller to glob multiple functions together
if event_re:
matches = BPF.get_kprobe_functions(event_re)
self._check_probe_quota(len(matches))
for line in matches:
try:
self.attach_kprobe(event=line, fn_name=fn_name)
except:
pass
return
self._check_probe_quota(1)
fn = self.load_func(fn_name, BPF.KPROBE)
ev_name = b"p_" + event.replace(b"+", b"_").replace(b".", b"_")
fd = lib.bpf_attach_kprobe(fn.fd, 0, ev_name, event)
if fd < 0:
raise Exception("Failed to attach BPF to kprobe")
self._add_kprobe_fd(ev_name, fd)
return self
def attach_kretprobe(self, event=b"", fn_name=b"", event_re=b""):
event = _assert_is_bytes(event)
fn_name = _assert_is_bytes(fn_name)
event_re = _assert_is_bytes(event_re)
# allow the caller to glob multiple functions together
if event_re:
for line in BPF.get_kprobe_functions(event_re):
try:
self.attach_kretprobe(event=line, fn_name=fn_name)
except:
pass
return
self._check_probe_quota(1)
fn = self.load_func(fn_name, BPF.KPROBE)
ev_name = b"r_" + event.replace(b"+", b"_").replace(b".", b"_")
fd = lib.bpf_attach_kprobe(fn.fd, 1, ev_name, event)
if fd < 0:
raise Exception("Failed to attach BPF to kretprobe")
self._add_kprobe_fd(ev_name, fd)
return self
def detach_kprobe_event(self, ev_name):
if ev_name not in self.kprobe_fds:
raise Exception("Kprobe %s is not attached" % event)
res = lib.bpf_close_perf_event_fd(self.kprobe_fds[ev_name])
if res < 0:
raise Exception("Failed to close kprobe FD")
res = lib.bpf_detach_kprobe(ev_name)
if res < 0:
raise Exception("Failed to detach BPF from kprobe")
self._del_kprobe_fd(ev_name)
def detach_kprobe(self, event):
event = _assert_is_bytes(event)
ev_name = b"p_" + event.replace(b"+", b"_").replace(b".", b"_")
self.detach_kprobe_event(ev_name)
def detach_kretprobe(self, event):
event = _assert_is_bytes(event)
ev_name = b"r_" + event.replace(b"+", b"_").replace(b".", b"_")
self.detach_kprobe_event(ev_name)
@staticmethod
def attach_xdp(dev, fn, flags=0):
'''
This function attaches a BPF function to a device on the device
driver level (XDP)
'''
dev = _assert_is_bytes(dev)
if not isinstance(fn, BPF.Function):
raise Exception("arg 1 must be of type BPF.Function")
res = lib.bpf_attach_xdp(dev, fn.fd, flags)
if res < 0:
err_no = ct.get_errno()
if err_no == errno.EBADMSG:
raise Exception("Internal error while attaching BPF to device,"+
" try increasing the debug level!")
else:
errstr = os.strerror(err_no)
raise Exception("Failed to attach BPF to device %s: %s"
% (dev, errstr))
@staticmethod
def remove_xdp(dev, flags=0):
'''
This function removes any BPF function from a device on the
device driver level (XDP)
'''
dev = _assert_is_bytes(dev)
res = lib.bpf_attach_xdp(dev, -1, flags)
if res < 0:
errstr = os.strerror(ct.get_errno())
raise Exception("Failed to detach BPF from device %s: %s"
% (dev, errstr))
@classmethod
def _check_path_symbol(cls, module, symname, addr, pid):
module = _assert_is_bytes(module)
symname = _assert_is_bytes(symname)
sym = bcc_symbol()
c_pid = 0 if pid == -1 else pid
if lib.bcc_resolve_symname(
module, symname,
addr or 0x0, c_pid,
ct.cast(None, ct.POINTER(bcc_symbol_option)),
ct.byref(sym),
) < 0:
raise Exception("could not determine address of symbol %s" % symname)
module_path = ct.cast(sym.module, ct.c_char_p).value
lib.bcc_procutils_free(sym.module)
return module_path, sym.offset
@staticmethod
def find_library(libname):
libname = _assert_is_bytes(libname)
res = lib.bcc_procutils_which_so(libname, 0)
if not res:
return None
libpath = ct.cast(res, ct.c_char_p).value
lib.bcc_procutils_free(res)
return libpath
@staticmethod
def get_tracepoints(tp_re):
results = []
events_dir = os.path.join(TRACEFS, "events")
for category in os.listdir(events_dir):
cat_dir = os.path.join(events_dir, category)
if not os.path.isdir(cat_dir):
continue
for event in os.listdir(cat_dir):
evt_dir = os.path.join(cat_dir, event)
if os.path.isdir(evt_dir):
tp = ("%s:%s" % (category, event))
if re.match(tp_re, tp):
results.append(tp)
return results
@staticmethod
def tracepoint_exists(category, event):
evt_dir = os.path.join(TRACEFS, "events", category, event)
return os.path.isdir(evt_dir)
def attach_tracepoint(self, tp=b"", tp_re=b"", fn_name=b""):
"""attach_tracepoint(tp="", tp_re="", fn_name="")
Run the bpf function denoted by fn_name every time the kernel tracepoint
specified by 'tp' is hit. The optional parameters pid, cpu, and group_fd
can be used to filter the probe. The tracepoint specification is simply
the tracepoint category and the tracepoint name, separated by a colon.
For example: sched:sched_switch, syscalls:sys_enter_bind, etc.
Instead of a tracepoint name, a regular expression can be provided in
tp_re. The program will then attach to tracepoints that match the
provided regular expression.
To obtain a list of kernel tracepoints, use the tplist tool or cat the
file /sys/kernel/debug/tracing/available_events.
Examples:
BPF(text).attach_tracepoint(tp="sched:sched_switch", fn_name="on_switch")
BPF(text).attach_tracepoint(tp_re="sched:.*", fn_name="on_switch")
"""
tp = _assert_is_bytes(tp)
tp_re = _assert_is_bytes(tp_re)
fn_name = _assert_is_bytes(fn_name)
if tp_re:
for tp in BPF.get_tracepoints(tp_re):
self.attach_tracepoint(tp=tp, fn_name=fn_name)
return
fn = self.load_func(fn_name, BPF.TRACEPOINT)
(tp_category, tp_name) = tp.split(b':')
fd = lib.bpf_attach_tracepoint(fn.fd, tp_category, tp_name)
if fd < 0:
raise Exception("Failed to attach BPF to tracepoint")
self.tracepoint_fds[tp] = fd
return self
def attach_raw_tracepoint(self, tp=b"", fn_name=b""):
"""attach_raw_tracepoint(self, tp=b"", fn_name=b"")
Run the bpf function denoted by fn_name every time the kernel tracepoint
specified by 'tp' is hit. The bpf function should be loaded as a
RAW_TRACEPOINT type. The fn_name is the kernel tracepoint name,
e.g., sched_switch, sys_enter_bind, etc.
Examples:
BPF(text).attach_raw_tracepoint(tp="sched_switch", fn_name="on_switch")
"""
tp = _assert_is_bytes(tp)
if tp in self.raw_tracepoint_fds:
raise Exception("Raw tracepoint %s has been attached" % tp)
fn_name = _assert_is_bytes(fn_name)
fn = self.load_func(fn_name, BPF.RAW_TRACEPOINT)
fd = lib.bpf_attach_raw_tracepoint(fn.fd, tp)
if fd < 0:
raise Exception("Failed to attach BPF to raw tracepoint")
        self.raw_tracepoint_fds[tp] = fd
return self
def detach_raw_tracepoint(self, tp=b""):
"""detach_raw_tracepoint(tp="")
Stop running the bpf function that is attached to the kernel tracepoint
specified by 'tp'.
Example: bpf.detach_raw_tracepoint("sched_switch")
"""
tp = _assert_is_bytes(tp)
if tp not in self.raw_tracepoint_fds:
raise Exception("Raw tracepoint %s is not attached" % tp)
os.close(self.raw_tracepoint_fds[tp])
del self.raw_tracepoint_fds[tp]
@staticmethod
def support_raw_tracepoint():
        # kernel symbol "bpf_find_raw_tracepoint" indicates raw_tracepoint support
if BPF.ksymname("bpf_find_raw_tracepoint") != -1:
return True
return False
def detach_tracepoint(self, tp=b""):
"""detach_tracepoint(tp="")
Stop running a bpf function that is attached to the kernel tracepoint
specified by 'tp'.
Example: bpf.detach_tracepoint("sched:sched_switch")
"""
tp = _assert_is_bytes(tp)
if tp not in self.tracepoint_fds:
raise Exception("Tracepoint %s is not attached" % tp)
res = lib.bpf_close_perf_event_fd(self.tracepoint_fds[tp])
if res < 0:
raise Exception("Failed to detach BPF from tracepoint")
(tp_category, tp_name) = tp.split(b':')
res = lib.bpf_detach_tracepoint(tp_category, tp_name)
if res < 0:
raise Exception("Failed to detach BPF from tracepoint")
del self.tracepoint_fds[tp]
def _attach_perf_event(self, progfd, ev_type, ev_config,
sample_period, sample_freq, pid, cpu, group_fd):
res = lib.bpf_attach_perf_event(progfd, ev_type, ev_config,
sample_period, sample_freq, pid, cpu, group_fd)
if res < 0:
raise Exception("Failed to attach BPF to perf event")
return res
def attach_perf_event(self, ev_type=-1, ev_config=-1, fn_name=b"",
sample_period=0, sample_freq=0, pid=-1, cpu=-1, group_fd=-1):
fn_name = _assert_is_bytes(fn_name)
fn = self.load_func(fn_name, BPF.PERF_EVENT)
res = {}
if cpu >= 0:
res[cpu] = self._attach_perf_event(fn.fd, ev_type, ev_config,
sample_period, sample_freq, pid, cpu, group_fd)
else:
for i in get_online_cpus():
res[i] = self._attach_perf_event(fn.fd, ev_type, ev_config,
sample_period, sample_freq, pid, i, group_fd)
self.open_perf_events[(ev_type, ev_config)] = res
def detach_perf_event(self, ev_type=-1, ev_config=-1):
try:
fds = self.open_perf_events[(ev_type, ev_config)]
except KeyError:
raise Exception("Perf event type {} config {} not attached".format(
ev_type, ev_config))
res = 0
for fd in fds.values():
res = lib.bpf_close_perf_event_fd(fd) or res
if res != 0:
raise Exception("Failed to detach BPF from perf event")
del self.open_perf_events[(ev_type, ev_config)]
@staticmethod
def get_user_functions(name, sym_re):
return set([name for (name, _) in
BPF.get_user_functions_and_addresses(name, sym_re)])
@staticmethod
def get_user_addresses(name, sym_re):
"""
We are returning addresses here instead of symbol names because it
turns out that the same name may appear multiple times with different
addresses, and the same address may appear multiple times with the same
name. We can't attach a uprobe to the same address more than once, so
it makes sense to return the unique set of addresses that are mapped to
a symbol that matches the provided regular expression.
"""
return set([address for (_, address) in
BPF.get_user_functions_and_addresses(name, sym_re)])
@staticmethod
def get_user_functions_and_addresses(name, sym_re):
name = _assert_is_bytes(name)
sym_re = _assert_is_bytes(sym_re)
addresses = []
def sym_cb(sym_name, addr):
dname = sym_name
if re.match(sym_re, dname):
addresses.append((dname, addr))
return 0
res = lib.bcc_foreach_function_symbol(name, _SYM_CB_TYPE(sym_cb))
if res < 0:
raise Exception("Error %d enumerating symbols in %s" % (res, name))
return addresses
def _get_uprobe_evname(self, prefix, path, addr, pid):
if pid == -1:
return b"%s_%s_0x%x" % (prefix, self._probe_repl.sub(b"_", path), addr)
else:
            # if pid is valid, include it in the event name so that
            # different pids get different event names
return b"%s_%s_0x%x_%d" % (prefix, self._probe_repl.sub(b"_", path), addr, pid)
def attach_uprobe(self, name=b"", sym=b"", sym_re=b"", addr=None,
fn_name=b"", pid=-1):
"""attach_uprobe(name="", sym="", sym_re="", addr=None, fn_name=""
pid=-1)
Run the bpf function denoted by fn_name every time the symbol sym in
the library or binary 'name' is encountered. The real address addr may
be supplied in place of sym. Optional parameters pid, cpu, and group_fd
can be used to filter the probe.
Instead of a symbol name, a regular expression can be provided in
sym_re. The uprobe will then attach to symbols that match the provided
regular expression.
Libraries can be given in the name argument without the lib prefix, or
with the full path (/usr/lib/...). Binaries can be given only with the
full path (/bin/sh). If a PID is given, the uprobe will attach to the
version of the library used by the process.
Example: BPF(text).attach_uprobe("c", "malloc")
BPF(text).attach_uprobe("/usr/bin/python", "main")
"""
name = _assert_is_bytes(name)
sym = _assert_is_bytes(sym)
sym_re = _assert_is_bytes(sym_re)
fn_name = _assert_is_bytes(fn_name)
if sym_re:
addresses = BPF.get_user_addresses(name, sym_re)
self._check_probe_quota(len(addresses))
for sym_addr in addresses:
self.attach_uprobe(name=name, addr=sym_addr,
fn_name=fn_name, pid=pid)
return
(path, addr) = BPF._check_path_symbol(name, sym, addr, pid)
self._check_probe_quota(1)
fn = self.load_func(fn_name, BPF.KPROBE)
ev_name = self._get_uprobe_evname(b"p", path, addr, pid)
fd = lib.bpf_attach_uprobe(fn.fd, 0, ev_name, path, addr, pid)
if fd < 0:
raise Exception("Failed to attach BPF to uprobe")
self._add_uprobe_fd(ev_name, fd)
return self
def attach_uretprobe(self, name=b"", sym=b"", sym_re=b"", addr=None,
fn_name=b"", pid=-1):
"""attach_uretprobe(name="", sym="", sym_re="", addr=None, fn_name=""
pid=-1)
Run the bpf function denoted by fn_name every time the symbol sym in
the library or binary 'name' finishes execution. See attach_uprobe for
meaning of additional parameters.
"""
name = _assert_is_bytes(name)
sym = _assert_is_bytes(sym)
sym_re = _assert_is_bytes(sym_re)
fn_name = _assert_is_bytes(fn_name)
if sym_re:
for sym_addr in BPF.get_user_addresses(name, sym_re):
self.attach_uretprobe(name=name, addr=sym_addr,
fn_name=fn_name, pid=pid)
return
(path, addr) = BPF._check_path_symbol(name, sym, addr, pid)
self._check_probe_quota(1)
fn = self.load_func(fn_name, BPF.KPROBE)
ev_name = self._get_uprobe_evname(b"r", path, addr, pid)
fd = lib.bpf_attach_uprobe(fn.fd, 1, ev_name, path, addr, pid)
if fd < 0:
raise Exception("Failed to attach BPF to uretprobe")
self._add_uprobe_fd(ev_name, fd)
return self
def detach_uprobe_event(self, ev_name):
if ev_name not in self.uprobe_fds:
raise Exception("Uprobe %s is not attached" % ev_name)
res = lib.bpf_close_perf_event_fd(self.uprobe_fds[ev_name])
if res < 0:
raise Exception("Failed to detach BPF from uprobe")
res = lib.bpf_detach_uprobe(ev_name)
if res < 0:
raise Exception("Failed to detach BPF from uprobe")
self._del_uprobe_fd(ev_name)
def detach_uprobe(self, name=b"", sym=b"", addr=None, pid=-1):
"""detach_uprobe(name="", sym="", addr=None, pid=-1)
Stop running a bpf function that is attached to symbol 'sym' in library
or binary 'name'.
"""
name = _assert_is_bytes(name)
sym = _assert_is_bytes(sym)
(path, addr) = BPF._check_path_symbol(name, sym, addr, pid)
ev_name = self._get_uprobe_evname(b"p", path, addr, pid)
self.detach_uprobe_event(ev_name)
def detach_uretprobe(self, name=b"", sym=b"", addr=None, pid=-1):
"""detach_uretprobe(name="", sym="", addr=None, pid=-1)
Stop running a bpf function that is attached to symbol 'sym' in library
or binary 'name'.
"""
name = _assert_is_bytes(name)
sym = _assert_is_bytes(sym)
(path, addr) = BPF._check_path_symbol(name, sym, addr, pid)
ev_name = self._get_uprobe_evname(b"r", path, addr, pid)
self.detach_uprobe_event(ev_name)
def _trace_autoload(self):
for i in range(0, lib.bpf_num_functions(self.module)):
func_name = lib.bpf_function_name(self.module, i)
if func_name.startswith(b"kprobe__"):
fn = self.load_func(func_name, BPF.KPROBE)
self.attach_kprobe(event=fn.name[8:], fn_name=fn.name)
elif func_name.startswith(b"kretprobe__"):
fn = self.load_func(func_name, BPF.KPROBE)
self.attach_kretprobe(event=fn.name[11:], fn_name=fn.name)
elif func_name.startswith(b"tracepoint__"):
fn = self.load_func(func_name, BPF.TRACEPOINT)
tp = fn.name[len(b"tracepoint__"):].replace(b"__", b":")
self.attach_tracepoint(tp=tp, fn_name=fn.name)
elif func_name.startswith(b"raw_tracepoint__"):
fn = self.load_func(func_name, BPF.RAW_TRACEPOINT)
tp = fn.name[len(b"raw_tracepoint__"):]
self.attach_raw_tracepoint(tp=tp, fn_name=fn.name)
def trace_open(self, nonblocking=False):
"""trace_open(nonblocking=False)
Open the trace_pipe if not already open
"""
if not self.tracefile:
self.tracefile = open("%s/trace_pipe" % TRACEFS, "rb")
if nonblocking:
fd = self.tracefile.fileno()
fl = fcntl.fcntl(fd, fcntl.F_GETFL)
fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)
return self.tracefile
def trace_fields(self, nonblocking=False):
"""trace_fields(nonblocking=False)
Read from the kernel debug trace pipe and return a tuple of the
fields (task, pid, cpu, flags, timestamp, msg) or None if no
line was read (nonblocking=True)
"""
try:
while True:
line = self.trace_readline(nonblocking)
if not line and nonblocking: return (None,) * 6
# don't print messages related to lost events
if line.startswith(b"CPU:"): continue
task = line[:16].lstrip()
line = line[17:]
ts_end = line.find(b":")
pid, cpu, flags, ts = line[:ts_end].split()
cpu = cpu[1:-1]
# line[ts_end:] will have ": [sym_or_addr]: msgs"
# For trace_pipe debug output, the addr typically
# is invalid (e.g., 0x1). For kernel 4.12 or earlier,
# if address is not able to match a kernel symbol,
# nothing will be printed out. For kernel 4.13 and later,
# however, the illegal address will be printed out.
# Hence, both cases are handled here.
line = line[ts_end + 1:]
                sym_end = line.find(b":")
msg = line[sym_end + 2:]
return (task, int(pid), int(cpu), flags, float(ts), msg)
except KeyboardInterrupt:
exit()
def trace_readline(self, nonblocking=False):
"""trace_readline(nonblocking=False)
Read from the kernel debug trace pipe and return one line
If nonblocking is False, this will block until ctrl-C is pressed.
"""
trace = self.trace_open(nonblocking)
line = None
try:
line = trace.readline(1024).rstrip()
except IOError:
pass
except KeyboardInterrupt:
exit()
return line
def trace_print(self, fmt=None):
"""trace_print(self, fmt=None)
Read from the kernel debug trace pipe and print on stdout.
If fmt is specified, apply as a format string to the output. See
trace_fields for the members of the tuple
example: trace_print(fmt="pid {1}, msg = {5}")
"""
try:
while True:
if fmt:
fields = self.trace_fields(nonblocking=False)
if not fields: continue
line = fmt.format(*fields)
else:
line = self.trace_readline(nonblocking=False)
print(line)
sys.stdout.flush()
except KeyboardInterrupt:
exit()
@staticmethod
def _sym_cache(pid):
"""_sym_cache(pid)
Returns a symbol cache for the specified PID.
The kernel symbol cache is accessed by providing any PID less than zero.
"""
if pid < 0 and pid != -1:
pid = -1
        if pid not in BPF._sym_caches:
BPF._sym_caches[pid] = SymbolCache(pid)
return BPF._sym_caches[pid]
@staticmethod
def sym(addr, pid, show_module=False, show_offset=False, demangle=True):
"""sym(addr, pid, show_module=False, show_offset=False)
Translate a memory address into a function name for a pid, which is
returned. When show_module is True, the module name is also included.
When show_offset is True, the instruction offset as a hexadecimal
number is also included in the string.
A pid of less than zero will access the kernel symbol cache.
Example output when both show_module and show_offset are True:
"start_thread+0x202 [libpthread-2.24.so]"
Example output when both show_module and show_offset are False:
"start_thread"
"""
name, offset, module = BPF._sym_cache(pid).resolve(addr, demangle)
offset = b"+0x%x" % offset if show_offset and name is not None else b""
name = name or b"[unknown]"
name = name + offset
module = b" [%s]" % os.path.basename(module) \
if show_module and module is not None else b""
return name + module
@staticmethod
def ksym(addr, show_module=False, show_offset=False):
"""ksym(addr)
Translate a kernel memory address into a kernel function name, which is
returned. When show_module is True, the module name ("kernel") is also
included. When show_offset is true, the instruction offset as a
hexadecimal number is also included in the string.
Example output when both show_module and show_offset are True:
"default_idle+0x0 [kernel]"
"""
return BPF.sym(addr, -1, show_module, show_offset, False)
@staticmethod
def ksymname(name):
"""ksymname(name)
Translate a kernel name into an address. This is the reverse of
ksym. Returns -1 when the function name is unknown."""
return BPF._sym_cache(-1).resolve_name(None, name)
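    # Usage sketch (symbol names are illustrative): ksymname() and ksym() are
    # inverse lookups against the kernel symbol cache:
    #   addr = BPF.ksymname("do_sys_open")    # -1 if the symbol is unknown
    #   if addr != -1:
    #       print(BPF.ksym(addr, show_module=True, show_offset=True))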
def num_open_kprobes(self):
"""num_open_kprobes()
Get the number of open K[ret]probes. Can be useful for scenarios where
event_re is used while attaching and detaching probes.
"""
return len(self.kprobe_fds)
def num_open_uprobes(self):
"""num_open_uprobes()
Get the number of open U[ret]probes.
"""
return len(self.uprobe_fds)
def num_open_tracepoints(self):
"""num_open_tracepoints()
Get the number of open tracepoints.
"""
return len(self.tracepoint_fds)
def perf_buffer_poll(self, timeout = -1):
"""perf_buffer_poll(self)
Poll from all open perf ring buffers, calling the callback that was
provided when calling open_perf_buffer for each entry.
"""
try:
readers = (ct.c_void_p * len(self.perf_buffers))()
for i, v in enumerate(self.perf_buffers.values()):
readers[i] = v
lib.perf_reader_poll(len(readers), readers, timeout)
except KeyboardInterrupt:
exit()
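    # Usage sketch (hedged; open_perf_buffer belongs to the table API and is
    # not shown in this excerpt): a typical event loop registers a callback on
    # a BPF_PERF_OUTPUT table and then polls all open buffers:
    #   def handle_event(cpu, data, size):
    #       print("got %d bytes on cpu %d" % (size, cpu))
    #   b["events"].open_perf_buffer(handle_event)
    #   while True:
    #       b.perf_buffer_poll()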
def kprobe_poll(self, timeout = -1):
"""kprobe_poll(self)
Deprecated. Use perf_buffer_poll instead.
"""
self.perf_buffer_poll(timeout)
def donothing(self):
"""the do nothing exit handler"""
def cleanup(self):
# Clean up opened probes
for k, v in list(self.kprobe_fds.items()):
self.detach_kprobe_event(k)
for k, v in list(self.uprobe_fds.items()):
self.detach_uprobe_event(k)
for k, v in list(self.tracepoint_fds.items()):
self.detach_tracepoint(k)
for k, v in list(self.raw_tracepoint_fds.items()):
self.detach_raw_tracepoint(k)
# Clean up opened perf ring buffer and perf events
table_keys = list(self.tables.keys())
for key in table_keys:
if isinstance(self.tables[key], PerfEventArray):
del self.tables[key]
for (ev_type, ev_config) in list(self.open_perf_events.keys()):
self.detach_perf_event(ev_type, ev_config)
if self.tracefile:
self.tracefile.close()
self.tracefile = None
if self.module:
lib.bpf_module_destroy(self.module)
self.module = None
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.cleanup()
from .usdt import USDT, USDTException
| python | 46,379 |
'''VGG for CIFAR10. FC layers are removed.
(c) YANG, Wei
'''
import torch.nn as nn
import torch.nn.functional as F
from .utils import DropoutConv2d
import math
__all__ = [
'VGG', 'vgg11', 'vgg11_bn', 'vgg13', 'vgg13_bn', 'vgg16', 'vgg16_bn',
'vgg19_bn', 'vgg19',
]
model_urls = {
'vgg11': 'https://download.pytorch.org/models/vgg11-bbd30ac9.pth',
'vgg13': 'https://download.pytorch.org/models/vgg13-c768596a.pth',
'vgg16': 'https://download.pytorch.org/models/vgg16-397923af.pth',
'vgg19': 'https://download.pytorch.org/models/vgg19-dcbb9e9d.pth',
}
class VGG(nn.Module):
def __init__(self, features, num_classes=1000):
super(VGG, self).__init__()
self.features = features
self.classifier = nn.Linear(512, num_classes)
self._initialize_weights()
# no dropout
self.drop = 0
def forward(self, x):
for m in self.modules():
if isinstance(m, DropoutConv2d):
m.drop = self.drop
x = self.features(x)
x = x.view(x.size(0), -1)
x = F.dropout(x, p=self.drop, training=True) # force dropout
x = self.classifier(x)
return x
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, DropoutConv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
n = m.weight.size(1)
m.weight.data.normal_(0, 0.01)
m.bias.data.zero_()
def make_layers(cfg, batch_norm=False):
layers = []
in_channels = 3
for v in cfg:
if v == 'M':
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
else:
conv2d = DropoutConv2d(in_channels, v, kernel_size=3, padding=1)
if batch_norm:
layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
else:
layers += [conv2d, nn.ReLU(inplace=True)]
in_channels = v
return nn.Sequential(*layers)
cfg = {
'A': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'B': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
'E': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
}
def vgg11(**kwargs):
"""VGG 11-layer model (configuration "A")
    Args:
        num_classes (int): number of output classes, forwarded to ``VGG``
"""
model = VGG(make_layers(cfg['A']), **kwargs)
return model
def vgg11_bn(**kwargs):
"""VGG 11-layer model (configuration "A") with batch normalization"""
model = VGG(make_layers(cfg['A'], batch_norm=True), **kwargs)
return model
def vgg13(**kwargs):
"""VGG 13-layer model (configuration "B")
    Args:
        num_classes (int): number of output classes, forwarded to ``VGG``
"""
model = VGG(make_layers(cfg['B']), **kwargs)
return model
def vgg13_bn(**kwargs):
"""VGG 13-layer model (configuration "B") with batch normalization"""
model = VGG(make_layers(cfg['B'], batch_norm=True), **kwargs)
return model
def vgg16(**kwargs):
"""VGG 16-layer model (configuration "D")
    Args:
        num_classes (int): number of output classes, forwarded to ``VGG``
"""
model = VGG(make_layers(cfg['D']), **kwargs)
return model
def vgg16_bn(**kwargs):
"""VGG 16-layer model (configuration "D") with batch normalization"""
model = VGG(make_layers(cfg['D'], batch_norm=True), **kwargs)
return model
def vgg19(**kwargs):
"""VGG 19-layer model (configuration "E")
    Args:
        num_classes (int): number of output classes, forwarded to ``VGG``
"""
model = VGG(make_layers(cfg['E']), **kwargs)
return model
def vgg19_bn(**kwargs):
"""VGG 19-layer model (configuration 'E') with batch normalization"""
model = VGG(make_layers(cfg['E'], batch_norm=True), **kwargs)
return model
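# Usage sketch (shapes assumed from the code above, not part of the original
# module): **kwargs are forwarded to VGG, so a CIFAR-10 model with the forced
# dropout in forward() can be built roughly as:
#   model = vgg16_bn(num_classes=10)
#   model.drop = 0.5                              # applied to DropoutConv2d and the FC input
#   logits = model(torch.randn(8, 3, 32, 32))     # -> tensor of shape (8, 10)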
| python | 4,340 |
# 1st task
a = int(input("enter first number: "))
b = int(input("enter second number: "))
if a > 0:
    if b > 0:
        total = a + b
        print("sum:", total)
    else:
        print("The Number2 is not positive, so we can't perform the operation.")
else:
    print("The Number1 is not positive, so we can't perform the operation.")
# 2nd task
def find_max(values):
    largest = values[0]
    for item in values:
        if item > largest:
            largest = item
    return largest

num = int(input('How many numbers: '))
lst = []
for n in range(num):
    number = int(input('Enter number '))
    lst.append(number)
print("Maximum element in the list is :", find_max(lst))
# 3rd task
import pandas as pd
dataset=pd.read_csv("fish.csv")
print(dataset)
| python | 778 |
# prefer setuptools over distutils
from setuptools import setup, find_packages
# use a consistent encoding
from codecs import open
from os import path
import json
import sys
is_python_2 = sys.version_info < (3, 0)
here = path.abspath(path.dirname(__file__))
root = path.dirname(here)
readme_rst = path.join(here, 'README.rst')
package_json = path.join(here, 'package.json')
# a workaround when installing locally from git repository with pip install -e .
if not path.isfile(package_json):
package_json = path.join(root, 'package.json')
# long description from README file
with open(readme_rst, encoding='utf-8') as f:
long_description = f.read()
# version number and all other params from package.json
with open(package_json, encoding='utf-8') as f:
package = json.load(f)
setup(
name=package['name'],
version=package['version'],
description=package['description'],
long_description=long_description,
url=package['homepage'],
author=package['author']['name'],
author_email=package['author']['email'],
license=package['license'],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: Financial and Insurance Industry',
'Intended Audience :: Information Technology',
'Topic :: Software Development :: Build Tools',
'Topic :: Office/Business :: Financial :: Investment',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: JavaScript',
'Programming Language :: PHP',
'Operating System :: OS Independent',
'Environment :: Console'
],
keywords=package['keywords'],
packages=find_packages(exclude=['ccxt.async_support*'] if is_python_2 else []),
install_requires=[
'setuptools>=38.5.1',
'certifi>=2018.1.18',
'requests>=2.18.4'
],
extras_require={
':python_version>="3.5.2"': [
'aiohttp>=3.0.1',
'aiodns==1.1.1',
'yarl==1.1.0',
],
'qa': [
'flake8==3.5.0'
],
'doc': [
'Sphinx==1.7.0'
]
}
)
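# Usage note (assumed commands, not part of this file): the optional groups in
# extras_require above are installed via pip's extras syntax, e.g.
#   pip install -e .[qa]    # adds flake8 for linting
#   pip install -e .[doc]   # adds Sphinx for building the docs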
| python | 2,501 |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import oneflow as flow
import oneflow.typing as tp
from util import convert_to_onnx_and_check
def test_bn_nchw(test_case):
@flow.global_function()
def bn(x: tp.Numpy.Placeholder((3, 4, 2, 5))):
params_shape = (4,)
mean = flow.get_variable(
name="mean",
shape=params_shape,
dtype=flow.float,
initializer=flow.random_uniform_initializer(),
)
variance = flow.get_variable(
name="var",
shape=params_shape,
dtype=flow.float,
initializer=flow.random_uniform_initializer(),
)
gamma = flow.get_variable(
name="gamma",
shape=params_shape,
dtype=flow.float,
initializer=flow.random_uniform_initializer(),
)
beta = flow.get_variable(
name="beta",
shape=params_shape,
dtype=flow.float,
initializer=flow.random_uniform_initializer(),
)
return flow.nn.batch_normalization(x, mean, variance, beta, gamma, 1e-5, axis=1)
convert_to_onnx_and_check(bn)
def test_bn_nhwc(test_case):
@flow.global_function()
def bn(x: tp.Numpy.Placeholder((3, 4, 2, 5))):
params_shape = (5,)
mean = flow.get_variable(
name="mean",
shape=params_shape,
dtype=flow.float,
initializer=flow.random_uniform_initializer(),
)
variance = flow.get_variable(
name="var",
shape=params_shape,
dtype=flow.float,
initializer=flow.random_uniform_initializer(),
)
gamma = flow.get_variable(
name="gamma",
shape=params_shape,
dtype=flow.float,
initializer=flow.random_uniform_initializer(),
)
beta = flow.get_variable(
name="beta",
shape=params_shape,
dtype=flow.float,
initializer=flow.random_uniform_initializer(),
)
return flow.nn.batch_normalization(
x, mean, variance, beta, gamma, 1e-5, axis=-1
)
convert_to_onnx_and_check(bn)
| python | 2,761 |
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Configuration and hyperparameter sweeps."""
# pylint: disable=line-too-long
import ml_collections
def get_config():
"""Get the default hyperparameter configuration."""
config = ml_collections.ConfigDict()
config.seed = 42
config.trial = 0 # Dummy for repeated runs.
# Use "caltech_birds2011_train-test" for train-test split.
config.dataset = "caltech_birds2011_train-test"
config.batch_size = 16
# From Learning to Navigate for Fine-grained Classification
# https://arxiv.org/pdf/1809.00287.pdf
# Code: https://github.com/yangze0930/NTS-Net/blob/master/core/dataset.py#L41
# config.train_preprocess_str = "to_float_0_1|resize((600, 600))|random_crop((448,448))|random_left_right_flip|normalize(mu=(0.485, 0.456, 0.406), sigma=(0.229, 0.224, 0.225))"
# config.eval_preprocess_str = "to_float_0_1|resize((600, 600))|central_crop((448,448))|normalize(mu=(0.485, 0.456, 0.406), sigma=(0.229, 0.224, 0.225))"
config.train_preprocess_str = ("to_float_0_1"
"|resize((600, 600))"
"|random_crop((448,448))"
"|random_left_right_flip"
"|value_range(-1,1)")
config.eval_preprocess_str = ("to_float_0_1"
"|resize((600, 600))"
"|central_crop((448,448))"
"|value_range(-1,1)")
config.k = 4
config.ptopk_sigma = 0.05
config.ptopk_num_samples = 500
config.selection_method = "perturbed-topk"
config.linear_decrease_perturbed_sigma = True
config.entropy_regularizer = -0.05
config.part_dropout = False
config.communication = "squeeze_excite_d"
# Insert link to a checkpoint file.
# contact original authors if the checkpoint from the paper is needed.
config.pretrained_checkpoint = ""
config.pretrained_prefix = ""
# == optimization
config.num_train_steps = 31300
config.optimizer = "sgd"
config.learning_rate = 1e-3
config.momentum = .9
config.weight_decay_coupled = 1e-4
config.cosine_decay = False
config.learning_rate_step_decay = 0.1
config.learning_rate_decay_at_steps = [18780, 25040]
config.warmup_ratio = 0.05
config.log_loss_every_steps = 100
config.eval_every_steps = 5000
config.checkpoint_every_steps = 1000
config.debug = False
return config
def get_sweep(h):
"""Get the hyperparamater sweep."""
sweeps = []
sweeps.append(h.sweep("config.k", [2, 4]))
sweeps.append(h.sweep("config.seed", [3, 5, 7, 9, 11]))
return h.product(sweeps)
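# Assuming h.product composes the sweeps combinatorially, this yields every
# pairing of config.k in {2, 4} with config.seed in {3, 5, 7, 9, 11},
# i.e. 2 * 5 = 10 trial configurations.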
| python | 3,192 |
#!/usr/bin/env python
# Dongji Gao
# We're using python 3.x style but want it to work in python 2.x
from __future__ import print_function
import argparse
import sys
import math
parser = argparse.ArgumentParser(description="This script evaluates the log probability (default log base is e) of each sentence "
"from data (in text form), given a language model in arpa form "
"and a specific ngram order.",
epilog="e.g. ./compute_sentence_probs_arpa.py ARPA_LM NGRAM_ORDER TEXT_IN PROB_FILE --log-base=LOG_BASE",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("arpa_lm", type=str,
help="Input language model in arpa form.")
parser.add_argument("ngram_order", type=int,
help="Order of ngram")
parser.add_argument("text_in", type=str,
help="Filename of input text file (each line will be interpreted as a sentence).")
parser.add_argument("prob_file", type=str,
help="Filename of output probability file.")
parser.add_argument("--log-base", type=float, default=math.exp(1),
                    help="Log base for log probability")
args = parser.parse_args()
def check_args(args):
args.text_in_handle = sys.stdin if args.text_in == "-" else open(args.text_in, "r")
args.prob_file_handle = sys.stdout if args.prob_file == "-" else open(args.prob_file, "w")
if args.log_base <= 0:
sys.exit("compute_sentence_probs_arpa.py: Invalid log base (must be greater than 0)")
def is_logprob(token):
    if token[0] == "-":
        try:
            float(token[1:])
            return True
        except ValueError:
            return False
    else:
        return False
def check_number(model_file, tot_num):
cur_num = 0
max_ngram_order = 0
with open(model_file) as model:
lines = model.readlines()
for line in lines[1:]:
if "=" not in line:
return (cur_num == tot_num), max_ngram_order
cur_num += int(line.split("=")[-1])
max_ngram_order = int(line.split("=")[0].split()[-1])
# This function loads a language model in ARPA form and saves it in a dictionary
# for computing the sentence probability of each line in the input text file.
def load_model(model_file):
with open(model_file) as model:
ngram_dict = {}
lines = model.readlines()
# check arpa form
if lines[0][:-1] != "\\data\\":
sys.exit("compute_sentence_probs_arpa.py: Please make sure that language model is in arpa form.")
# read line
for line in lines:
if line[0] == "-":
line_split = line.split()
if is_logprob(line_split[-1]):
ngram_key = " ".join(line_split[1:-1])
if ngram_key in ngram_dict:
sys.exit("compute_sentence_probs_arpa.py: Duplicated ngram in arpa language model: {}.".format(ngram_key))
ngram_dict[ngram_key] = (line_split[0], line_split[-1])
else:
ngram_key = " ".join(line_split[1:])
if ngram_key in ngram_dict:
sys.exit("compute_sentence_probs_arpa.py: Duplicated ngram in arpa language model: {}.".format(ngram_key))
ngram_dict[ngram_key] = (line_split[0],)
return ngram_dict, len(ngram_dict)
def compute_sublist_prob(sub_list):
if len(sub_list) == 0:
sys.exit("compute_sentence_probs_arpa.py: Ngram substring not found in arpa language model, please check.")
sub_string = " ".join(sub_list)
if sub_string in ngram_dict:
return -float(ngram_dict[sub_string][0][1:])
else:
backoff_substring = " ".join(sub_list[:-1])
backoff_weight = 0.0 if (backoff_substring not in ngram_dict or len(ngram_dict[backoff_substring]) < 2) \
else -float(ngram_dict[backoff_substring][1][1:])
return compute_sublist_prob(sub_list[1:]) + backoff_weight
def compute_begin_prob(sub_list):
logprob = 0
for i in range(1, len(sub_list) - 1):
logprob += compute_sublist_prob(sub_list[:i + 1])
return logprob
# The probability is computed in this way:
# p(word_N | word_N-1 ... word_1) = ngram_dict[word_1 ... word_N][0].
# Here ngram_dict is a dictionary that stores a tuple for each ngram.
# The first element of the tuple is the log probability and the second is the
# backoff weight (if it exists).
# If the particular ngram (word_1 ... word_N) is not in the dictionary, then
# p(word_N | word_N-1 ... word_1) = p(word_N | word_N-1 ... word_2) * backoff_weight(word_N-1 | word_N-2 ... word_1)
# If the sequence (word_N-1 ... word_1) is not in the dictionary, then the backoff_weight is replaced with 0.0 (log 1).
# More details can be found in https://cmusphinx.github.io/wiki/arpaformat/
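# Worked example (illustrative only): with ngram_order = 3, if the trigram
# "a b c" is missing from ngram_dict, compute_sublist_prob falls back to the
# bigram "b c" and adds the backoff weight stored for "a b" (0.0 if none):
#   log p(c | a b) = log p(c | b) + backoff_weight(a b)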
def compute_sentence_prob(sentence, ngram_order):
sentence_split = sentence.split()
for i in range(len(sentence_split)):
if sentence_split[i] not in ngram_dict:
sentence_split[i] = "<unk>"
sen_length = len(sentence_split)
if sen_length < ngram_order:
return compute_begin_prob(sentence_split)
else:
logprob = 0
begin_sublist = sentence_split[:ngram_order]
logprob += compute_begin_prob(begin_sublist)
for i in range(sen_length - ngram_order + 1):
cur_sublist = sentence_split[i : i + ngram_order]
logprob += compute_sublist_prob(cur_sublist)
return logprob
def output_result(text_in_handle, output_file_handle, ngram_order):
lines = text_in_handle.readlines()
logbase_modifier = math.log(10, args.log_base)
for line in lines:
new_line = "<s> " + line[:-1] + " </s>"
logprob = compute_sentence_prob(new_line, ngram_order)
new_logprob = logprob * logbase_modifier
output_file_handle.write("{}\n".format(new_logprob))
text_in_handle.close()
output_file_handle.close()
if __name__ == "__main__":
check_args(args)
ngram_dict, tot_num = load_model(args.arpa_lm)
num_valid, max_ngram_order = check_number(args.arpa_lm, tot_num)
if not num_valid:
sys.exit("compute_sentence_probs_arpa.py: Wrong loading model.")
if args.ngram_order <= 0 or args.ngram_order > max_ngram_order:
sys.exit("compute_sentence_probs_arpa.py: " +
"Invalid ngram_order (either negative or greater than maximum ngram number ({}) allowed)".format(max_ngram_order))
output_result(args.text_in_handle, args.prob_file_handle, args.ngram_order)
| python | 6,721 |
"""
We will use this script to teach Python to absolute beginners
The script is an example of Fizz-Buzz implemented in Python
The FizzBuzz problem:
For all integers between 1 and 99 (include both):
# print fizz for multiples of 3
# print buzz for multiples of 5
# print fizzbuzz for multiples of 3 and 5
"""
def fizzbuzz(max_num):
"This method implements FizzBuzz"
# Google for 'range in python' to see what it does
for i in range(1,max_num):
# % or modulo division gives you the remainder
if i%3==0 and i%5==0:
print(i,"fizzbuzz")
elif i%3==0:
print(i,"fizz")
elif i%5==0:
            print(i,"buzz")
#----START OF SCRIPT
if __name__=='__main__':
fizzbuzz(int('16'))
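# Note: range(1, max_num) stops at max_num - 1, so covering 1..99 inclusive as
# described in the module docstring would require calling fizzbuzz(100).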
| python | 759 |
from django.db import models
from django.contrib.auth.models import AbstractBaseUser
from django.contrib.auth.models import PermissionsMixin
from django.contrib.auth.models import BaseUserManager
from django.conf import settings
# Create your models here.
class UserProfileManager(BaseUserManager):
"""Manager for user profiles"""
def create_user(self, email, name, password=None):
"""Create a new user profile"""
if not email:
raise ValueError('Users must have an email address')
email = self.normalize_email(email)
user = self.model(email=email, name=name)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, email, name, password):
"""Create and save a new superuser with given details"""
user = self.create_user(email, name, password)
user.is_superuser = True
user.is_staff = True
user.save(using=self._db)
return user
class UserProfile(AbstractBaseUser, PermissionsMixin):
"""Database model for users in system"""
email = models.EmailField(max_length=255, unique=True)
name = models.CharField(max_length=255)
is_active = models.BooleanField(default=True)
is_staff = models.BooleanField(default=False)
objects = UserProfileManager()
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = ['name']
def get_full_name(self):
"""Retrieve full name of the user"""
return self.name
def get_short_name(self):
"""Retrieve short name of user"""
return self.name
def __str__(self):
"""Return string representation of the user"""
return self.email
class ProfileFeedItem(models.Model):
"""Profile status update"""
user_profile = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.CASCADE
)
status_text = models.CharField(max_length=255)
created_on = models.DateTimeField(auto_now_add=True)
def __str__(self):
"""To return the model as a string"""
return self.status_text
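# Usage sketch (illustrative values, e.g. from a Django shell; not part of this
# module): users go through the custom manager rather than the constructor:
#   user = UserProfile.objects.create_user(
#       email="test@example.com", name="Test User", password="change-me")
#   admin = UserProfile.objects.create_superuser(
#       email="admin@example.com", name="Admin", password="change-me")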
| python | 2,112 |
"""
Copyright (c) 2022 Huawei Technologies Co.,Ltd.
openGauss is licensed under Mulan PSL v2.
You can use this software according to the terms and conditions of the Mulan PSL v2.
You may obtain a copy of Mulan PSL v2 at:
http://license.coscl.org.cn/MulanPSL2
THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
See the Mulan PSL v2 for more details.
"""
"""
Case Type : Server-side tool
Case Name : Specify a host name list file that contains an invalid host name and check the disk status
Description :
    1. Write an invalid host name into the list file
    2. Check the host disk status as the root user
    3. Clean up the environment
Expect :
    1. Writing the invalid host name into the list file succeeds
    2. Checking the host disk information as the root user fails
    3. Cleaning up the environment succeeds
History :
"""
import unittest
from yat.test import Node
from yat.test import macro
from testcase.utils.Logger import Logger
class Tools(unittest.TestCase):
def setUp(self):
self.log = Logger()
self.root_user = Node('default')
def test_server_tools1(self):
        self.log.info('-Opengauss_Function_Tools_gs_checkos_Case0039 start-')
        self.log.info('-----Write an invalid host name into the list file-----')
echo_cmd = f'echo \'ctupddl0000\' > /home/hostfile.txt'
self.log.info(echo_cmd)
msg = self.root_user.sh(echo_cmd).result()
self.log.info(msg)
        self.log.info('-Specify a host name list file with an invalid host name and check the disk status-')
checkos_cmd = f'source {macro.DB_ENV_PATH};' \
f'gs_checkos -i A8 -f /home/hostfile.txt'
self.log.info(checkos_cmd)
checkos_msg = self.root_user.sh(checkos_cmd).result()
self.log.info(checkos_msg)
self.assertIn('Check_Disk_Configure -l '
'\'/tmp/gs_checkos/gs_local.log\' . Error', checkos_msg)
def tearDown(self):
        self.log.info('-----------------Clean up the environment----------------')
rm_cmd = f'rm -rf /home/hostfile.txt'
self.log.info(rm_cmd)
rm_msg = self.root_user.sh(rm_cmd).result()
self.log.info(rm_msg)
        self.log.info('-Opengauss_Function_Tools_gs_checkos_Case0039 end-')
| python | 2,038 |
import abc
import numpy as np
import tensorflow as tf
from elasticdl.python.common.constants import DistributionStrategy
from elasticdl.python.common.log_utils import default_logger as logger
from elasticdl.python.common.model_utils import (
restore_model_params_from_checkpoint,
)
from elasticdl.python.elasticdl.layers.embedding import Embedding
from elasticdl.python.master.checkpoint_service import (
get_valid_lastest_version_dir,
)
from elasticdl.python.ps.embedding_table import EmbeddingTable
def _get_trained_params_from_checkpoint(checkpoint_dir):
parameters = restore_model_params_from_checkpoint(checkpoint_dir, 0, 1)
trained_params = parameters.non_embedding_params
for name, table in parameters.embedding_params.items():
# The name of variable in a tf.keras.layers.Embedding layer is
# "{layer_name}/embeddings:0"
var_name = name + "/embeddings:0"
trained_params[var_name] = table
return trained_params
def _convert_embedding_vector_to_variable(embedding_shape, embedding_table):
embedding_ids = list(embedding_table.embedding_vectors.keys())
embedding_values = list(embedding_table.embedding_vectors.values())
embedding_weights = np.zeros(embedding_shape)
embedding_weights[embedding_ids] = embedding_values
return embedding_weights
class ModelHandler(metaclass=abc.ABCMeta):
"""Generate the model to train in ElasticDL for different distributed
strategies and export trained model in ElasticDL to SavedModel.
"""
@abc.abstractmethod
def get_model_to_train(self, model):
"""Generate a model to train in ElasticDL.
Args:
model: A native keras model instance.
Returns:
A keras model instance for ElasticDL training.
"""
@abc.abstractmethod
    def get_model_to_export(self, model, dataset):
        """Get the model which can be exported as a SavedModel
        by tf.saved_model.save.
Args:
model: A keras model instance trained by ElasticDL and
it may contains `elasticdl.layers.Embedding` layers.
dataset: A `tf.data.Dataset` instance which has the same outputs as
the training dataset.
Returns:
A keras model instance trained by ElasticDL.
"""
@classmethod
def get_model_handler(
cls, distribution_strategy=None, checkpoint_dir=None
):
"""Create a model handler to process the model for the
distributed strategy.
Args:
distribution_strategy (string): distribution strategy name
checkpoint_dir: Checkpoint directory to save model parametes
during training.
Return:
ModelHandler subclass instance.
"""
if distribution_strategy == DistributionStrategy.PARAMETER_SERVER:
return ParameterServerModelHandler(checkpoint_dir=checkpoint_dir)
elif distribution_strategy == DistributionStrategy.ALLREDUCE:
logger.warning(
"Allreduce distribution strategy is not supported yet. "
"Switching to use the default distribution strategy."
)
return DefaultModelHandler()
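    # Usage sketch (the checkpoint path is hypothetical; the strategy constant
    # is the one imported at the top of this module):
    #   handler = ModelHandler.get_model_handler(
    #       distribution_strategy=DistributionStrategy.PARAMETER_SERVER,
    #       checkpoint_dir="/tmp/elasticdl_ckpt")
    #   train_model = handler.get_model_to_train(keras_model)
    #   export_model = handler.get_model_to_export(train_model, dataset)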
class DefaultModelHandler(ModelHandler):
"""Return the origin model to train and export."""
def get_model_to_train(self, model):
return model
def get_model_to_export(self, model, dataset):
"""
Get model with inputs and trained parameters to export.
"""
if not model.inputs:
model._build_model_with_inputs(inputs=dataset, targets=None)
return model
class ParameterServerModelHandler(ModelHandler):
"""Model handler for parameter server strategy.
For training, The handler will replace `tf.keras.layers.Embedding`
layers with`elasticdl.layers.Embedding` for training.
For saving model, the handler will restore Keras model definition and
pull trained parameters from parameter server(s) for the model.
"""
def __init__(self, checkpoint_dir=None):
"""
Arguments:
checkpoint_dir: A checkpoint directory to save all model
parameters during training.
"""
self._checkpoint_dir = checkpoint_dir
def get_model_to_train(self, model):
"""Replace the tf.keras.layers.Embedding layer in the model with
an elasticdl.layers.Embedding layer in ParameterServerStrategy.
"""
if type(model) == tf.keras.Sequential or model._is_graph_network:
model = self._replace_embedding_layer_to_clone_model(
model, tf.keras.layers.Embedding, Embedding
)
else:
model = self._replace_embedding_attributes_for_subclass(
model, tf.keras.layers.Embedding, Embedding
)
return model
def get_model_to_export(self, model, dataset):
"""Get the model which can be exported to a SavedModel by
`tf.saved_model.save`.
"""
model = self._restore_keras_model_def(model)
if not model.inputs:
# build model to add inputs and outputs that
# can be consumed by tf-serving
model._build_model_with_inputs(inputs=dataset, targets=None)
checkpoint_dir = get_valid_lastest_version_dir(self._checkpoint_dir)
if checkpoint_dir is None:
logger.warning("No available checkpoint to export model")
return model
trained_params = _get_trained_params_from_checkpoint(checkpoint_dir)
for var in model.trainable_variables:
if isinstance(trained_params[var.name], EmbeddingTable):
embedding_params = _convert_embedding_vector_to_variable(
var.shape, trained_params[var.name]
)
var.assign(embedding_params)
else:
var.assign(trained_params[var.name].numpy())
return model
def _restore_keras_model_def(self, model):
"""Restore Keras model definition by replacing
`elasticdl.layers.Embedding` layers with
`tf.keras.layers.Embedding` layers.
"""
# clear keras model session to avoid clutter from old models/layers.
tf.keras.backend.clear_session()
if (
isinstance(model, tf.keras.models.Model)
and not model._is_graph_network
):
model = self._replace_embedding_attributes_for_subclass(
model, Embedding, tf.keras.layers.Embedding
)
else:
model = self._replace_embedding_layer_to_clone_model(
model, Embedding, tf.keras.layers.Embedding
)
return model
@staticmethod
def _replace_embedding_layer_to_clone_model(
model, src_embedding_class, dst_embedding_class
):
"""Clone a new model by cloning model and replace the
src_embedding_class layer with a dst_embedding_class.
"""
def _clone_function(layer):
if type(layer) == src_embedding_class:
logger.debug(
"Replace {} with {}".format(
src_embedding_class, dst_embedding_class
)
)
                # ElasticDL embedding only accepts a string-type initializer
if src_embedding_class == Embedding:
init = tf.keras.initializers.get(
layer.embeddings_initializer
)
if dst_embedding_class == Embedding:
init = tf.keras.initializers.serialize(
layer.embeddings_initializer
)["class_name"]
embedding_layer = dst_embedding_class(
output_dim=layer.output_dim,
input_dim=layer.input_dim,
embeddings_initializer=init,
mask_zero=layer.mask_zero,
input_length=layer.input_length,
name=layer.name,
)
return embedding_layer
return layer
return tf.keras.models.clone_model(
model, clone_function=_clone_function
)
@staticmethod
def _replace_embedding_attributes_for_subclass(
model, src_embedding_class, dst_embedding_class
):
"""Replace the keras embedding attribute with
elasticdl.layers.Embedding layer.
"""
for name, value in model.__dict__.items():
if type(value) == src_embedding_class:
embedding_layer = dst_embedding_class(
output_dim=value.output_dim,
input_dim=value.input_dim,
embeddings_initializer=value.embeddings_initializer,
mask_zero=value.mask_zero,
input_length=value.input_length,
)
setattr(model, name, embedding_layer)
return model
| python | 9,038 |
#!/usr/bin/env python3
# Copyright (c) 2016-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test segwit transactions and blocks on P2P network."""
from binascii import hexlify
import math
import random
import struct
import time
from test_framework.blocktools import create_block, create_coinbase, add_witness_commitment, get_witness_script, WITNESS_COMMITMENT_HEADER
from test_framework.key import CECKey, CPubKey
from test_framework.messages import (
BIP125_SEQUENCE_NUMBER,
CBlock,
CBlockHeader,
CInv,
COutPoint,
CTransaction,
CTxIn,
CTxInWitness,
CTxOut,
CTxWitness,
MAX_BLOCK_BASE_SIZE,
MSG_WITNESS_FLAG,
NODE_NETWORK,
NODE_WITNESS,
msg_block,
msg_getdata,
msg_headers,
msg_inv,
msg_tx,
msg_witness_block,
msg_witness_tx,
ser_uint256,
ser_vector,
sha256,
uint256_from_str,
)
from test_framework.mininode import (
P2PInterface,
mininode_lock,
wait_until,
)
from test_framework.script import (
CScript,
CScriptNum,
CScriptOp,
MAX_SCRIPT_ELEMENT_SIZE,
OP_0,
OP_1,
OP_16,
OP_2DROP,
OP_CHECKMULTISIG,
OP_CHECKSIG,
OP_DROP,
OP_DUP,
OP_ELSE,
OP_ENDIF,
OP_EQUAL,
OP_EQUALVERIFY,
OP_HASH160,
OP_IF,
OP_RETURN,
OP_TRUE,
SIGHASH_ALL,
SIGHASH_ANYONECANPAY,
SIGHASH_NONE,
SIGHASH_SINGLE,
SegwitVersion1SignatureHash,
SignatureHash,
hash160,
)
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
bytes_to_hex_str,
connect_nodes,
disconnect_nodes,
get_bip9_status,
hex_str_to_bytes,
sync_blocks,
sync_mempools,
)
# The versionbit bit used to signal activation of SegWit
VB_WITNESS_BIT = 1
VB_PERIOD = 144
VB_TOP_BITS = 0x20000000
MAX_SIGOP_COST = 80000
class UTXO():
"""Used to keep track of anyone-can-spend outputs that we can use in the tests."""
def __init__(self, sha256, n, value):
self.sha256 = sha256
self.n = n
self.nValue = value
def get_p2pkh_script(pubkeyhash):
"""Get the script associated with a P2PKH."""
return CScript([CScriptOp(OP_DUP), CScriptOp(OP_HASH160), pubkeyhash, CScriptOp(OP_EQUALVERIFY), CScriptOp(OP_CHECKSIG)])
def sign_p2pk_witness_input(script, tx_to, in_idx, hashtype, value, key):
"""Add signature for a P2PK witness program."""
tx_hash = SegwitVersion1SignatureHash(script, tx_to, in_idx, hashtype, value)
signature = key.sign(tx_hash) + chr(hashtype).encode('latin-1')
tx_to.wit.vtxinwit[in_idx].scriptWitness.stack = [signature, script]
tx_to.rehash()
def get_virtual_size(witness_block):
"""Calculate the virtual size of a witness block.
Virtual size is base + witness/4."""
base_size = len(witness_block.serialize(with_witness=False))
total_size = len(witness_block.serialize(with_witness=True))
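    # total_size - base_size is the number of witness bytes, so the line
    # below computes base + witness/4.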
# the "+3" is so we round up
vsize = int((3 * base_size + total_size + 3) / 4)
return vsize
def test_transaction_acceptance(node, p2p, tx, with_witness, accepted, reason=None):
"""Send a transaction to the node and check that it's accepted to the mempool
- Submit the transaction over the p2p interface
- use the getrawmempool rpc to check for acceptance."""
tx_message = msg_tx(tx)
if with_witness:
tx_message = msg_witness_tx(tx)
p2p.send_message(tx_message)
p2p.sync_with_ping()
assert_equal(tx.hash in node.getrawmempool(), accepted)
if (reason is not None and not accepted):
# Check the rejection reason as well.
with mininode_lock:
assert_equal(p2p.last_message["reject"].reason, reason)
def test_witness_block(node, p2p, block, accepted, with_witness=True, reason=None):
"""Send a block to the node and check that it's accepted
- Submit the block over the p2p interface
- use the getbestblockhash rpc to check for acceptance."""
if with_witness:
p2p.send_message(msg_witness_block(block))
else:
p2p.send_message(msg_block(block))
p2p.sync_with_ping()
assert_equal(node.getbestblockhash() == block.hash, accepted)
if (reason is not None and not accepted):
# Check the rejection reason as well.
with mininode_lock:
assert_equal(p2p.last_message["reject"].reason, reason)
class TestP2PConn(P2PInterface):
def __init__(self):
super().__init__()
self.getdataset = set()
def on_getdata(self, message):
for inv in message.inv:
self.getdataset.add(inv.hash)
def announce_tx_and_wait_for_getdata(self, tx, timeout=60, success=True):
with mininode_lock:
self.last_message.pop("getdata", None)
self.send_message(msg_inv(inv=[CInv(1, tx.sha256)]))
if success:
self.wait_for_getdata(timeout)
else:
time.sleep(timeout)
assert not self.last_message.get("getdata")
def announce_block_and_wait_for_getdata(self, block, use_header, timeout=60):
with mininode_lock:
self.last_message.pop("getdata", None)
self.last_message.pop("getheaders", None)
msg = msg_headers()
msg.headers = [CBlockHeader(block)]
if use_header:
self.send_message(msg)
else:
self.send_message(msg_inv(inv=[CInv(2, block.sha256)]))
self.wait_for_getheaders()
self.send_message(msg)
self.wait_for_getdata()
def request_block(self, blockhash, inv_type, timeout=60):
with mininode_lock:
self.last_message.pop("block", None)
self.send_message(msg_getdata(inv=[CInv(inv_type, blockhash)]))
self.wait_for_block(blockhash, timeout)
return self.last_message["block"].block
class SegWitTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 3
# This test tests SegWit both pre and post-activation, so use the normal BIP9 activation.
self.extra_args = [["-whitelist=127.0.0.1", "-vbparams=segwit:0:999999999999", "-mempoolreplacement=1"], ["-whitelist=127.0.0.1", "-acceptnonstdtxn=0", "-vbparams=segwit:0:999999999999", "-mempoolreplacement=1"], ["-whitelist=127.0.0.1", "-vbparams=segwit:0:0", "-mempoolreplacement=1"]]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def setup_network(self):
self.setup_nodes()
connect_nodes(self.nodes[0], 1)
connect_nodes(self.nodes[0], 2)
self.sync_all()
# Helper functions
def build_next_block(self, version=VB_TOP_BITS):
"""Build a block on top of node0's tip."""
tip = self.nodes[0].getbestblockhash()
height = self.nodes[0].getblockcount() + 1
block_time = self.nodes[0].getblockheader(tip)["mediantime"] + 1
block = create_block(int(tip, 16), create_coinbase(height), block_time)
block.nVersion = version
block.rehash()
return block
def update_witness_block_with_transactions(self, block, tx_list, nonce=0):
"""Add list of transactions to block, adds witness commitment, then solves."""
block.vtx.extend(tx_list)
add_witness_commitment(block, nonce)
block.solve()
def run_test(self):
# Setup the p2p connections
# self.test_node sets NODE_WITNESS|NODE_NETWORK
self.test_node = self.nodes[0].add_p2p_connection(TestP2PConn(), services=NODE_NETWORK | NODE_WITNESS)
# self.old_node sets only NODE_NETWORK
self.old_node = self.nodes[0].add_p2p_connection(TestP2PConn(), services=NODE_NETWORK)
# self.std_node is for testing node1 (fRequireStandard=true)
self.std_node = self.nodes[1].add_p2p_connection(TestP2PConn(), services=NODE_NETWORK | NODE_WITNESS)
assert self.test_node.nServices & NODE_WITNESS != 0
# Keep a place to store utxo's that can be used in later tests
self.utxo = []
# Segwit status 'defined'
self.segwit_status = 'defined'
self.test_non_witness_transaction()
self.test_unnecessary_witness_before_segwit_activation()
self.test_v0_outputs_arent_spendable()
self.test_block_relay()
self.advance_to_segwit_started()
# Segwit status 'started'
self.test_getblocktemplate_before_lockin()
self.advance_to_segwit_lockin()
# Segwit status 'locked_in'
self.test_unnecessary_witness_before_segwit_activation()
self.test_witness_tx_relay_before_segwit_activation()
self.test_block_relay()
self.test_standardness_v0()
self.advance_to_segwit_active()
# Segwit status 'active'
self.test_p2sh_witness()
self.test_witness_commitments()
self.test_block_malleability()
self.test_witness_block_size()
self.test_submit_block()
self.test_extra_witness_data()
self.test_max_witness_push_length()
self.test_max_witness_program_length()
self.test_witness_input_length()
self.test_block_relay()
self.test_tx_relay_after_segwit_activation()
self.test_standardness_v0()
self.test_segwit_versions()
self.test_premature_coinbase_witness_spend()
self.test_uncompressed_pubkey()
self.test_signature_version_1()
self.test_non_standard_witness_blinding()
self.test_non_standard_witness()
self.test_upgrade_after_activation()
self.test_witness_sigops()
# Individual tests
def subtest(func): # noqa: N805
"""Wraps the subtests for logging and state assertions."""
def func_wrapper(self, *args, **kwargs):
self.log.info("Subtest: {} (Segwit status = {})".format(func.__name__, self.segwit_status))
# Assert segwit status is as expected
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], self.segwit_status)
func(self, *args, **kwargs)
# Each subtest should leave some utxos for the next subtest
assert self.utxo
sync_blocks(self.nodes)
# Assert segwit status is as expected at end of subtest
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], self.segwit_status)
return func_wrapper
@subtest
def test_non_witness_transaction(self):
"""See if sending a regular transaction works, and create a utxo to use in later tests."""
# Mine a block with an anyone-can-spend coinbase,
# let it mature, then try to spend it.
block = self.build_next_block(version=1)
block.solve()
self.test_node.send_message(msg_block(block))
self.test_node.sync_with_ping() # make sure the block was processed
txid = block.vtx[0].sha256
self.nodes[0].generate(99) # let the block mature
# Create a transaction that spends the coinbase
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(txid, 0), b""))
tx.vout.append(CTxOut(49 * 100000000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE])))
tx.calc_sha256()
# Check that serializing it with or without witness is the same
# This is a sanity check of our testing framework.
assert_equal(msg_tx(tx).serialize(), msg_witness_tx(tx).serialize())
self.test_node.send_message(msg_witness_tx(tx))
self.test_node.sync_with_ping() # make sure the tx was processed
assert(tx.hash in self.nodes[0].getrawmempool())
# Save this transaction for later
self.utxo.append(UTXO(tx.sha256, 0, 49 * 100000000))
self.nodes[0].generate(1)
@subtest
def test_unnecessary_witness_before_segwit_activation(self):
"""Verify that blocks with witnesses are rejected before activation."""
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, CScript([OP_TRUE])))
tx.wit.vtxinwit.append(CTxInWitness())
tx.wit.vtxinwit[0].scriptWitness.stack = [CScript([CScriptNum(1)])]
# Verify the hash with witness differs from the txid
# (otherwise our testing framework must be broken!)
tx.rehash()
assert(tx.sha256 != tx.calc_sha256(with_witness=True))
# Construct a segwit-signaling block that includes the transaction.
block = self.build_next_block(version=(VB_TOP_BITS | (1 << VB_WITNESS_BIT)))
self.update_witness_block_with_transactions(block, [tx])
# Sending witness data before activation is not allowed (anti-spam
# rule).
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
wait_until(lambda: 'reject' in self.test_node.last_message and self.test_node.last_message["reject"].reason == b"unexpected-witness")
# But it should not be permanently marked bad...
# Resend without witness information.
self.test_node.send_message(msg_block(block))
self.test_node.sync_with_ping()
assert_equal(self.nodes[0].getbestblockhash(), block.hash)
# Update our utxo list; we spent the first entry.
self.utxo.pop(0)
self.utxo.append(UTXO(tx.sha256, 0, tx.vout[0].nValue))
@subtest
def test_block_relay(self):
"""Test that block requests to NODE_WITNESS peer are with MSG_WITNESS_FLAG.
This is true regardless of segwit activation.
Also test that we don't ask for blocks from unupgraded peers."""
blocktype = 2 | MSG_WITNESS_FLAG
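# Quick sketch of the inv/getdata type arithmetic (assuming the framework's usual
# constants): MSG_BLOCK == 2 and MSG_WITNESS_FLAG == 1 << 30, so 2 | MSG_WITNESS_FLAG is
# the MSG_WITNESS_BLOCK getdata type defined by BIP144.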
# test_node has set NODE_WITNESS, so all getdata requests should be for
# witness blocks.
# Test that announcing a block via inv results in a getdata, and that announcing a
# version 4 or random version-bits block via a header also results in a getdata.
block1 = self.build_next_block()
block1.solve()
self.test_node.announce_block_and_wait_for_getdata(block1, use_header=False)
assert(self.test_node.last_message["getdata"].inv[0].type == blocktype)
test_witness_block(self.nodes[0], self.test_node, block1, True)
block2 = self.build_next_block()
block2.solve()
self.test_node.announce_block_and_wait_for_getdata(block2, use_header=True)
assert(self.test_node.last_message["getdata"].inv[0].type == blocktype)
test_witness_block(self.nodes[0], self.test_node, block2, True)
block3 = self.build_next_block(version=(VB_TOP_BITS | (1 << 15)))
block3.solve()
self.test_node.announce_block_and_wait_for_getdata(block3, use_header=True)
assert(self.test_node.last_message["getdata"].inv[0].type == blocktype)
test_witness_block(self.nodes[0], self.test_node, block3, True)
# Check that we can getdata for witness blocks or regular blocks,
# and the right thing happens.
if self.segwit_status != 'active':
# Before activation, we should be able to request old blocks with
# or without witness, and they should be the same.
chain_height = self.nodes[0].getblockcount()
# Pick 10 random blocks on main chain, and verify that getdata's
# for MSG_BLOCK, MSG_WITNESS_BLOCK, and rpc getblock() are equal.
all_heights = list(range(chain_height + 1))
random.shuffle(all_heights)
all_heights = all_heights[0:10]
for height in all_heights:
block_hash = self.nodes[0].getblockhash(height)
rpc_block = self.nodes[0].getblock(block_hash, False)
block_hash = int(block_hash, 16)
block = self.test_node.request_block(block_hash, 2)
wit_block = self.test_node.request_block(block_hash, 2 | MSG_WITNESS_FLAG)
assert_equal(block.serialize(True), wit_block.serialize(True))
assert_equal(block.serialize(), hex_str_to_bytes(rpc_block))
else:
# After activation, witness blocks and non-witness blocks should
# be different. Verify rpc getblock() returns witness blocks, while
# getdata respects the requested type.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [])
# This gives us a witness commitment.
assert(len(block.vtx[0].wit.vtxinwit) == 1)
assert(len(block.vtx[0].wit.vtxinwit[0].scriptWitness.stack) == 1)
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Now try to retrieve it...
rpc_block = self.nodes[0].getblock(block.hash, False)
non_wit_block = self.test_node.request_block(block.sha256, 2)
wit_block = self.test_node.request_block(block.sha256, 2 | MSG_WITNESS_FLAG)
assert_equal(wit_block.serialize(True), hex_str_to_bytes(rpc_block))
assert_equal(wit_block.serialize(False), non_wit_block.serialize())
assert_equal(wit_block.serialize(True), block.serialize(True))
# Test size, vsize, weight
rpc_details = self.nodes[0].getblock(block.hash, True)
assert_equal(rpc_details["size"], len(block.serialize(True)))
assert_equal(rpc_details["strippedsize"], len(block.serialize(False)))
weight = 3 * len(block.serialize(False)) + len(block.serialize(True))
assert_equal(rpc_details["weight"], weight)
# Upgraded node should not ask for blocks from an unupgraded peer.
block4 = self.build_next_block(version=4)
block4.solve()
self.old_node.getdataset = set()
# Blocks can be requested via direct-fetch (immediately upon processing the announcement)
# or via parallel download (with an indeterminate delay from processing the announcement)
# so to test that a block is NOT requested, we could guess a time period to sleep for,
# and then check. We can avoid the sleep() by taking advantage of transaction getdata's
# being processed after block getdata's, and announce a transaction as well,
# and then check to see if that particular getdata has been received.
# Since 0.14, inv's will only be responded to with a getheaders, so send a header
# to announce this block.
msg = msg_headers()
msg.headers = [CBlockHeader(block4)]
self.old_node.send_message(msg)
self.old_node.announce_tx_and_wait_for_getdata(block4.vtx[0])
assert(block4.sha256 not in self.old_node.getdataset)
@subtest
def test_v0_outputs_arent_spendable(self):
"""Test that v0 outputs aren't spendable before segwit activation.
~6 months after segwit activation, the SCRIPT_VERIFY_WITNESS flag was
backdated so that it applies to all blocks, going back to the genesis
block.
Consequently, version 0 witness outputs are never spendable without
witness, and so can't be spent before segwit activation (the point at which
blocks are permitted to contain witnesses)."""
# node2 doesn't need to be connected for this test.
# (If it's connected, node0 may propagate an invalid block to it over
# compact blocks and the nodes would have inconsistent tips.)
disconnect_nodes(self.nodes[0], 2)
# Create two outputs, a p2wsh and p2sh-p2wsh
witness_program = CScript([OP_TRUE])
witness_hash = sha256(witness_program)
script_pubkey = CScript([OP_0, witness_hash])
p2sh_pubkey = hash160(script_pubkey)
p2sh_script_pubkey = CScript([OP_HASH160, p2sh_pubkey, OP_EQUAL])
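# For reference: the native P2WSH scriptPubKey is OP_0 <32-byte sha256(witness script)>,
# while the P2SH-P2WSH scriptPubKey is OP_HASH160 <hash160(that OP_0 script)> OP_EQUAL,
# so the witness program is only revealed when the P2SH output is spent.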
value = self.utxo[0].nValue // 3
tx = CTransaction()
tx.vin = [CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b'')]
tx.vout = [CTxOut(value, script_pubkey), CTxOut(value, p2sh_script_pubkey)]
tx.vout.append(CTxOut(value, CScript([OP_TRUE])))
tx.rehash()
txid = tx.sha256
# Add it to a block
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
# Verify that segwit isn't activated. A block serialized with witness
# should be rejected prior to activation.
test_witness_block(self.nodes[0], self.test_node, block, accepted=False, with_witness=True, reason=b'unexpected-witness')
# Now send the block without witness. It should be accepted
test_witness_block(self.nodes[0], self.test_node, block, accepted=True, with_witness=False)
# Now try to spend the outputs. This should fail since SCRIPT_VERIFY_WITNESS is always enabled.
p2wsh_tx = CTransaction()
p2wsh_tx.vin = [CTxIn(COutPoint(txid, 0), b'')]
p2wsh_tx.vout = [CTxOut(value, CScript([OP_TRUE]))]
p2wsh_tx.wit.vtxinwit.append(CTxInWitness())
p2wsh_tx.wit.vtxinwit[0].scriptWitness.stack = [CScript([OP_TRUE])]
p2wsh_tx.rehash()
p2sh_p2wsh_tx = CTransaction()
p2sh_p2wsh_tx.vin = [CTxIn(COutPoint(txid, 1), CScript([script_pubkey]))]
p2sh_p2wsh_tx.vout = [CTxOut(value, CScript([OP_TRUE]))]
p2sh_p2wsh_tx.wit.vtxinwit.append(CTxInWitness())
p2sh_p2wsh_tx.wit.vtxinwit[0].scriptWitness.stack = [CScript([OP_TRUE])]
p2sh_p2wsh_tx.rehash()
for tx in [p2wsh_tx, p2sh_p2wsh_tx]:
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
# When the block is serialized with a witness, the block will be rejected because witness
# data isn't allowed in blocks that don't commit to witness data.
test_witness_block(self.nodes[0], self.test_node, block, accepted=False, with_witness=True, reason=b'unexpected-witness')
# When the block is serialized without witness, validation fails because the transaction is
# invalid (transactions are always validated with SCRIPT_VERIFY_WITNESS so a segwit v0 transaction
# without a witness is invalid).
# Note: The reject reason for this failure could be
# 'block-validation-failed' (if script check threads > 1) or
# 'non-mandatory-script-verify-flag (Witness program was passed an
# empty witness)' (otherwise).
# TODO: support multiple acceptable reject reasons.
# (SCRIPT_VERIFY_WITNESS applies retroactively to all blocks, so this spend fails even
# before segwit activation.)
test_witness_block(self.nodes[0], self.test_node, block, accepted=False, with_witness=False)
connect_nodes(self.nodes[0], 2)
self.utxo.pop(0)
self.utxo.append(UTXO(txid, 2, value))
@subtest
def advance_to_segwit_started(self):
"""Mine enough blocks for segwit's vb state to be 'started'."""
height = self.nodes[0].getblockcount()
# Will need to rewrite the tests here if we are past the first period
assert(height < VB_PERIOD - 1)
# Advance to end of period, status should now be 'started'
self.nodes[0].generate(VB_PERIOD - height - 1)
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'started')
self.segwit_status = 'started'
@subtest
def test_getblocktemplate_before_lockin(self):
# Node0 is segwit aware, node2 is not.
for node in [self.nodes[0], self.nodes[2]]:
gbt_results = node.getblocktemplate()
block_version = gbt_results['version']
# Even without requesting the segwit rule, the segwit-aware node (node0) still
# signals the segwit version bit; the non-segwit node (node2) does not.
assert_equal((block_version & (1 << VB_WITNESS_BIT) != 0), node == self.nodes[0])
# If we don't specify the segwit rule, then we won't get a default
# commitment.
assert('default_witness_commitment' not in gbt_results)
# Workaround:
# Can either change the tip, or change the mempool and wait 5 seconds
# to trigger a recomputation of getblocktemplate.
txid = int(self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1), 16)
# Using mocktime lets us avoid sleep()
sync_mempools(self.nodes)
self.nodes[0].setmocktime(int(time.time()) + 10)
self.nodes[2].setmocktime(int(time.time()) + 10)
for node in [self.nodes[0], self.nodes[2]]:
gbt_results = node.getblocktemplate({"rules": ["segwit"]})
block_version = gbt_results['version']
if node == self.nodes[2]:
# If this is a non-segwit node, we should still not get a witness
# commitment, nor a version bit signalling segwit.
assert_equal(block_version & (1 << VB_WITNESS_BIT), 0)
assert('default_witness_commitment' not in gbt_results)
else:
# For segwit-aware nodes, check the version bit and the witness
# commitment are correct.
assert(block_version & (1 << VB_WITNESS_BIT) != 0)
assert('default_witness_commitment' in gbt_results)
witness_commitment = gbt_results['default_witness_commitment']
# Check that default_witness_commitment matches the commitment we compute ourselves.
witness_root = CBlock.get_merkle_root([ser_uint256(0),
ser_uint256(txid)])
script = get_witness_script(witness_root, 0)
assert_equal(witness_commitment, bytes_to_hex_str(script))
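# The default witness commitment is the scriptPubKey of a coinbase OP_RETURN output:
# OP_RETURN followed by a 36-byte push of the 0xaa21a9ed header plus
# double-SHA256(witness merkle root || witness reserved value), built here with nonce 0.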
# undo mocktime
self.nodes[0].setmocktime(0)
self.nodes[2].setmocktime(0)
@subtest
def advance_to_segwit_lockin(self):
"""Mine enough blocks to lock in segwit, but don't activate."""
height = self.nodes[0].getblockcount()
# Advance to end of period, and verify lock-in happens at the end
self.nodes[0].generate(VB_PERIOD - 1)
height = self.nodes[0].getblockcount()
assert((height % VB_PERIOD) == VB_PERIOD - 2)
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'started')
self.nodes[0].generate(1)
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'locked_in')
self.segwit_status = 'locked_in'
@subtest
def test_witness_tx_relay_before_segwit_activation(self):
# Generate a transaction that doesn't require a witness, but send it
# with a witness. Should be rejected for premature-witness, but should
# not be added to recently rejected list.
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE])))
tx.wit.vtxinwit.append(CTxInWitness())
tx.wit.vtxinwit[0].scriptWitness.stack = [b'a']
tx.rehash()
tx_hash = tx.sha256
tx_value = tx.vout[0].nValue
# Verify that if a peer doesn't set nServices to include NODE_WITNESS,
# the getdata is just for the non-witness portion.
self.old_node.announce_tx_and_wait_for_getdata(tx)
assert(self.old_node.last_message["getdata"].inv[0].type == 1)
# Since we haven't delivered the tx yet, inv'ing the same tx from
# a witness transaction ought not result in a getdata.
self.test_node.announce_tx_and_wait_for_getdata(tx, timeout=2, success=False)
# Delivering this transaction with witness should fail (no matter who
# it's from)
assert_equal(len(self.nodes[0].getrawmempool()), 0)
assert_equal(len(self.nodes[1].getrawmempool()), 0)
test_transaction_acceptance(self.nodes[0], self.old_node, tx, with_witness=True, accepted=False)
test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=True, accepted=False)
# But eliminating the witness should fix it
test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=False, accepted=True)
# Cleanup: mine the first transaction and update utxo
self.nodes[0].generate(1)
assert_equal(len(self.nodes[0].getrawmempool()), 0)
self.utxo.pop(0)
self.utxo.append(UTXO(tx_hash, 0, tx_value))
@subtest
def test_standardness_v0(self):
"""Test V0 txout standardness.
V0 segwit outputs and inputs are always standard.
V0 segwit inputs may only be mined after activation."""
witness_program = CScript([OP_TRUE])
witness_hash = sha256(witness_program)
script_pubkey = CScript([OP_0, witness_hash])
p2sh_pubkey = hash160(witness_program)
p2sh_script_pubkey = CScript([OP_HASH160, p2sh_pubkey, OP_EQUAL])
# First prepare a p2sh output (so that spending it will pass standardness)
p2sh_tx = CTransaction()
p2sh_tx.vin = [CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")]
p2sh_tx.vout = [CTxOut(self.utxo[0].nValue - 1000, p2sh_script_pubkey)]
p2sh_tx.rehash()
# Mine it on test_node to create the confirmed output.
test_transaction_acceptance(self.nodes[0], self.test_node, p2sh_tx, with_witness=True, accepted=True)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
# Now test standardness of v0 P2WSH outputs.
# Start by creating a transaction with two outputs.
tx = CTransaction()
tx.vin = [CTxIn(COutPoint(p2sh_tx.sha256, 0), CScript([witness_program]))]
tx.vout = [CTxOut(p2sh_tx.vout[0].nValue - 10000, script_pubkey)]
tx.vout.append(CTxOut(8000, script_pubkey)) # Might burn this later
tx.vin[0].nSequence = BIP125_SEQUENCE_NUMBER # Just to have the option to bump this tx from the mempool
tx.rehash()
# This is always accepted, since the mempool policy is to consider segwit as always active
# and thus allow segwit outputs
test_transaction_acceptance(self.nodes[1], self.std_node, tx, with_witness=True, accepted=True)
# Now create something that looks like a P2PKH output. This won't be spendable.
script_pubkey = CScript([OP_0, hash160(witness_hash)])
tx2 = CTransaction()
# tx was accepted, so we spend the second output.
tx2.vin = [CTxIn(COutPoint(tx.sha256, 1), b"")]
tx2.vout = [CTxOut(7000, script_pubkey)]
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
tx2.rehash()
test_transaction_acceptance(self.nodes[1], self.std_node, tx2, with_witness=True, accepted=True)
# Now update self.utxo for later tests.
tx3 = CTransaction()
# tx and tx2 were both accepted. Don't bother trying to reclaim the
# P2PKH output; just send tx's first output back to an anyone-can-spend.
sync_mempools([self.nodes[0], self.nodes[1]])
tx3.vin = [CTxIn(COutPoint(tx.sha256, 0), b"")]
tx3.vout = [CTxOut(tx.vout[0].nValue - 1000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE]))]
tx3.wit.vtxinwit.append(CTxInWitness())
tx3.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
tx3.rehash()
if self.segwit_status != 'active':
# Just check mempool acceptance, but don't add the transaction to the mempool, since witness is disallowed
# in blocks and the tx is impossible to mine right now.
assert_equal(self.nodes[0].testmempoolaccept([bytes_to_hex_str(tx3.serialize_with_witness())]), [{'txid': tx3.hash, 'allowed': True}])
# Build a transaction with the same output as tx3, but do it by modifying tx
# (a non-witness spend of the P2SH output), so it can be mined before activation.
tx3_out = tx3.vout[0]
tx3 = tx
tx3.vout = [tx3_out]
tx3.rehash()
assert_equal(self.nodes[0].testmempoolaccept([bytes_to_hex_str(tx3.serialize_with_witness())]), [{'txid': tx3.hash, 'allowed': True}])
test_transaction_acceptance(self.nodes[0], self.test_node, tx3, with_witness=True, accepted=True)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
self.utxo.pop(0)
self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
assert_equal(len(self.nodes[1].getrawmempool()), 0)
@subtest
def advance_to_segwit_active(self):
"""Mine enough blocks to activate segwit."""
height = self.nodes[0].getblockcount()
self.nodes[0].generate(VB_PERIOD - (height % VB_PERIOD) - 2)
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'locked_in')
self.nodes[0].generate(1)
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'active')
self.segwit_status = 'active'
@subtest
def test_p2sh_witness(self):
"""Test P2SH wrapped witness programs."""
# Prepare the p2sh-wrapped witness output
witness_program = CScript([OP_DROP, OP_TRUE])
witness_hash = sha256(witness_program)
p2wsh_pubkey = CScript([OP_0, witness_hash])
p2sh_witness_hash = hash160(p2wsh_pubkey)
script_pubkey = CScript([OP_HASH160, p2sh_witness_hash, OP_EQUAL])
script_sig = CScript([p2wsh_pubkey]) # a push of the redeem script
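# When spending P2SH-P2WSH, the scriptSig must be exactly one push of the redeem script
# (the OP_0 <sha256> witness program); the actual unlocking data and the witness script
# itself go into the input's witness stack instead.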
# Fund the P2SH output
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, script_pubkey))
tx.rehash()
# Verify mempool acceptance and block validity
test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=False, accepted=True)
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True, with_witness=True)
sync_blocks(self.nodes)
# Now test attempts to spend the output.
spend_tx = CTransaction()
spend_tx.vin.append(CTxIn(COutPoint(tx.sha256, 0), script_sig))
spend_tx.vout.append(CTxOut(tx.vout[0].nValue - 1000, CScript([OP_TRUE])))
spend_tx.rehash()
# This transaction should not be accepted into the mempool pre- or
# post-segwit. Mempool acceptance will use SCRIPT_VERIFY_WITNESS which
# will require a witness to spend a witness program regardless of
# segwit activation. Note that older bitcoind's that are not
# segwit-aware would also reject this for failing CLEANSTACK.
test_transaction_acceptance(self.nodes[0], self.test_node, spend_tx, with_witness=False, accepted=False)
# Try to put the witness script in the script_sig, should also fail.
spend_tx.vin[0].script_sig = CScript([p2wsh_pubkey, b'a'])
spend_tx.rehash()
test_transaction_acceptance(self.nodes[0], self.test_node, spend_tx, with_witness=False, accepted=False)
# Now put the witness script in the witness, should succeed after
# segwit activates.
spend_tx.vin[0].scriptSig = script_sig
spend_tx.rehash()
spend_tx.wit.vtxinwit.append(CTxInWitness())
spend_tx.wit.vtxinwit[0].scriptWitness.stack = [b'a', witness_program]
# Verify mempool acceptance
test_transaction_acceptance(self.nodes[0], self.test_node, spend_tx, with_witness=True, accepted=True)
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [spend_tx])
# If we're after activation, then sending this with witnesses should be valid.
# This no longer works before activation, because SCRIPT_VERIFY_WITNESS
# is always set.
# TODO: rewrite this test to make clear that it only works after activation.
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Update self.utxo
self.utxo.pop(0)
self.utxo.append(UTXO(spend_tx.sha256, 0, spend_tx.vout[0].nValue))
@subtest
def test_witness_commitments(self):
"""Test witness commitments.
This test can only be run after segwit has activated."""
# First try a correct witness commitment.
block = self.build_next_block()
add_witness_commitment(block)
block.solve()
# Test the test -- witness serialization should be different
assert(msg_witness_block(block).serialize() != msg_block(block).serialize())
# This empty block should be valid.
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Try to tweak the nonce
block_2 = self.build_next_block()
add_witness_commitment(block_2, nonce=28)
block_2.solve()
# The commitment should have changed!
assert(block_2.vtx[0].vout[-1] != block.vtx[0].vout[-1])
# This should also be valid.
test_witness_block(self.nodes[0], self.test_node, block_2, accepted=True)
# Now test commitments with actual transactions
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
# Let's construct a witness program
witness_program = CScript([OP_TRUE])
witness_hash = sha256(witness_program)
script_pubkey = CScript([OP_0, witness_hash])
tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, script_pubkey))
tx.rehash()
# tx2 will spend tx1, and send back to a regular anyone-can-spend address
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue - 1000, witness_program))
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
tx2.rehash()
block_3 = self.build_next_block()
self.update_witness_block_with_transactions(block_3, [tx, tx2], nonce=1)
# Add an extra OP_RETURN output that matches the witness commitment template,
# even though it has extra data after the incorrect commitment.
# This block should fail.
block_3.vtx[0].vout.append(CTxOut(0, CScript([OP_RETURN, WITNESS_COMMITMENT_HEADER + ser_uint256(2), 10])))
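# Per BIP141, if several coinbase outputs match the commitment pattern
# (OP_RETURN plus the 0xaa21a9ed header), the one with the highest output index is
# treated as the commitment, so this appended bogus commitment is the one validated,
# and it does not match the block's witness data.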
block_3.vtx[0].rehash()
block_3.hashMerkleRoot = block_3.calc_merkle_root()
block_3.rehash()
block_3.solve()
test_witness_block(self.nodes[0], self.test_node, block_3, accepted=False)
# Add a different commitment with different nonce, but in the
# right location, and with some funds burned(!).
# This should succeed (nValue shouldn't affect finding the
# witness commitment).
add_witness_commitment(block_3, nonce=0)
block_3.vtx[0].vout[0].nValue -= 1
block_3.vtx[0].vout[-1].nValue += 1
block_3.vtx[0].rehash()
block_3.hashMerkleRoot = block_3.calc_merkle_root()
block_3.rehash()
assert(len(block_3.vtx[0].vout) == 4) # coinbase reward output plus 3 OP_RETURN outputs
block_3.solve()
test_witness_block(self.nodes[0], self.test_node, block_3, accepted=True)
# Finally test that a block with no witness transactions can
# omit the commitment.
block_4 = self.build_next_block()
tx3 = CTransaction()
tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), b""))
tx3.vout.append(CTxOut(tx.vout[0].nValue - 1000, witness_program))
tx3.rehash()
block_4.vtx.append(tx3)
block_4.hashMerkleRoot = block_4.calc_merkle_root()
block_4.solve()
test_witness_block(self.nodes[0], self.test_node, block_4, with_witness=False, accepted=True)
# Update available utxo's for use in later test.
self.utxo.pop(0)
self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
@subtest
def test_block_malleability(self):
# Make sure that a block that has too big a virtual size
# because of a too-large coinbase witness is not permanently
# marked bad.
block = self.build_next_block()
add_witness_commitment(block)
block.solve()
block.vtx[0].wit.vtxinwit[0].scriptWitness.stack.append(b'a' * 5000000)
assert(get_virtual_size(block) > MAX_BLOCK_BASE_SIZE)
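# get_virtual_size() is roughly ceil((3 * base_size + total_size) / 4) (an assumption
# about the helper), so ~5MB of coinbase witness alone adds ~1.25M vbytes and pushes the
# block well past MAX_BLOCK_BASE_SIZE, while the block header hash is unchanged.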
# We can't send over the p2p network, because this is too big to relay
# TODO: repeat this test with a block that can be relayed
self.nodes[0].submitblock(bytes_to_hex_str(block.serialize(True)))
assert(self.nodes[0].getbestblockhash() != block.hash)
block.vtx[0].wit.vtxinwit[0].scriptWitness.stack.pop()
assert(get_virtual_size(block) < MAX_BLOCK_BASE_SIZE)
self.nodes[0].submitblock(bytes_to_hex_str(block.serialize(True)))
assert(self.nodes[0].getbestblockhash() == block.hash)
# Now make sure that malleating the witness reserved value doesn't
# result in a block permanently marked bad.
block = self.build_next_block()
add_witness_commitment(block)
block.solve()
# Change the nonce -- should not cause the block to be permanently
# failed
block.vtx[0].wit.vtxinwit[0].scriptWitness.stack = [ser_uint256(1)]
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Changing the witness reserved value doesn't change the block hash
block.vtx[0].wit.vtxinwit[0].scriptWitness.stack = [ser_uint256(0)]
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
@subtest
def test_witness_block_size(self):
# TODO: Test that non-witness carrying blocks can't exceed 1MB
# Skipping this test for now; this is covered in p2p-fullblocktest.py
# Test that witness-bearing blocks are limited at ceil(base + wit/4) <= 1MB.
block = self.build_next_block()
assert(len(self.utxo) > 0)
# Create a P2WSH transaction.
# The witness program will be a bunch of OP_2DROP's, followed by OP_TRUE.
# This should give us plenty of room to tweak the spending tx's
# virtual size.
NUM_DROPS = 200 # 201 max ops per script!
NUM_OUTPUTS = 50
witness_program = CScript([OP_2DROP] * NUM_DROPS + [OP_TRUE])
witness_hash = uint256_from_str(sha256(witness_program))
script_pubkey = CScript([OP_0, ser_uint256(witness_hash)])
prevout = COutPoint(self.utxo[0].sha256, self.utxo[0].n)
value = self.utxo[0].nValue
parent_tx = CTransaction()
parent_tx.vin.append(CTxIn(prevout, b""))
child_value = int(value / NUM_OUTPUTS)
for i in range(NUM_OUTPUTS):
parent_tx.vout.append(CTxOut(child_value, script_pubkey))
parent_tx.vout[0].nValue -= 50000
assert(parent_tx.vout[0].nValue > 0)
parent_tx.rehash()
child_tx = CTransaction()
for i in range(NUM_OUTPUTS):
child_tx.vin.append(CTxIn(COutPoint(parent_tx.sha256, i), b""))
child_tx.vout = [CTxOut(value - 100000, CScript([OP_TRUE]))]
for i in range(NUM_OUTPUTS):
child_tx.wit.vtxinwit.append(CTxInWitness())
child_tx.wit.vtxinwit[-1].scriptWitness.stack = [b'a' * 195] * (2 * NUM_DROPS) + [witness_program]
child_tx.rehash()
self.update_witness_block_with_transactions(block, [parent_tx, child_tx])
vsize = get_virtual_size(block)
additional_bytes = (MAX_BLOCK_BASE_SIZE - vsize) * 4
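# Witness bytes count 1/4 toward virtual size, so closing a vsize gap of
# (MAX_BLOCK_BASE_SIZE - vsize) takes about four times that many extra witness bytes;
# the loop below adds one byte more than the gap, so the resulting vsize ends up at
# MAX_BLOCK_BASE_SIZE + 1.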
i = 0
while additional_bytes > 0:
# Add some more bytes to each input until we hit MAX_BLOCK_BASE_SIZE+1
extra_bytes = min(additional_bytes + 1, 55)
block.vtx[-1].wit.vtxinwit[int(i / (2 * NUM_DROPS))].scriptWitness.stack[i % (2 * NUM_DROPS)] = b'a' * (195 + extra_bytes)
additional_bytes -= extra_bytes
i += 1
block.vtx[0].vout.pop() # Remove old commitment
add_witness_commitment(block)
block.solve()
vsize = get_virtual_size(block)
assert_equal(vsize, MAX_BLOCK_BASE_SIZE + 1)
# Make sure that our test case would exceed the old max-network-message
# limit
assert(len(block.serialize(True)) > 2 * 1024 * 1024)
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Now resize the second transaction to make the block fit.
cur_length = len(block.vtx[-1].wit.vtxinwit[0].scriptWitness.stack[0])
block.vtx[-1].wit.vtxinwit[0].scriptWitness.stack[0] = b'a' * (cur_length - 1)
block.vtx[0].vout.pop()
add_witness_commitment(block)
block.solve()
assert(get_virtual_size(block) == MAX_BLOCK_BASE_SIZE)
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Update available utxo's
self.utxo.pop(0)
self.utxo.append(UTXO(block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue))
@subtest
def test_submit_block(self):
"""Test that submitblock adds the nonce automatically when possible."""
block = self.build_next_block()
# Try using a custom nonce and then don't supply it.
# This shouldn't possibly work.
add_witness_commitment(block, nonce=1)
block.vtx[0].wit = CTxWitness() # drop the nonce
block.solve()
self.nodes[0].submitblock(bytes_to_hex_str(block.serialize(True)))
assert(self.nodes[0].getbestblockhash() != block.hash)
# Now redo commitment with the standard nonce, but let bitcoind fill it in.
add_witness_commitment(block, nonce=0)
block.vtx[0].wit = CTxWitness()
block.solve()
self.nodes[0].submitblock(bytes_to_hex_str(block.serialize(True)))
assert_equal(self.nodes[0].getbestblockhash(), block.hash)
# This time, add a tx with non-empty witness, but don't supply
# the commitment.
block_2 = self.build_next_block()
add_witness_commitment(block_2)
block_2.solve()
# Drop commitment and nonce -- submitblock should not fill in.
block_2.vtx[0].vout.pop()
block_2.vtx[0].wit = CTxWitness()
self.nodes[0].submitblock(bytes_to_hex_str(block_2.serialize(True)))
# Tip should not advance!
assert(self.nodes[0].getbestblockhash() != block_2.hash)
@subtest
def test_extra_witness_data(self):
"""Test extra witness data in a transaction."""
block = self.build_next_block()
witness_program = CScript([OP_DROP, OP_TRUE])
witness_hash = sha256(witness_program)
script_pubkey = CScript([OP_0, witness_hash])
# First try extra witness data on a tx that doesn't require a witness
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue - 2000, script_pubkey))
tx.vout.append(CTxOut(1000, CScript([OP_TRUE]))) # non-witness output
tx.wit.vtxinwit.append(CTxInWitness())
tx.wit.vtxinwit[0].scriptWitness.stack = [CScript([])]
tx.rehash()
self.update_witness_block_with_transactions(block, [tx])
# Extra witness data should not be allowed.
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Try extra signature data. Ok if we're not spending a witness output.
block.vtx[1].wit.vtxinwit = []
block.vtx[1].vin[0].scriptSig = CScript([OP_0])
block.vtx[1].rehash()
add_witness_commitment(block)
block.solve()
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Now try extra witness/signature data on an input that DOES require a
# witness
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b"")) # witness output
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 1), b"")) # non-witness
tx2.vout.append(CTxOut(tx.vout[0].nValue, CScript([OP_TRUE])))
tx2.wit.vtxinwit.extend([CTxInWitness(), CTxInWitness()])
tx2.wit.vtxinwit[0].scriptWitness.stack = [CScript([CScriptNum(1)]), CScript([CScriptNum(1)]), witness_program]
tx2.wit.vtxinwit[1].scriptWitness.stack = [CScript([OP_TRUE])]
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx2])
# This has extra witness data, so it should fail.
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Now get rid of the extra witness, but add extra scriptSig data
tx2.vin[0].scriptSig = CScript([OP_TRUE])
tx2.vin[1].scriptSig = CScript([OP_TRUE])
tx2.wit.vtxinwit[0].scriptWitness.stack.pop(0)
tx2.wit.vtxinwit[1].scriptWitness.stack = []
tx2.rehash()
add_witness_commitment(block)
block.solve()
# This has extra signature data for a witness input, so it should fail.
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Now get rid of the extra scriptsig on the witness input, and verify
# success (even with extra scriptsig data in the non-witness input)
tx2.vin[0].scriptSig = b""
tx2.rehash()
add_witness_commitment(block)
block.solve()
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Update utxo for later tests
self.utxo.pop(0)
self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
@subtest
def test_max_witness_push_length(self):
"""Test that witness stack can only allow up to 520 byte pushes."""
block = self.build_next_block()
witness_program = CScript([OP_DROP, OP_TRUE])
witness_hash = sha256(witness_program)
script_pubkey = CScript([OP_0, witness_hash])
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, script_pubkey))
tx.rehash()
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue - 1000, CScript([OP_TRUE])))
tx2.wit.vtxinwit.append(CTxInWitness())
# First try a 521-byte stack element
tx2.wit.vtxinwit[0].scriptWitness.stack = [b'a' * (MAX_SCRIPT_ELEMENT_SIZE + 1), witness_program]
tx2.rehash()
self.update_witness_block_with_transactions(block, [tx, tx2])
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Now reduce the length of the stack element
tx2.wit.vtxinwit[0].scriptWitness.stack[0] = b'a' * (MAX_SCRIPT_ELEMENT_SIZE)
add_witness_commitment(block)
block.solve()
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Update the utxo for later tests
self.utxo.pop()
self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
@subtest
def test_max_witness_program_length(self):
"""Test that witness outputs greater than 10kB can't be spent."""
MAX_PROGRAM_LENGTH = 10000
# This program is 19 max pushes (9937 bytes), then 64 more opcode-bytes.
long_witness_program = CScript([b'a' * 520] * 19 + [OP_DROP] * 63 + [OP_TRUE])
assert(len(long_witness_program) == MAX_PROGRAM_LENGTH + 1)
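# Arithmetic check: each 520-byte push serializes as OP_PUSHDATA2 plus 2 length bytes
# plus 520 data bytes = 523 bytes, so 19 * 523 = 9937; adding 63 OP_DROPs and OP_TRUE
# gives 10001 bytes, one over the 10,000-byte witness script limit.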
long_witness_hash = sha256(long_witness_program)
long_script_pubkey = CScript([OP_0, long_witness_hash])
block = self.build_next_block()
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, long_script_pubkey))
tx.rehash()
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue - 1000, CScript([OP_TRUE])))
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [b'a'] * 44 + [long_witness_program]
tx2.rehash()
self.update_witness_block_with_transactions(block, [tx, tx2])
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Try again with one less byte in the witness program
witness_program = CScript([b'a' * 520] * 19 + [OP_DROP] * 62 + [OP_TRUE])
assert(len(witness_program) == MAX_PROGRAM_LENGTH)
witness_hash = sha256(witness_program)
script_pubkey = CScript([OP_0, witness_hash])
tx.vout[0] = CTxOut(tx.vout[0].nValue, script_pubkey)
tx.rehash()
tx2.vin[0].prevout.hash = tx.sha256
tx2.wit.vtxinwit[0].scriptWitness.stack = [b'a'] * 43 + [witness_program]
tx2.rehash()
block.vtx = [block.vtx[0]]
self.update_witness_block_with_transactions(block, [tx, tx2])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
self.utxo.pop()
self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
@subtest
def test_witness_input_length(self):
"""Test that vin length must match vtxinwit length."""
witness_program = CScript([OP_DROP, OP_TRUE])
witness_hash = sha256(witness_program)
script_pubkey = CScript([OP_0, witness_hash])
# Create a transaction that splits our utxo into many outputs
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
value = self.utxo[0].nValue
for i in range(10):
tx.vout.append(CTxOut(int(value / 10), script_pubkey))
tx.vout[0].nValue -= 1000
assert(tx.vout[0].nValue >= 0)
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Try various ways to spend tx that should all break.
# This "broken" transaction serializer will not normalize
# the length of vtxinwit.
class BrokenCTransaction(CTransaction):
def serialize_with_witness(self):
flags = 0
if not self.wit.is_null():
flags |= 1
r = b""
r += struct.pack("<i", self.nVersion)
if flags:
dummy = []
r += ser_vector(dummy)
r += struct.pack("<B", flags)
r += ser_vector(self.vin)
r += ser_vector(self.vout)
if flags & 1:
r += self.wit.serialize()
r += struct.pack("<I", self.nLockTime)
return r
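# The serializer above mimics the BIP144 layout: the empty vector serializes as the
# 0x00 marker byte, followed by the 0x01 flag, then the real vin/vout, and finally
# self.wit exactly as stored, so vtxinwit is NOT resized to match len(vin) the way the
# regular CTransaction serializer would do.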
tx2 = BrokenCTransaction()
for i in range(10):
tx2.vin.append(CTxIn(COutPoint(tx.sha256, i), b""))
tx2.vout.append(CTxOut(value - 3000, CScript([OP_TRUE])))
# First try using a too long vtxinwit
for i in range(11):
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[i].scriptWitness.stack = [b'a', witness_program]
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx2])
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Now try using a too short vtxinwit
tx2.wit.vtxinwit.pop()
tx2.wit.vtxinwit.pop()
block.vtx = [block.vtx[0]]
self.update_witness_block_with_transactions(block, [tx2])
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Now make one of the intermediate witnesses be incorrect
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[-1].scriptWitness.stack = [b'a', witness_program]
tx2.wit.vtxinwit[5].scriptWitness.stack = [witness_program]
block.vtx = [block.vtx[0]]
self.update_witness_block_with_transactions(block, [tx2])
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Fix the broken witness and the block should be accepted.
tx2.wit.vtxinwit[5].scriptWitness.stack = [b'a', witness_program]
block.vtx = [block.vtx[0]]
self.update_witness_block_with_transactions(block, [tx2])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
self.utxo.pop()
self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
@subtest
def test_tx_relay_after_segwit_activation(self):
"""Test transaction relay after segwit activation.
After segwit activates, verify that mempool:
- rejects transactions with unnecessary/extra witnesses
- accepts transactions with valid witnesses
and that witness transactions are relayed to non-upgraded peers."""
# Generate a transaction that doesn't require a witness, but send it
# with a witness. Should be rejected because we can't use a witness
# when spending a non-witness output.
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE])))
tx.wit.vtxinwit.append(CTxInWitness())
tx.wit.vtxinwit[0].scriptWitness.stack = [b'a']
tx.rehash()
tx_hash = tx.sha256
# Verify that unnecessary witnesses are rejected.
self.test_node.announce_tx_and_wait_for_getdata(tx)
assert_equal(len(self.nodes[0].getrawmempool()), 0)
test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=True, accepted=False)
# Verify that removing the witness succeeds.
self.test_node.announce_tx_and_wait_for_getdata(tx)
test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=False, accepted=True)
# Now try to add extra witness data to a valid witness tx.
witness_program = CScript([OP_TRUE])
witness_hash = sha256(witness_program)
script_pubkey = CScript([OP_0, witness_hash])
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx_hash, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue - 1000, script_pubkey))
tx2.rehash()
tx3 = CTransaction()
tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), b""))
tx3.wit.vtxinwit.append(CTxInWitness())
# Add a witness that is too large for IsStandard and check that it does not enter the reject filter
p2sh_program = CScript([OP_TRUE])
p2sh_pubkey = hash160(p2sh_program)
witness_program2 = CScript([b'a' * 400000])
tx3.vout.append(CTxOut(tx2.vout[0].nValue - 1000, CScript([OP_HASH160, p2sh_pubkey, OP_EQUAL])))
tx3.wit.vtxinwit[0].scriptWitness.stack = [witness_program2]
tx3.rehash()
# Node will not be blinded to the transaction
self.std_node.announce_tx_and_wait_for_getdata(tx3)
test_transaction_acceptance(self.nodes[1], self.std_node, tx3, True, False, b'tx-size')
self.std_node.announce_tx_and_wait_for_getdata(tx3)
test_transaction_acceptance(self.nodes[1], self.std_node, tx3, True, False, b'tx-size')
# Remove witness stuffing, instead add extra witness push on stack
tx3.vout[0] = CTxOut(tx2.vout[0].nValue - 1000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE]))
tx3.wit.vtxinwit[0].scriptWitness.stack = [CScript([CScriptNum(1)]), witness_program]
tx3.rehash()
test_transaction_acceptance(self.nodes[0], self.test_node, tx2, with_witness=True, accepted=True)
test_transaction_acceptance(self.nodes[0], self.test_node, tx3, with_witness=True, accepted=False)
# Get rid of the extra witness, and verify acceptance.
tx3.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
# Also check that old_node gets a tx announcement, even though this is
# a witness transaction.
self.old_node.wait_for_inv([CInv(1, tx2.sha256)]) # wait until tx2 was inv'ed
test_transaction_acceptance(self.nodes[0], self.test_node, tx3, with_witness=True, accepted=True)
self.old_node.wait_for_inv([CInv(1, tx3.sha256)])
# Test that getrawtransaction returns correct witness information
# hash, size, vsize
raw_tx = self.nodes[0].getrawtransaction(tx3.hash, 1)
assert_equal(int(raw_tx["hash"], 16), tx3.calc_sha256(True))
assert_equal(raw_tx["size"], len(tx3.serialize_with_witness()))
weight = len(tx3.serialize_with_witness()) + 3 * len(tx3.serialize_without_witness())
vsize = math.ceil(weight / 4)
assert_equal(raw_tx["vsize"], vsize)
assert_equal(raw_tx["weight"], weight)
assert_equal(len(raw_tx["vin"][0]["txinwitness"]), 1)
assert_equal(raw_tx["vin"][0]["txinwitness"][0], hexlify(witness_program).decode('ascii'))
assert(vsize != raw_tx["size"])
# Cleanup: mine the transactions and update utxo for next test
self.nodes[0].generate(1)
assert_equal(len(self.nodes[0].getrawmempool()), 0)
self.utxo.pop(0)
self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
@subtest
def test_segwit_versions(self):
"""Test validity of future segwit version transactions.
Future segwit version transactions are non-standard, but valid in blocks.
Can run this before and after segwit activation."""
NUM_SEGWIT_VERSIONS = 17 # will test OP_0, OP_1, ..., OP_16
if len(self.utxo) < NUM_SEGWIT_VERSIONS:
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
split_value = (self.utxo[0].nValue - 4000) // NUM_SEGWIT_VERSIONS
for i in range(NUM_SEGWIT_VERSIONS):
tx.vout.append(CTxOut(split_value, CScript([OP_TRUE])))
tx.rehash()
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
self.utxo.pop(0)
for i in range(NUM_SEGWIT_VERSIONS):
self.utxo.append(UTXO(tx.sha256, i, split_value))
sync_blocks(self.nodes)
temp_utxo = []
tx = CTransaction()
witness_program = CScript([OP_TRUE])
witness_hash = sha256(witness_program)
assert_equal(len(self.nodes[1].getrawmempool()), 0)
for version in list(range(OP_1, OP_16 + 1)) + [OP_0]:
# First try to spend to a future version segwit script_pubkey.
script_pubkey = CScript([CScriptOp(version), witness_hash])
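# Witness versions 1 through 16 (OP_1..OP_16) are reserved for future soft forks: under
# the consensus rules exercised here any witness satisfies them, but policy treats
# spending them as non-standard, which is what the acceptance checks below exercise.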
tx.vin = [CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")]
tx.vout = [CTxOut(self.utxo[0].nValue - 1000, script_pubkey)]
tx.rehash()
test_transaction_acceptance(self.nodes[1], self.std_node, tx, with_witness=True, accepted=False)
test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=True, accepted=True)
self.utxo.pop(0)
temp_utxo.append(UTXO(tx.sha256, 0, tx.vout[0].nValue))
self.nodes[0].generate(1) # Mine all the transactions
sync_blocks(self.nodes)
assert(len(self.nodes[0].getrawmempool()) == 0)
# Finally, verify that version 0 -> version 1 transactions
# are non-standard
script_pubkey = CScript([CScriptOp(OP_1), witness_hash])
tx2 = CTransaction()
tx2.vin = [CTxIn(COutPoint(tx.sha256, 0), b"")]
tx2.vout = [CTxOut(tx.vout[0].nValue - 1000, script_pubkey)]
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
tx2.rehash()
# Accepted by node0 (test_node) because output standardness is not enforced
# when fRequireStandard is false; the standard node (node1) rejects it.
test_transaction_acceptance(self.nodes[0], self.test_node, tx2, with_witness=True, accepted=True)
test_transaction_acceptance(self.nodes[1], self.std_node, tx2, with_witness=True, accepted=False)
temp_utxo.pop() # last entry in temp_utxo was the output we just spent
temp_utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
# Spend everything in temp_utxo back to an OP_TRUE output.
tx3 = CTransaction()
total_value = 0
for i in temp_utxo:
tx3.vin.append(CTxIn(COutPoint(i.sha256, i.n), b""))
tx3.wit.vtxinwit.append(CTxInWitness())
total_value += i.nValue
tx3.wit.vtxinwit[-1].scriptWitness.stack = [witness_program]
tx3.vout.append(CTxOut(total_value - 1000, CScript([OP_TRUE])))
tx3.rehash()
# Spending a higher version witness output is not allowed by policy,
# even with fRequireStandard=false.
test_transaction_acceptance(self.nodes[0], self.test_node, tx3, with_witness=True, accepted=False)
self.test_node.sync_with_ping()
with mininode_lock:
assert(b"reserved for soft-fork upgrades" in self.test_node.last_message["reject"].reason)
# Building a block with the transaction must be valid, however.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx2, tx3])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
sync_blocks(self.nodes)
# Add utxo to our list
self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
@subtest
def test_premature_coinbase_witness_spend(self):
block = self.build_next_block()
# Change the output of the block to be a witness output.
witness_program = CScript([OP_TRUE])
witness_hash = sha256(witness_program)
script_pubkey = CScript([OP_0, witness_hash])
block.vtx[0].vout[0].scriptPubKey = script_pubkey
# This next line will rehash the coinbase and update the merkle
# root, and solve.
self.update_witness_block_with_transactions(block, [])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
spend_tx = CTransaction()
spend_tx.vin = [CTxIn(COutPoint(block.vtx[0].sha256, 0), b"")]
spend_tx.vout = [CTxOut(block.vtx[0].vout[0].nValue, witness_program)]
spend_tx.wit.vtxinwit.append(CTxInWitness())
spend_tx.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
spend_tx.rehash()
# Now test a premature spend.
self.nodes[0].generate(98)
sync_blocks(self.nodes)
block2 = self.build_next_block()
self.update_witness_block_with_transactions(block2, [spend_tx])
test_witness_block(self.nodes[0], self.test_node, block2, accepted=False)
# Advancing one more block should allow the spend.
self.nodes[0].generate(1)
block2 = self.build_next_block()
self.update_witness_block_with_transactions(block2, [spend_tx])
test_witness_block(self.nodes[0], self.test_node, block2, accepted=True)
sync_blocks(self.nodes)
@subtest
def test_uncompressed_pubkey(self):
"""Test uncompressed pubkey validity in segwit transactions.
Uncompressed pubkeys are no longer supported in default relay policy,
but (for now) are still valid in blocks."""
# Segwit transactions using uncompressed pubkeys are not accepted
# under default policy, but should still pass consensus.
key = CECKey()
key.set_secretbytes(b"9")
key.set_compressed(False)
pubkey = CPubKey(key.get_pubkey())
assert_equal(len(pubkey), 65) # This should be an uncompressed pubkey
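# An uncompressed pubkey is 65 bytes (an 0x04 prefix plus the 32-byte X and Y
# coordinates), versus 33 bytes for a compressed key; standardness rules reject
# uncompressed keys in segwit scripts, as the policy failures below show.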
utxo = self.utxo.pop(0)
# Test 1: P2WPKH
# First create a P2WPKH output that uses an uncompressed pubkey
pubkeyhash = hash160(pubkey)
script_pkh = CScript([OP_0, pubkeyhash])
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(utxo.sha256, utxo.n), b""))
tx.vout.append(CTxOut(utxo.nValue - 1000, script_pkh))
tx.rehash()
# Confirm it in a block.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Now try to spend it. Send it to a P2WSH output, which we'll
# use in the next test.
witness_program = CScript([pubkey, CScriptOp(OP_CHECKSIG)])
witness_hash = sha256(witness_program)
script_wsh = CScript([OP_0, witness_hash])
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue - 1000, script_wsh))
script = get_p2pkh_script(pubkeyhash)
sig_hash = SegwitVersion1SignatureHash(script, tx2, 0, SIGHASH_ALL, tx.vout[0].nValue)
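# Per BIP143, a P2WPKH input is signed with the scriptCode set to the corresponding
# P2PKH script (DUP HASH160 <pubkeyhash> EQUALVERIFY CHECKSIG), which is why the plain
# p2pkh script is passed to SegwitVersion1SignatureHash here.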
signature = key.sign(sig_hash) + b'\x01' # 0x1 is SIGHASH_ALL
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [signature, pubkey]
tx2.rehash()
# Should fail policy test.
test_transaction_acceptance(self.nodes[0], self.test_node, tx2, True, False, b'non-mandatory-script-verify-flag (Using non-compressed keys in segwit)')
# But passes consensus.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx2])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Test 2: P2WSH
# Try to spend the P2WSH output created in last test.
# Send it to a P2SH(P2WSH) output, which we'll use in the next test.
p2sh_witness_hash = hash160(script_wsh)
script_p2sh = CScript([OP_HASH160, p2sh_witness_hash, OP_EQUAL])
script_sig = CScript([script_wsh])
tx3 = CTransaction()
tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), b""))
tx3.vout.append(CTxOut(tx2.vout[0].nValue - 1000, script_p2sh))
tx3.wit.vtxinwit.append(CTxInWitness())
sign_p2pk_witness_input(witness_program, tx3, 0, SIGHASH_ALL, tx2.vout[0].nValue, key)
# Should fail policy test.
test_transaction_acceptance(self.nodes[0], self.test_node, tx3, True, False, b'non-mandatory-script-verify-flag (Using non-compressed keys in segwit)')
# But passes consensus.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx3])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Test 3: P2SH(P2WSH)
# Try to spend the P2SH output created in the last test.
# Send it to a P2PKH output, which we'll use in the next test.
script_pubkey = get_p2pkh_script(pubkeyhash)
tx4 = CTransaction()
tx4.vin.append(CTxIn(COutPoint(tx3.sha256, 0), script_sig))
tx4.vout.append(CTxOut(tx3.vout[0].nValue - 1000, script_pubkey))
tx4.wit.vtxinwit.append(CTxInWitness())
sign_p2pk_witness_input(witness_program, tx4, 0, SIGHASH_ALL, tx3.vout[0].nValue, key)
# Should fail policy test.
test_transaction_acceptance(self.nodes[0], self.test_node, tx4, True, False, b'non-mandatory-script-verify-flag (Using non-compressed keys in segwit)')
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx4])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Test 4: Uncompressed pubkeys should still be valid in non-segwit
# transactions.
tx5 = CTransaction()
tx5.vin.append(CTxIn(COutPoint(tx4.sha256, 0), b""))
tx5.vout.append(CTxOut(tx4.vout[0].nValue - 1000, CScript([OP_TRUE])))
(sig_hash, err) = SignatureHash(script_pubkey, tx5, 0, SIGHASH_ALL)
signature = key.sign(sig_hash) + b'\x01' # 0x1 is SIGHASH_ALL
tx5.vin[0].scriptSig = CScript([signature, pubkey])
tx5.rehash()
# Should pass policy and consensus.
test_transaction_acceptance(self.nodes[0], self.test_node, tx5, True, True)
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx5])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
self.utxo.append(UTXO(tx5.sha256, 0, tx5.vout[0].nValue))
@subtest
def test_signature_version_1(self):
key = CECKey()
key.set_secretbytes(b"9")
pubkey = CPubKey(key.get_pubkey())
witness_program = CScript([pubkey, CScriptOp(OP_CHECKSIG)])
witness_hash = sha256(witness_program)
script_pubkey = CScript([OP_0, witness_hash])
# First create a witness output for use in the tests.
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, script_pubkey))
tx.rehash()
test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=True, accepted=True)
# Mine this transaction in preparation for following tests.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
sync_blocks(self.nodes)
self.utxo.pop(0)
# Test each hashtype
prev_utxo = UTXO(tx.sha256, 0, tx.vout[0].nValue)
for sigflag in [0, SIGHASH_ANYONECANPAY]:
for hashtype in [SIGHASH_ALL, SIGHASH_NONE, SIGHASH_SINGLE]:
hashtype |= sigflag
block = self.build_next_block()
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(prev_utxo.sha256, prev_utxo.n), b""))
tx.vout.append(CTxOut(prev_utxo.nValue - 1000, script_pubkey))
tx.wit.vtxinwit.append(CTxInWitness())
# Too-large input value
sign_p2pk_witness_input(witness_program, tx, 0, hashtype, prev_utxo.nValue + 1, key)
self.update_witness_block_with_transactions(block, [tx])
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
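# The BIP143 digest commits to the amount of the output being spent, so a signature
# produced over nValue + 1 (or nValue - 1 below) cannot verify against the real amount.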
# Too-small input value
sign_p2pk_witness_input(witness_program, tx, 0, hashtype, prev_utxo.nValue - 1, key)
block.vtx.pop() # remove last tx
self.update_witness_block_with_transactions(block, [tx])
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Now try correct value
sign_p2pk_witness_input(witness_program, tx, 0, hashtype, prev_utxo.nValue, key)
block.vtx.pop()
self.update_witness_block_with_transactions(block, [tx])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
prev_utxo = UTXO(tx.sha256, 0, tx.vout[0].nValue)
# Test combinations of signature hashes.
# Split the utxo into a lot of outputs.
# Randomly choose up to 10 to spend, sign with different hashtypes, and
# output to a random number of outputs. Repeat NUM_SIGHASH_TESTS times.
# Ensure that we've tested a situation where we use SIGHASH_SINGLE with
# an input index > number of outputs.
NUM_SIGHASH_TESTS = 500
temp_utxos = []
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(prev_utxo.sha256, prev_utxo.n), b""))
split_value = prev_utxo.nValue // NUM_SIGHASH_TESTS
for i in range(NUM_SIGHASH_TESTS):
tx.vout.append(CTxOut(split_value, script_pubkey))
tx.wit.vtxinwit.append(CTxInWitness())
sign_p2pk_witness_input(witness_program, tx, 0, SIGHASH_ALL, prev_utxo.nValue, key)
for i in range(NUM_SIGHASH_TESTS):
temp_utxos.append(UTXO(tx.sha256, i, split_value))
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
block = self.build_next_block()
used_sighash_single_out_of_bounds = False
for i in range(NUM_SIGHASH_TESTS):
# Ping regularly to keep the connection alive
if (not i % 100):
self.test_node.sync_with_ping()
# Choose random number of inputs to use.
num_inputs = random.randint(1, 10)
# Create a slight bias for producing more utxos
num_outputs = random.randint(1, 11)
random.shuffle(temp_utxos)
assert(len(temp_utxos) > num_inputs)
tx = CTransaction()
total_value = 0
for i in range(num_inputs):
tx.vin.append(CTxIn(COutPoint(temp_utxos[i].sha256, temp_utxos[i].n), b""))
tx.wit.vtxinwit.append(CTxInWitness())
total_value += temp_utxos[i].nValue
split_value = total_value // num_outputs
for i in range(num_outputs):
tx.vout.append(CTxOut(split_value, script_pubkey))
for i in range(num_inputs):
# Now try to sign each input, using a random hashtype.
anyonecanpay = 0
if random.randint(0, 1):
anyonecanpay = SIGHASH_ANYONECANPAY
hashtype = random.randint(1, 3) | anyonecanpay
sign_p2pk_witness_input(witness_program, tx, i, hashtype, temp_utxos[i].nValue, key)
if (hashtype == SIGHASH_SINGLE and i >= num_outputs):
used_sighash_single_out_of_bounds = True
tx.rehash()
for i in range(num_outputs):
temp_utxos.append(UTXO(tx.sha256, i, split_value))
temp_utxos = temp_utxos[num_inputs:]
block.vtx.append(tx)
# Test the block periodically, if we're close to maxblocksize
if (get_virtual_size(block) > MAX_BLOCK_BASE_SIZE - 1000):
self.update_witness_block_with_transactions(block, [])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
block = self.build_next_block()
if (not used_sighash_single_out_of_bounds):
self.log.info("WARNING: this test run didn't attempt SIGHASH_SINGLE with out-of-bounds index value")
# Test the transactions we've added to the block
if (len(block.vtx) > 1):
self.update_witness_block_with_transactions(block, [])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Now test witness version 0 P2PKH transactions
pubkeyhash = hash160(pubkey)
script_pkh = CScript([OP_0, pubkeyhash])
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(temp_utxos[0].sha256, temp_utxos[0].n), b""))
tx.vout.append(CTxOut(temp_utxos[0].nValue, script_pkh))
tx.wit.vtxinwit.append(CTxInWitness())
sign_p2pk_witness_input(witness_program, tx, 0, SIGHASH_ALL, temp_utxos[0].nValue, key)
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue, CScript([OP_TRUE])))
script = get_p2pkh_script(pubkeyhash)
sig_hash = SegwitVersion1SignatureHash(script, tx2, 0, SIGHASH_ALL, tx.vout[0].nValue)
signature = key.sign(sig_hash) + b'\x01' # 0x1 is SIGHASH_ALL
# Check that we can't have a scriptSig
tx2.vin[0].scriptSig = CScript([signature, pubkey])
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx, tx2])
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Move the signature to the witness.
block.vtx.pop()
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [signature, pubkey]
tx2.vin[0].scriptSig = b""
tx2.rehash()
self.update_witness_block_with_transactions(block, [tx2])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
temp_utxos.pop(0)
# Update self.utxos for later tests by creating two outputs
# that consolidate all the coins in temp_utxos.
output_value = sum(i.nValue for i in temp_utxos) // 2
tx = CTransaction()
index = 0
# Just spend to our usual anyone-can-spend output
tx.vout = [CTxOut(output_value, CScript([OP_TRUE]))] * 2
for i in temp_utxos:
# Use SIGHASH_ALL|SIGHASH_ANYONECANPAY so we can build up
# the signatures as we go.
tx.vin.append(CTxIn(COutPoint(i.sha256, i.n), b""))
tx.wit.vtxinwit.append(CTxInWitness())
sign_p2pk_witness_input(witness_program, tx, index, SIGHASH_ALL | SIGHASH_ANYONECANPAY, i.nValue, key)
index += 1
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
for i in range(len(tx.vout)):
self.utxo.append(UTXO(tx.sha256, i, tx.vout[i].nValue))
@subtest
def test_non_standard_witness_blinding(self):
"""Test behavior of unnecessary witnesses in transactions does not blind the node for the transaction"""
# Create a p2sh output -- this is so we can pass the standardness
# rules (an anyone-can-spend OP_TRUE would be rejected, if not wrapped
# in P2SH).
p2sh_program = CScript([OP_TRUE])
p2sh_pubkey = hash160(p2sh_program)
script_pubkey = CScript([OP_HASH160, p2sh_pubkey, OP_EQUAL])
# Now check that unnecessary witnesses can't be used to blind a node
# to a transaction, eg by violating standardness checks.
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, script_pubkey))
tx.rehash()
test_transaction_acceptance(self.nodes[0], self.test_node, tx, False, True)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
# We'll add an unnecessary witness to this transaction that would cause
# it to be non-standard, to test that violating policy with a witness
# doesn't blind a node to a transaction. Transactions
# rejected for having a witness shouldn't be added
# to the rejection cache.
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), CScript([p2sh_program])))
tx2.vout.append(CTxOut(tx.vout[0].nValue - 1000, script_pubkey))
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [b'a' * 400]
tx2.rehash()
# This will be rejected due to a policy check:
# No witness is allowed, since it is not a witness program but a p2sh program
test_transaction_acceptance(self.nodes[1], self.std_node, tx2, True, False, b'bad-witness-nonstandard')
# If we send without witness, it should be accepted.
test_transaction_acceptance(self.nodes[1], self.std_node, tx2, False, True)
# Now create a new anyone-can-spend utxo for the next test.
tx3 = CTransaction()
tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), CScript([p2sh_program])))
tx3.vout.append(CTxOut(tx2.vout[0].nValue - 1000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE])))
tx3.rehash()
test_transaction_acceptance(self.nodes[0], self.test_node, tx2, False, True)
test_transaction_acceptance(self.nodes[0], self.test_node, tx3, False, True)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
# Update our utxo list; we spent the first entry.
self.utxo.pop(0)
self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
@subtest
def test_non_standard_witness(self):
"""Test detection of non-standard P2WSH witness"""
pad = chr(1).encode('latin-1')
# Create scripts for tests
scripts = []
scripts.append(CScript([OP_DROP] * 100))
scripts.append(CScript([OP_DROP] * 99))
scripts.append(CScript([pad * 59] * 59 + [OP_DROP] * 60))
scripts.append(CScript([pad * 59] * 59 + [OP_DROP] * 61))
p2wsh_scripts = []
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
# For each script, generate a pair of P2WSH and P2SH-P2WSH output.
outputvalue = (self.utxo[0].nValue - 1000) // (len(scripts) * 2)
for i in scripts:
p2wsh = CScript([OP_0, sha256(i)])
p2sh = hash160(p2wsh)
p2wsh_scripts.append(p2wsh)
tx.vout.append(CTxOut(outputvalue, p2wsh))
tx.vout.append(CTxOut(outputvalue, CScript([OP_HASH160, p2sh, OP_EQUAL])))
tx.rehash()
txid = tx.sha256
test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=False, accepted=True)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
# Creating transactions for tests
p2wsh_txs = []
p2sh_txs = []
for i in range(len(scripts)):
p2wsh_tx = CTransaction()
p2wsh_tx.vin.append(CTxIn(COutPoint(txid, i * 2)))
p2wsh_tx.vout.append(CTxOut(outputvalue - 5000, CScript([OP_0, hash160(hex_str_to_bytes(""))])))
p2wsh_tx.wit.vtxinwit.append(CTxInWitness())
p2wsh_tx.rehash()
p2wsh_txs.append(p2wsh_tx)
p2sh_tx = CTransaction()
p2sh_tx.vin.append(CTxIn(COutPoint(txid, i * 2 + 1), CScript([p2wsh_scripts[i]])))
p2sh_tx.vout.append(CTxOut(outputvalue - 5000, CScript([OP_0, hash160(hex_str_to_bytes(""))])))
p2sh_tx.wit.vtxinwit.append(CTxInWitness())
p2sh_tx.rehash()
p2sh_txs.append(p2sh_tx)
# Testing native P2WSH
# Witness stack size, excluding witnessScript, over 100 is non-standard
p2wsh_txs[0].wit.vtxinwit[0].scriptWitness.stack = [pad] * 101 + [scripts[0]]
test_transaction_acceptance(self.nodes[1], self.std_node, p2wsh_txs[0], True, False, b'bad-witness-nonstandard')
# Non-standard nodes should accept
test_transaction_acceptance(self.nodes[0], self.test_node, p2wsh_txs[0], True, True)
# Stack element size over 80 bytes is non-standard
p2wsh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 81] * 100 + [scripts[1]]
test_transaction_acceptance(self.nodes[1], self.std_node, p2wsh_txs[1], True, False, b'bad-witness-nonstandard')
# Non-standard nodes should accept
test_transaction_acceptance(self.nodes[0], self.test_node, p2wsh_txs[1], True, True)
# Standard nodes should accept if element size is not over 80 bytes
p2wsh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 80] * 100 + [scripts[1]]
test_transaction_acceptance(self.nodes[1], self.std_node, p2wsh_txs[1], True, True)
# witnessScript size at 3600 bytes is standard
p2wsh_txs[2].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, scripts[2]]
test_transaction_acceptance(self.nodes[0], self.test_node, p2wsh_txs[2], True, True)
test_transaction_acceptance(self.nodes[1], self.std_node, p2wsh_txs[2], True, True)
# witnessScript size at 3601 bytes is non-standard
p2wsh_txs[3].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, pad, scripts[3]]
test_transaction_acceptance(self.nodes[1], self.std_node, p2wsh_txs[3], True, False, b'bad-witness-nonstandard')
# Non-standard nodes should accept
test_transaction_acceptance(self.nodes[0], self.test_node, p2wsh_txs[3], True, True)
# Repeating the same tests with P2SH-P2WSH
p2sh_txs[0].wit.vtxinwit[0].scriptWitness.stack = [pad] * 101 + [scripts[0]]
test_transaction_acceptance(self.nodes[1], self.std_node, p2sh_txs[0], True, False, b'bad-witness-nonstandard')
test_transaction_acceptance(self.nodes[0], self.test_node, p2sh_txs[0], True, True)
p2sh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 81] * 100 + [scripts[1]]
test_transaction_acceptance(self.nodes[1], self.std_node, p2sh_txs[1], True, False, b'bad-witness-nonstandard')
test_transaction_acceptance(self.nodes[0], self.test_node, p2sh_txs[1], True, True)
p2sh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 80] * 100 + [scripts[1]]
test_transaction_acceptance(self.nodes[1], self.std_node, p2sh_txs[1], True, True)
p2sh_txs[2].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, scripts[2]]
test_transaction_acceptance(self.nodes[0], self.test_node, p2sh_txs[2], True, True)
test_transaction_acceptance(self.nodes[1], self.std_node, p2sh_txs[2], True, True)
p2sh_txs[3].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, pad, scripts[3]]
test_transaction_acceptance(self.nodes[1], self.std_node, p2sh_txs[3], True, False, b'bad-witness-nonstandard')
test_transaction_acceptance(self.nodes[0], self.test_node, p2sh_txs[3], True, True)
self.nodes[0].generate(1) # Mine and clean up the mempool of non-standard node
# Valid but non-standard transactions in a block should be accepted by standard node
sync_blocks(self.nodes)
assert_equal(len(self.nodes[0].getrawmempool()), 0)
assert_equal(len(self.nodes[1].getrawmempool()), 0)
self.utxo.pop(0)
@subtest
def test_upgrade_after_activation(self):
"""Test the behavior of starting up a segwit-aware node after the softfork has activated."""
print ("\tTesting rejection of block.nVersion < BIP9_TOP_BITS blocks")
block = self.build_next_block(version=4)
block.solve()
resp = self.nodes[0].submitblock(bytes_to_hex_str(block.serialize(True)))
assert_equal(resp, 'invalid')
# Restart with the new binary
self.stop_node(2)
self.start_node(2, extra_args=["-vbparams=segwit:0:999999999999"])
connect_nodes(self.nodes[0], 2)
sync_blocks(self.nodes)
# Make sure that this peer thinks segwit has activated.
assert(get_bip9_status(self.nodes[2], 'segwit')['status'] == "active")
# Make sure this peer's blocks match those of node0.
height = self.nodes[2].getblockcount()
while height >= 0:
block_hash = self.nodes[2].getblockhash(height)
assert_equal(block_hash, self.nodes[0].getblockhash(height))
assert_equal(self.nodes[0].getblock(block_hash), self.nodes[2].getblock(block_hash))
height -= 1
@subtest
def test_witness_sigops(self):
"""Test sigop counting is correct inside witnesses."""
# Keep this under MAX_OPS_PER_SCRIPT (201)
witness_program = CScript([OP_TRUE, OP_IF, OP_TRUE, OP_ELSE] + [OP_CHECKMULTISIG] * 5 + [OP_CHECKSIG] * 193 + [OP_ENDIF])
witness_hash = sha256(witness_program)
script_pubkey = CScript([OP_0, witness_hash])
sigops_per_script = 20 * 5 + 193 * 1
# We'll produce 2 extra outputs, one with a program that would take us
# over max sig ops, and one with a program that would exactly reach max
# sig ops
outputs = (MAX_SIGOP_COST // sigops_per_script) + 2
extra_sigops_available = MAX_SIGOP_COST % sigops_per_script
# We chose the number of checkmultisigs/checksigs to make this work:
assert(extra_sigops_available < 100) # steer clear of MAX_OPS_PER_SCRIPT
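        # For reference: with the BIP141 limit MAX_SIGOP_COST = 80000, sigops_per_script = 100 + 193 = 293,
        # so outputs = 80000 // 293 + 2 = 275 and extra_sigops_available = 80000 % 293 = 11, well below 100.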
# This script, when spent with the first
# N(=MAX_SIGOP_COST//sigops_per_script) outputs of our transaction,
# would push us just over the block sigop limit.
witness_program_toomany = CScript([OP_TRUE, OP_IF, OP_TRUE, OP_ELSE] + [OP_CHECKSIG] * (extra_sigops_available + 1) + [OP_ENDIF])
witness_hash_toomany = sha256(witness_program_toomany)
script_pubkey_toomany = CScript([OP_0, witness_hash_toomany])
# If we spend this script instead, we would exactly reach our sigop
# limit (for witness sigops).
witness_program_justright = CScript([OP_TRUE, OP_IF, OP_TRUE, OP_ELSE] + [OP_CHECKSIG] * (extra_sigops_available) + [OP_ENDIF])
witness_hash_justright = sha256(witness_program_justright)
script_pubkey_justright = CScript([OP_0, witness_hash_justright])
# First split our available utxo into a bunch of outputs
split_value = self.utxo[0].nValue // outputs
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
for i in range(outputs):
tx.vout.append(CTxOut(split_value, script_pubkey))
tx.vout[-2].scriptPubKey = script_pubkey_toomany
tx.vout[-1].scriptPubKey = script_pubkey_justright
tx.rehash()
block_1 = self.build_next_block()
self.update_witness_block_with_transactions(block_1, [tx])
test_witness_block(self.nodes[0], self.test_node, block_1, accepted=True)
tx2 = CTransaction()
# If we try to spend the first n-1 outputs from tx, that should be
# too many sigops.
total_value = 0
for i in range(outputs - 1):
tx2.vin.append(CTxIn(COutPoint(tx.sha256, i), b""))
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[-1].scriptWitness.stack = [witness_program]
total_value += tx.vout[i].nValue
tx2.wit.vtxinwit[-1].scriptWitness.stack = [witness_program_toomany]
tx2.vout.append(CTxOut(total_value, CScript([OP_TRUE])))
tx2.rehash()
block_2 = self.build_next_block()
self.update_witness_block_with_transactions(block_2, [tx2])
test_witness_block(self.nodes[0], self.test_node, block_2, accepted=False)
# Try dropping the last input in tx2, and add an output that has
# too many sigops (contributing to legacy sigop count).
checksig_count = (extra_sigops_available // 4) + 1
script_pubkey_checksigs = CScript([OP_CHECKSIG] * checksig_count)
tx2.vout.append(CTxOut(0, script_pubkey_checksigs))
tx2.vin.pop()
tx2.wit.vtxinwit.pop()
tx2.vout[0].nValue -= tx.vout[-2].nValue
tx2.rehash()
block_3 = self.build_next_block()
self.update_witness_block_with_transactions(block_3, [tx2])
test_witness_block(self.nodes[0], self.test_node, block_3, accepted=False)
# If we drop the last checksig in this output, the tx should succeed.
block_4 = self.build_next_block()
tx2.vout[-1].scriptPubKey = CScript([OP_CHECKSIG] * (checksig_count - 1))
tx2.rehash()
self.update_witness_block_with_transactions(block_4, [tx2])
test_witness_block(self.nodes[0], self.test_node, block_4, accepted=True)
# Reset the tip back down for the next test
sync_blocks(self.nodes)
for x in self.nodes:
x.invalidateblock(block_4.hash)
# Try replacing the last input of tx2 to be spending the last
# output of tx
block_5 = self.build_next_block()
tx2.vout.pop()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, outputs - 1), b""))
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[-1].scriptWitness.stack = [witness_program_justright]
tx2.rehash()
self.update_witness_block_with_transactions(block_5, [tx2])
test_witness_block(self.nodes[0], self.test_node, block_5, accepted=True)
# TODO: test p2sh sigop counting
if __name__ == '__main__':
SegWitTest().main()
| python | 96,058 |
import sys
# Remove current dir from sys.path, otherwise setuptools will pick up our
# module instead of the system's.
sys.path.pop(0)
from setuptools import setup
sys.path.append("..")
import sdist_upip
setup(name='micropython-email.charset',
version='0.5.1',
description='CPython email.charset module ported to MicroPython',
long_description='This is a module ported from CPython standard library to be compatible with\nMicroPython interpreter. Usually, this means applying small patches for\nfeatures not supported (yet, or at all) in MicroPython. Sometimes, heavier\nchanges are required. Note that CPython modules are written with availability\nof vast resources in mind, and may not work for MicroPython ports with\nlimited heap. If you are affected by such a case, please help reimplement\nthe module from scratch.',
url='https://github.com/pfalcon/micropython-lib',
author='CPython Developers',
author_email='[email protected]',
maintainer='Paul Sokolovsky',
maintainer_email='[email protected]',
license='Python',
cmdclass={'sdist': sdist_upip.sdist},
packages=['email'],
install_requires=['micropython-functools', 'micropython-email.encoders', 'micropython-email.errors'])
| python | 1,271 |
# SPDX-License-Identifier: Apache-2.0
#
# The OpenSearch Contributors require contributions made to
# this file be licensed under the Apache-2.0 license or a
# compatible open source license.
import argparse
import logging
class AssembleArgs:
manifest: str
keep: bool
def __init__(self):
parser = argparse.ArgumentParser(description="Assemble an OpenSearch Distribution")
parser.add_argument("manifest", type=argparse.FileType("r"), help="Manifest file.")
parser.add_argument("-b", "--base-url", dest="base_url", help="The base url to download the artifacts.")
parser.add_argument(
"--keep",
dest="keep",
action="store_true",
help="Do not delete the working temporary directory.",
)
parser.add_argument(
"-v",
"--verbose",
help="Show more verbose output.",
action="store_const",
default=logging.INFO,
const=logging.DEBUG,
dest="logging_level",
)
args = parser.parse_args()
self.logging_level = args.logging_level
self.manifest = args.manifest
self.keep = args.keep
self.base_url = args.base_url
| python | 1,240 |
import sys
import os
import unittest
from scrapy.item import Item, Field
from scrapy.utils.misc import arg_to_iter, create_instance, load_object, set_environ, walk_modules
from tests import mock
__doctests__ = ['scrapy.utils.misc']
class UtilsMiscTestCase(unittest.TestCase):
def test_load_object(self):
obj = load_object('scrapy.utils.misc.load_object')
assert obj is load_object
self.assertRaises(ImportError, load_object, 'nomodule999.mod.function')
self.assertRaises(NameError, load_object, 'scrapy.utils.misc.load_object999')
def test_walk_modules(self):
mods = walk_modules('tests.test_utils_misc.test_walk_modules')
expected = [
'tests.test_utils_misc.test_walk_modules',
'tests.test_utils_misc.test_walk_modules.mod',
'tests.test_utils_misc.test_walk_modules.mod.mod0',
'tests.test_utils_misc.test_walk_modules.mod1',
]
self.assertEqual(set([m.__name__ for m in mods]), set(expected))
mods = walk_modules('tests.test_utils_misc.test_walk_modules.mod')
expected = [
'tests.test_utils_misc.test_walk_modules.mod',
'tests.test_utils_misc.test_walk_modules.mod.mod0',
]
self.assertEqual(set([m.__name__ for m in mods]), set(expected))
mods = walk_modules('tests.test_utils_misc.test_walk_modules.mod1')
expected = [
'tests.test_utils_misc.test_walk_modules.mod1',
]
self.assertEqual(set([m.__name__ for m in mods]), set(expected))
self.assertRaises(ImportError, walk_modules, 'nomodule999')
def test_walk_modules_egg(self):
egg = os.path.join(os.path.dirname(__file__), 'test.egg')
sys.path.append(egg)
try:
mods = walk_modules('testegg')
expected = [
'testegg.spiders',
'testegg.spiders.a',
'testegg.spiders.b',
'testegg'
]
self.assertEqual(set([m.__name__ for m in mods]), set(expected))
finally:
sys.path.remove(egg)
def test_arg_to_iter(self):
class TestItem(Item):
name = Field()
assert hasattr(arg_to_iter(None), '__iter__')
assert hasattr(arg_to_iter(100), '__iter__')
assert hasattr(arg_to_iter('lala'), '__iter__')
assert hasattr(arg_to_iter([1, 2, 3]), '__iter__')
assert hasattr(arg_to_iter(l for l in 'abcd'), '__iter__')
self.assertEqual(list(arg_to_iter(None)), [])
self.assertEqual(list(arg_to_iter('lala')), ['lala'])
self.assertEqual(list(arg_to_iter(100)), [100])
self.assertEqual(list(arg_to_iter(l for l in 'abc')), ['a', 'b', 'c'])
self.assertEqual(list(arg_to_iter([1, 2, 3])), [1, 2, 3])
self.assertEqual(list(arg_to_iter({'a':1})), [{'a': 1}])
self.assertEqual(list(arg_to_iter(TestItem(name="john"))), [TestItem(name="john")])
def test_create_instance(self):
settings = mock.MagicMock()
crawler = mock.MagicMock(spec_set=['settings'])
args = (True, 100.)
kwargs = {'key': 'val'}
def _test_with_settings(mock, settings):
create_instance(mock, settings, None, *args, **kwargs)
if hasattr(mock, 'from_crawler'):
self.assertEqual(mock.from_crawler.call_count, 0)
if hasattr(mock, 'from_settings'):
mock.from_settings.assert_called_once_with(settings, *args,
**kwargs)
self.assertEqual(mock.call_count, 0)
else:
mock.assert_called_once_with(*args, **kwargs)
def _test_with_crawler(mock, settings, crawler):
create_instance(mock, settings, crawler, *args, **kwargs)
if hasattr(mock, 'from_crawler'):
mock.from_crawler.assert_called_once_with(crawler, *args,
**kwargs)
if hasattr(mock, 'from_settings'):
self.assertEqual(mock.from_settings.call_count, 0)
self.assertEqual(mock.call_count, 0)
elif hasattr(mock, 'from_settings'):
mock.from_settings.assert_called_once_with(settings, *args,
**kwargs)
self.assertEqual(mock.call_count, 0)
else:
mock.assert_called_once_with(*args, **kwargs)
# Check usage of correct constructor using four mocks:
# 1. with no alternative constructors
# 2. with from_settings() constructor
# 3. with from_crawler() constructor
# 4. with from_settings() and from_crawler() constructor
spec_sets = ([], ['from_settings'], ['from_crawler'],
['from_settings', 'from_crawler'])
for specs in spec_sets:
m = mock.MagicMock(spec_set=specs)
_test_with_settings(m, settings)
m.reset_mock()
_test_with_crawler(m, settings, crawler)
# Check adoption of crawler settings
m = mock.MagicMock(spec_set=['from_settings'])
create_instance(m, None, crawler, *args, **kwargs)
m.from_settings.assert_called_once_with(crawler.settings, *args,
**kwargs)
with self.assertRaises(ValueError):
create_instance(m, None, None)
def test_set_environ(self):
assert os.environ.get('some_test_environ') is None
with set_environ(some_test_environ='test_value'):
assert os.environ.get('some_test_environ') == 'test_value'
assert os.environ.get('some_test_environ') is None
os.environ['some_test_environ'] = 'test'
assert os.environ.get('some_test_environ') == 'test'
with set_environ(some_test_environ='test_value'):
assert os.environ.get('some_test_environ') == 'test_value'
assert os.environ.get('some_test_environ') == 'test'
if __name__ == "__main__":
unittest.main()
| python | 6,145 |
"""`Factory` specialization with limitation to provided type example."""
from dependency_injector import containers, providers, errors
class BaseService:
...
class SomeService(BaseService):
...
class ServiceProvider(providers.Factory):
provided_type = BaseService
# Creating service provider with a correct provided type:
class Services(containers.DeclarativeContainer):
some_service_provider = ServiceProvider(SomeService)
# Trying to create a service provider with an incorrect provided type:
try:
class Container(containers.DeclarativeContainer):
some_service_provider = ServiceProvider(object)
except errors.Error as exception:
print(exception)
# The output is:
# <class "__main__.ServiceProvider"> can provide only
# <class "__main__.BaseService"> instances
| python | 813 |
# for basic exchange operations
import math
from ztom import errors
def get_trade_direction_to_currency(symbol: str, dest_currency: str):
cs = symbol.split("/")
if cs[0] == dest_currency:
return "buy"
elif cs[1] == dest_currency:
return "sell"
else:
return False
def get_symbol(c1: str, c2: str, markets: dict):
if c1 + "/" + c2 in markets:
a = c1 + "/" + c2
elif c2 + "/" + c1 in markets:
a = c2 + "/" + c1
else:
return False
return a
def get_order_type(source_cur: str, dest_cur: str, symbol: str):
if source_cur + "/" + dest_cur == symbol:
a = "sell"
elif dest_cur + "/" + source_cur == symbol:
a = "buy"
else:
a = False
return a
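# Illustrative calls: get_order_type("ETH", "BTC", "ETH/BTC") -> "sell" (spend ETH, receive BTC),
# get_order_type("BTC", "ETH", "ETH/BTC") -> "buy"; a symbol that matches neither direction returns False.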
def get_symbol_order_price_from_tickers(source_cur: str, dest_cur: str, tickers: dict):
"""
returns dict with taker side and price for converting currency source_cur to dest_cur, using the ticker(-s) dict
:param source_cur: str
:param dest_cur: str
:param tickers: ticker (-s) dict {"sym/bol":{"ask":value, "bid":value}}
:return: dict of {"symbol": symbol,
"order_type": "buy" or "sell",
"price_type": "ask" or "bid",
"price": price,
"maker_price_type": "bid" or "ask",
"maker_price":val}
"""
if source_cur + "/" + dest_cur in tickers:
symbol = source_cur + "/" + dest_cur
order_type = "sell"
price_type = "bid"
maker_price_type = "ask"
elif dest_cur + "/" + source_cur in tickers:
symbol = dest_cur + "/" + source_cur
order_type = "buy"
price_type = "ask"
maker_price_type = "bid"
else:
return None
if symbol in tickers:
price = tickers[symbol][price_type] if price_type in tickers[symbol] and \
tickers[symbol][price_type] > 0 else None
maker_price = tickers[symbol][maker_price_type] if maker_price_type in tickers[symbol] and \
tickers[symbol][maker_price_type] > 0 else None
else:
price = None
a = dict({"symbol": symbol, "order_type": order_type, "price_type": price_type, "price": price,
"maker_price_type": maker_price_type, "maker_price": maker_price})
return a
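# Illustrative usage with a made-up ticker snapshot:
#   tickers = {"ETH/BTC": {"ask": 0.031, "bid": 0.030}}
#   get_symbol_order_price_from_tickers("ETH", "BTC", tickers)
#   -> {"symbol": "ETH/BTC", "order_type": "sell", "price_type": "bid", "price": 0.030,
#       "maker_price_type": "ask", "maker_price": 0.031}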
def price_to_precision(fee, precision=8):
return float(('{:.' + str(precision) + 'f}').format(float(fee)))
def amount_to_precision(amount, precision=0):
if precision > 0:
decimal_precision = math.pow(10, precision)
return math.trunc(amount * decimal_precision) / decimal_precision
else:
return float(('%d' % amount))
def relative_target_price_difference(side: str, target_price: float, current_price: float) -> float:
"""
    Returns the relative difference of current_price from the target price. A negative value can be read as a "bad"
    difference, a positive one as "good".
    For "sell" orders: relative_target_price_difference = (current_price / target_price) - 1
    For "buy" orders: relative_target_price_difference = 1 - (current_price / target_price)
    This means that for a "buy" order, if the price is greater than the target price, the relative difference is negative.
    For "sell" orders, if the price is less than the target price, the relative difference is negative.
:param side: side of the order to compare
:param target_price: the price to compare with
:param current_price: the price which is being compared to target price
:return: relative difference between the current_price and target_price regarding the order's side or None
"""
result = None
if side.lower() == "sell":
result = (current_price / target_price) - 1
return result
if side.lower() == "buy":
result = 1 - (current_price / target_price)
return result
raise (ValueError("Wrong side of the order {}".format(side)))
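# Worked example with made-up prices: a "sell" order with target_price=100 and current_price=95
# gives 95/100 - 1 = -0.05 ("bad"), while a "buy" order with the same prices gives
# 1 - 95/100 = +0.05 ("good": the fill would be below the buy target).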
def convert_currency(start_currency:str, start_amount:float, dest_currency: str = None, symbol: str = None, price: float = None,
ticker: dict = None, side: str = None, taker: bool = True):
"""
:returns the amount of :param dest_currency: which could be gained if converted from :param start_amount:
of :param start_currency:
:param start_currency: currency to convert from
:param start_amount: amount of start_currency
:param dest_currency: currency to convert to
:param symbol: symbol of pair within the conversion
:param price: if price is not set, it would be taken from ticker (by default for TAKER price)
:param side: if symbol is not set, side "buy" or "sell" should be provided
:param ticker: content of dict returned by fetch_tickers for symbol. ex. fetch_tickers()["ETH/BTC"]
    :param taker: set to False if the maker price should be taken from the ticker
"""
if symbol is None:
if ticker is None:
return None
if "symbol" in ticker:
symbol = ticker["symbol"]
else:
return None
if side is None:
side = get_trade_direction_to_currency(symbol, dest_currency)
if not side:
return None
if price is None:
if (taker and side == "buy") or \
(not taker and side == "sell"):
price = float(ticker["ask"])
elif (taker and side == "sell") or \
(not taker and side == "buy"):
price = float(ticker["bid"])
else:
return None
if price == 0:
return None
dest_amount = 0.0
if side.lower() == "sell":
dest_amount = start_amount * price
if side.lower() == "buy":
dest_amount = start_amount / price
return dest_amount
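# Minimal sketch of a call (hypothetical ticker, taker pricing):
#   ticker = {"symbol": "ETH/BTC", "ask": 0.04, "bid": 0.039}
#   convert_currency("BTC", 1.0, dest_currency="ETH", ticker=ticker)
#   -> a "buy" on ETH/BTC at the ask price, i.e. 1.0 / 0.04 = 25.0 ETH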
def ticker_price_for_dest_amount(side: str, start_amount: float, dest_amount: float):
"""
:return: price for order to convert start_amount to dest_amount considering order's side
"""
if dest_amount == 0 or start_amount == 0:
raise ValueError("Zero start ot dest amount")
if side is None:
raise ValueError("RecoveryManagerError: Side not set")
else:
side = side.lower()
if side == "buy":
return start_amount / dest_amount
if side == "sell":
return dest_amount / start_amount
return False
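# E.g. ticker_price_for_dest_amount("buy", start_amount=1.0, dest_amount=25.0) -> 0.04,
# i.e. the price at which 1.0 of the quote currency converts into 25.0 of the base currency.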
def base_amount_for_target_currency(currency, amount, symbol, price: float=None, ticker:dict=None):
"""
    Returns the amount expressed in the base currency of symbol for the provided currency, amount and price.
    The price can be set directly or is otherwise taken from the ticker dict for the symbol.
    Returns: the amount in the base currency, or 0 if no price is available
"""
side = get_trade_direction_to_currency(symbol, dest_currency=currency)
if currency == symbol.split("/")[0]:
return amount
else:
ticker_price = 0.0
        if price is None and ticker is not None:
ticker_price = ticker.get("bid", 0.0)
elif price is not None:
ticker_price = price
if ticker_price == 0.0:
return 0
return amount / ticker_price
| python | 7,235 |
#!/usr/bin/env python
# What is the first term in the Fibonacci sequence to contain 1000 digits?
from python.decorators import euler_timer
from python.functions import fibonacci_generator
def main(verbose=False):
fib = fibonacci_generator()
fib_index = 0
for value in fib:
# number of digits
if len(str(value)) < 1000:
fib_index += 1
continue
else:
return fib_index
if __name__ == '__main__':
print euler_timer(25)(main)(verbose=True)
| python | 516 |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
Usage: forever.py command args
"""
import sys
import time
import subprocess
def main():
try:
cmd = ' '.join(sys.argv[1:])
i = 1
while True:
print '=== Iteration {} ==='.format(i)
status = subprocess.call(cmd, shell=True)
print '=== exit status {} ==='.format(status)
i += 1
time.sleep(1)
except KeyboardInterrupt:
print '^C detected'
if __name__ == '__main__':
main()
| python | 530 |
"""
Copyright (c) 2022 Huawei Technologies Co.,Ltd.
openGauss is licensed under Mulan PSL v2.
You can use this software according to the terms and conditions of the Mulan PSL v2.
You may obtain a copy of Mulan PSL v2 at:
http://license.coscl.org.cn/MulanPSL2
THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
See the Mulan PSL v2 for more details.
"""
"""
Case Type : MOT
Case Name   : Run SQL statements with MOT group_commit_timeout=1
Description :
    1. Set group_commit_timeout=1 in the mot.conf configuration file
    2. Restart the database: gs_om -t stop && gs_om -t start
    3. Check that the parameter took effect: cat cluster/dn1/mot.conf | grep group_commit_timeout
    4. Check the pg_log output: cat cluster/../pg_log/xxx.log
    5. Connect to the database, create an MOT (memory) table and run DML statements
    6. Clean up the data
Expect      :
    1. Modification succeeds
    2. Database restart succeeds
    3. The configuration file shows the parameter change in effect
    4. The log contains a WARNING message for the parameter
    5. Connection succeeds and the MOT table is created
    6. Environment cleanup succeeds
History :
"""
import os
import unittest
from yat.test import Node
from yat.test import macro
from testcase.utils.Constant import Constant
from testcase.utils.CommonSH import CommonSH
from testcase.utils.Logger import Logger
LOG = Logger()
CONSTANT = Constant()
DB_ENV_PATH = macro.DB_ENV_PATH
DB_ENV_PATH = macro.DB_ENV_PATH
MOT_LOG = os.path.join(macro.PG_LOG_PATH, macro.DN_NODE_NAME.split('/')[0])
CONFIG_PARAM = 'group_commit_timeout = 1'
class MotParamTest(unittest.TestCase):
def setUp(self):
self.sh_primysh = CommonSH('PrimaryDbUser')
self.user_node = Node('PrimaryDbUser')
self.mot_table = 'mot_test'
        LOG.info('======Check the parameter, modify the configuration and restart the database======')
self.config_item = "enable_incremental_checkpoint=off"
check_res = self.sh_primysh.execut_db_sql(
f'''show enable_incremental_checkpoint;''')
if 'off' != check_res.split('\n')[-2].strip():
self.sh_primysh.execute_gsguc(
'set', CONSTANT.GSGUC_SUCCESS_MSG, self.config_item)
self.sh_primysh.restart_db_cluster()
result = self.sh_primysh.get_db_cluster_status()
self.assertTrue("Degraded" in result or "Normal" in result)
def test_mot_dump(self):
LOG.info("======Opengauss_Function_MOT_Case0122开始执行======")
LOG.info("=====步骤1:修改mot.conf配置文件,设置group_commit_timeout=1,重启数据库=====")
add_cmd = f'''source {DB_ENV_PATH}
gs_ssh -c "echo -e '{CONFIG_PARAM}' >> {MOT_CONF}" '''
LOG.info(add_cmd)
add_res = self.user_node.sh(add_cmd).result()
LOG.info(add_res)
self.assertIn('Successfully execute command on all nodes', add_res)
self.sh_primysh.restart_db_cluster()
result = self.sh_primysh.get_db_cluster_status()
self.assertTrue("Degraded" in result or "Normal" in result)
LOG.info("======-步骤2:查看mot.conf文件参数修改是否生效======")
cat_cmd = f'''cat {MOT_CONF} | grep group_commit_timeout'''
msg = self.user_node.sh(cat_cmd).result()
LOG.info(msg)
self.assertIn(CONFIG_PARAM, msg.split('\n')[-1].strip())
LOG.info("======步骤3:查看pg_log日志,存在参数WARNING信息======")
sql_cmd = f'''ls -t {MOT_LOG} | head -1'''
LOG.info(sql_cmd)
log_msg = self.user_node.sh(sql_cmd).result()
LOG.info(log_msg)
cat_cmd = f'''cd {MOT_LOG}
cat {log_msg} | grep group_commit_timeout'''
LOG.info(cat_cmd)
cat_msg = self.user_node.sh(cat_cmd).result()
LOG.info(cat_msg)
self.assertIn('WARNING', cat_msg)
self.assertIn('Configuration of group_commit_timeout=1 '
'is out of bounds [100, 200000]', cat_msg)
LOG.info("======步骤4:连接数据库,创建内存表,执行DML操作======")
sql_cmd = f'''
drop foreign table if exists {self.mot_table};
create foreign table {self.mot_table}(id int);
insert into {self.mot_table}
values(generate_series(1,2000000));
select count(*) from {self.mot_table};
'''
sql_msg = self.sh_primysh.execut_db_sql(sql_cmd)
LOG.info(sql_msg)
self.assertIn(CONSTANT.CREATE_FOREIGN_SUCCESS_MSG, sql_msg)
self.assertIn(CONSTANT.INSERT_SUCCESS_MSG, sql_msg)
self.assertIn('2000000', sql_msg)
def tearDown(self):
LOG.info("======步骤6:清理环境,删除参数======")
del_cmd = f'''source {macro.DB_ENV_PATH};
gsql {self.user_node.db_name}
-p {self.user_node.db_port}
-c "drop foreign table {self.mot_table} cascade"
sed -i '$d' {MOT_CONF}
'''
LOG.info(del_cmd)
self.user_node.sh(del_cmd)
        LOG.info('======Opengauss_Function_MOT_Case0122 finished======')
| python | 4,839 |
import logging
from typing import List, Optional
from dvc.exceptions import InvalidArgumentError
from dvc.repo import locked
from dvc.repo.scm_context import scm_context
from dvc.scm.base import RevError
from .utils import exp_refs, remove_exp_refs, resolve_exp_ref
logger = logging.getLogger(__name__)
@locked
@scm_context
def remove(
repo,
exp_names=None,
queue=False,
clear_all=False,
remote=None,
**kwargs,
):
if not any([exp_names, queue, clear_all]):
return 0
removed = 0
if queue:
removed += _clear_stash(repo)
if clear_all:
removed += _clear_all(repo)
if exp_names:
removed += _remove_exp_by_names(repo, remote, exp_names)
return removed
def _clear_stash(repo):
removed = len(repo.experiments.stash)
repo.experiments.stash.clear()
return removed
def _clear_all(repo):
ref_infos = list(exp_refs(repo.scm))
remove_exp_refs(repo.scm, ref_infos)
return len(ref_infos)
def _get_exp_stash_index(repo, ref_or_rev: str) -> Optional[int]:
stash_revs = repo.experiments.stash_revs
for _, ref_info in stash_revs.items():
if ref_info.name == ref_or_rev:
return ref_info.index
try:
rev = repo.scm.resolve_rev(ref_or_rev)
if rev in stash_revs:
return stash_revs.get(rev).index
except RevError:
pass
return None
def _remove_commited_exps(
repo, remote: Optional[str], exp_names: List[str]
) -> List[str]:
remain_list = []
remove_list = []
for exp_name in exp_names:
ref_info = resolve_exp_ref(repo.scm, exp_name, remote)
if ref_info:
remove_list.append(ref_info)
else:
remain_list.append(exp_name)
if remove_list:
if not remote:
remove_exp_refs(repo.scm, remove_list)
else:
for ref_info in remove_list:
repo.scm.push_refspec(remote, None, str(ref_info))
return remain_list
def _remove_queued_exps(repo, refs_or_revs: List[str]) -> List[str]:
remain_list = []
for ref_or_rev in refs_or_revs:
stash_index = _get_exp_stash_index(repo, ref_or_rev)
if stash_index is None:
remain_list.append(ref_or_rev)
else:
repo.experiments.stash.drop(stash_index)
return remain_list
def _remove_exp_by_names(repo, remote, exp_names: List[str]) -> int:
remained = _remove_commited_exps(repo, remote, exp_names)
if not remote:
remained = _remove_queued_exps(repo, remained)
if remained:
raise InvalidArgumentError(
"'{}' is not a valid experiment".format(";".join(remained))
)
return len(exp_names) - len(remained)
| python | 2,728 |
#!/usr/bin/env python
'''****************************************************************************
* Program - Convert To GFA format
* Author - Mayank Pahadia
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
****************************************************************************'''
'''****************************************************************************
* To run the program, pass three arguments to the python script on the command line.
* For example - python convertToGFA.py inputFileName outputFileName kmerSize
* Logic - It reads through the fasta file with all the unitigs information
* and link information and outputs it in the GFA format.
****************************************************************************'''
import sys
import argparse
def write_segment(name,segment,optional,g,links):
add = ""
add += "S\t" #for segment
add += name #id of segment
add += "\t"
add += segment #segment itself
add += "\t"
for i in optional: #optional tags
add+=i
add+="\t"
#adding Segment to the file
g.write(add.strip()+"\n")
for j in links: #adding all the links of the current segment to the GFA file
g.write(j)
def main():
parser = argparse.ArgumentParser(description="Convert a bcalm-generated FASTA to a GFA.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('inputFilename', help='Input FASTA file')
parser.add_argument('outputFilename', help='Output GFA file')
parser.add_argument('kmerSize', type=int, help='k-mer length')
parser.add_argument('-s', '--single-directed', action='store_true',
help='Avoid outputting the whole skew-simmetric graph and output only one edge between two nodes',
dest='single_directed')
args = parser.parse_args()
with open(args.inputFilename) as f:
#name stores the id of the unitig
#optional is a list which stores all the optional tags of a segment
#links stores all the link information about a segment
name = ""
optional=[]
links=[]
g = open(args.outputFilename,'w')
#adding Header to the file
k = int(args.kmerSize)
g.write('H\tVN:Z:1.0\t k:i:%d\n' %k) # includes the k-mer size
print("GFA file open")
#firstLine is for implemetation purpose so that we don't add some garbage value to the output file.
firstLine = 0
#segment stores the segment till present, in a fasta file, segment can be on many lines, hence we need to get the whole segment from all the lines
segment = ""
for line in f:
line = line.replace("\n","")
if(line[0]!=">"):
#segment might be in more than one line, hence we get the whole segment first, and then put it in the GFA file.
segment += line
if(line[0]==">"):
if(firstLine!=0):#if it's not the firstline in the input file, we store the input in GFA format in the output file
write_segment(name,segment,optional,g,links)
segment = ""
firstLine = 1
#once the previous segment and it's information has been stored, we start the next segment and it's information
a = line.split(" ")
name=a[0][1:] #get the id
optional=[]
links = []
#we skip the first value because the first value is ">ID"
for i in range(1,len(a)):
#we need this because the line can end with a space, hence we get one extra value in our list.
if(a[i]==""):
continue
if(a[i][0:2] == "MA"): #previous bcalm2 versions had "MA=[xxx]" optional tag as well, kept it just for compatibility, and reformated
optional.append(a[i][0:2]+":f:"+a[i][2:])
elif(a[i][0:2] == "L:"): #for links
b = a[i].split(":")
k1 = int(args.kmerSize)-1
if args.single_directed:
if name < b[2]:
links.append("L\t"+name+"\t"+b[1]+"\t"+b[2]+"\t"+b[3]+"\t"+str(k1)+"M\n")
elif name == b[2] and not (b[1] == b[3] == '-'): # manage links between the same unitig
links.append("L\t"+name+"\t"+b[1]+"\t"+b[2]+"\t"+b[3]+"\t"+str(k1)+"M\n")
else:
links.append("L\t"+name+"\t"+b[1]+"\t"+b[2]+"\t"+b[3]+"\t"+str(k1)+"M\n")
else: #all the other optional tags
optional.append(a[i])
#we will miss the last one, because it won't go into the if condition - if(line[0]==">") and hence won't add the segment to the file.
write_segment(name,segment,optional,g,links)
print("done")
g.close()
if __name__ == "__main__":
main()
| python | 5,565 |
import numpy as np
def pad_one_more(A):
if len(A.shape) == 2:
return np.pad(A, ((0,1), (0,1)), 'edge')
else:
return np.pad(A, ((0,1), (0,1), (0, 0)), 'edge')
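# E.g. a (2, 2) array becomes (3, 3) by repeating its last row and column ("edge" padding);
# for a (H, W, C) array only the two spatial axes are padded. remove_pad_one_more below undoes this.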
def remove_pad_one_more(A):
    return A[:-1, :-1, ...]
| python | 239 |
from conans import ConanFile, CMake, tools
from conans.errors import ConanInvalidConfiguration
import os
class OatppConan(ConanFile):
name = "oatpp"
description = "Modern Web Framework for C++"
homepage = "https://github.com/oatpp/oatpp"
license = "Apache-2.0"
topics = ("conan", "oat++", "oatpp", "web-framework")
url = "https://github.com/conan-io/conan-center-index"
generators = "cmake"
settings = "os", "compiler", "build_type", "arch"
options = {"shared": [True, False], "fPIC": [True, False]}
default_options = {"shared": False, "fPIC": True}
exports_sources = "CMakeLists.txt"
_cmake = None
@property
def _source_subfolder(self):
return "source_subfolder"
@property
def _build_subfolder(self):
return "build_subfolder"
def source(self):
tools.get(**self.conan_data["sources"][self.version])
os.rename("oatpp-{0}".format(self.version), self._source_subfolder)
def configure(self):
if self.settings.os == "Windows" and self.options.shared:
raise ConanInvalidConfiguration("oatpp can not be built as shared library on Windows")
if self.settings.compiler == "gcc" and tools.Version(self.settings.compiler.version) < "5":
raise ConanInvalidConfiguration("oatpp requires GCC >=5")
def config_options(self):
if self.settings.os == "Windows":
del self.options.fPIC
def _configure_cmake(self):
if self._cmake:
return self._cmake
self._cmake = CMake(self)
self._cmake.definitions["OATPP_BUILD_TESTS"] = False
self._cmake.definitions["CMAKE_WINDOWS_EXPORT_ALL_SYMBOLS"] = True
self._cmake.configure(build_folder=self._build_subfolder)
return self._cmake
def build(self):
cmake = self._configure_cmake()
cmake.build()
def package(self):
self.copy("LICENSE", src=self._source_subfolder, dst="licenses")
cmake = self._configure_cmake()
cmake.install()
tools.rmdir(os.path.join(self.package_folder, "lib", "cmake"))
def package_info(self):
self.cpp_info.includedirs = [os.path.join("include", "oatpp-{}".format(self.version), "oatpp")]
self.cpp_info.libdirs = [os.path.join("lib", "oatpp-{}".format(self.version))]
self.cpp_info.libs = tools.collect_libs(self)
if self.settings.os == "Linux":
self.cpp_info.system_libs = ["pthread"]
if self.settings.os == "Windows":
self.cpp_info.system_libs.append("ws2_32")
| python | 2,573 |
import string
from typing import List, Tuple
import nltk
import os
import plotly
import pandas
import plotly.figure_factory as ff
import plotly.graph_objs as go
from wordcloud import WordCloud, get_single_color_func
from snlp import logger
from tqdm import tqdm
from nltk.corpus import stopwords
from plotly.subplots import make_subplots
def create_adjust_subplots(labels: List[Tuple]) -> plotly.graph_objects.Figure:
"""Create subplots and adjust the location of the titles wrt the number of labels.
Args:
labels (list): List of (label, type) tuples.
Returns:
fig (plotly.graph_objs.Figure)
"""
if len(labels) == 0:
titles = (
"Analysis of Text",
"Analysis of Labels",
"Document Lengths",
"",
"",
"",
"",
"Word Frequency",
"",
"",
"",
"",
"" "Common Nouns",
"",
"",
"",
"Common Adjectives",
"",
"",
"",
"Common Verbs",
)
elif len(labels) == 1:
titles = (
"Analysis of Text",
"Analysis of Labels",
"Document Lengths",
labels[0][0].capitalize(),
"",
"",
"Word Frequency",
"",
"",
"",
"",
"Common Nouns",
"",
"",
"",
"Common Adjectives",
"",
"",
"",
"Common Verbs",
)
elif len(labels) == 2:
titles = (
"Analysis of Text",
"Analysis of Labels",
"Document Lengths",
labels[0][0].capitalize(),
"",
labels[1][0].capitalize(),
"Word Frequency",
"",
"",
"Common Nouns",
"",
"",
"",
"Common Adjectives",
"",
"",
"",
"Common Verbs",
)
elif len(labels) == 3:
titles = (
"Analysis of Text",
"Analysis of Labels",
"Document Lengths",
labels[0][0].capitalize(),
"",
labels[1][0].capitalize(),
"Word Frequency",
labels[2][0].capitalize(),
"",
"Common Nouns",
"",
"",
"",
"Common Adjectives",
"",
"",
"",
"Common Verbs",
)
elif len(labels) == 4:
titles = (
"Analysis of Text",
"Analysis of Labels",
"Document Lengths",
labels[0][0].capitalize(),
"",
labels[1][0].capitalize(),
"Word Frequency",
labels[2][0].capitalize(),
"",
"Common Nouns",
labels[3][0].capitalize(),
"",
"Common Adjectives",
"",
"",
"",
"Common Verbs",
)
fig = make_subplots(
rows=16,
cols=2,
subplot_titles=titles,
specs=[
[{}, {}],
[{"rowspan": 2}, {"rowspan": 2} if len(labels) >= 1 else {}], # row 2
[None, None if len(labels) >= 1 else {}],
[{}, {"rowspan": 2} if len(labels) >= 2 else {}], # row 4
[{"rowspan": 2}, None if len(labels) >= 2 else {}],
[None, {"rowspan": 2} if len(labels) >= 3 else {}], # row 6
[{}, None if len(labels) >= 3 else {}],
[{"rowspan": 3}, {"rowspan": 2} if len(labels) >= 4 else {}], # row 8
[None, None if len(labels) >= 4 else {}],
[None, {}],
[{"rowspan": 3}, {}], # 12
[None, {}],
[None, {}],
[{"rowspan": 3}, {}], # 16
[None, {}],
[None, {}],
],
vertical_spacing=0.035,
)
return fig
def plotly_wordcloud(token_count_dic: dict) -> plotly.graph_objects.Scatter:
"""Create a world cloud trace for plotly.
Args:
token_count_dic (dictionary): Dictionary of token to its count
Returns:
trace (plotly.graph_objects.Scatter)
"""
wc = WordCloud(color_func=get_single_color_func("deepskyblue"), max_words=100)
wc.generate_from_frequencies(token_count_dic)
word_list = []
rel_freq_list = []
freq_list = []
fontsize_list = []
position_list = []
orientation_list = []
color_list = []
for (word, rel_freq), fontsize, position, orientation, color in wc.layout_:
word_list.append(word)
rel_freq_list.append(rel_freq)
freq_list.append(token_count_dic[word])
fontsize_list.append(fontsize)
position_list.append(position)
orientation_list.append(orientation)
color_list.append(color)
# get the positions
x = []
y = []
for i in position_list:
x.append(i[0])
y.append(i[1])
# get the relative occurrence frequencies
new_freq_list = []
for i in rel_freq_list:
i_tmp = round(i*100, 4)
i_tmp = i_tmp if i_tmp > 1 else 1 # Plotly textfont.size in go.Scatter throws exception for values below 1.
new_freq_list.append(i_tmp)
try:
trace = go.Scatter(
x=x,
y=y,
textfont=dict(size=new_freq_list, color=color_list),
hoverinfo="text",
hovertext=["{0}: {1}".format(w, f) for w, f in zip(word_list, freq_list)],
mode="text",
text=word_list,
)
return trace
except Exception as E:
logger.error(f'While creating the word cloud, plotly.go returned the following error \
\n{E}\nfor relative frequencies: {rel_freq_list}\nthat were mapped to {new_freq_list}')
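# Illustrative call (token counts are made up): plotly_wordcloud({"data": 12, "model": 7, "text": 5})
# returns a plotly.graph_objects.Scatter trace that can be added to a figure via fig.add_trace(...).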
def generate_text_plots(
figure: plotly.graph_objs.Figure,
doc_length_list: List,
word_freq_list: List,
noun_freq_dict: dict,
adj_freq_dic: dict,
verb_freq_dic: dict,
) -> None:
"""Generate distribution plots and word clouds for the textual content based on the input argumnets.
Args:
figure (plotly.graph_objs.Figure): Figure object in which the plots are created.
doc_length_list (list): List containing the length of each document.
word_freq_list (list): List containing the frequency of each word.
noun_freq_dict (dictionary): Dictionary of noun to frequency.
        adj_freq_dic (dictionary): Dictionary of adjective to frequency.
        verb_freq_dic (dictionary): Dictionary of verb to frequency.
Returns:
None
"""
# Customize plots and x, y labels: https://plotly.com/python/subplots/#customizing-subplot-axes
# Word cloud with plotly: https://github.com/PrashantSaikia/Wordcloud-in-Plotly/blob/master/plotly_wordcloud.py
def _distplot_to_dist_trace(data: List, color: str) -> dict:
"""Create a trace from data.
Args:
data (list): List of numbers
color (list[str])
Returns:
data_dist (dict): Representation of a distplot figure.
"""
data_dist = ff.create_distplot([data], group_labels=["distplot"], colors=[color])["data"]
for item in data_dist:
item.pop("xaxis", None)
item.pop("yaxis", None)
return data_dist
data1_dist = _distplot_to_dist_trace(doc_length_list, color="rgb(0, 200, 200)")
data2_dist = _distplot_to_dist_trace(word_freq_list, color="magenta")
d1_hist = data1_dist[0]
d1_kde = data1_dist[1]
d1_rug = data1_dist[2]
d2_hist = data2_dist[0]
d2_kde = data2_dist[1]
d2_rug = data2_dist[2]
noun_cloud = plotly_wordcloud(token_count_dic=noun_freq_dict)
adj_cloud = plotly_wordcloud(token_count_dic=adj_freq_dic)
verb_cloud = plotly_wordcloud(token_count_dic=verb_freq_dic)
figure.append_trace(d1_hist, 2, 1)
figure.append_trace(d1_kde, 2, 1)
figure.append_trace(d1_rug, 4, 1)
figure.append_trace(d2_hist, 5, 1)
figure.append_trace(d2_kde, 5, 1)
figure.append_trace(d2_rug, 7, 1)
figure.append_trace(noun_cloud, 8, 1)
figure.append_trace(adj_cloud, 11, 1)
figure.append_trace(verb_cloud, 14, 1)
figure.update_xaxes(rangemode="tozero", row=4, col=1)
figure.update_yaxes(showticklabels=False, row=4, col=1)
figure.update_xaxes(rangemode="tozero", row=7, col=1)
figure.update_yaxes(showticklabels=False, row=7, col=1)
figure.update_yaxes(title_text="Probability", row=2, col=1)
figure.update_yaxes(title_text="Probability", row=5, col=1)
figure.update_xaxes(showticklabels=False, zeroline=False, row=8, col=1)
figure.update_xaxes(showticklabels=False, zeroline=False, row=11, col=1)
figure.update_xaxes(showticklabels=False, zeroline=False, row=14, col=1)
figure.update_yaxes(showticklabels=False, zeroline=False, row=8, col=1)
figure.update_yaxes(showticklabels=False, zeroline=False, row=11, col=1)
figure.update_yaxes(showticklabels=False, zeroline=False, row=14, col=1)
def generate_label_plots(figure: plotly.graph_objs.Figure, df: pandas.DataFrame, label_cols: str) -> None:
"""Generate histogram and bar plots for the labels in label_cols.
Args:
figure (plotly.graph_objs.Figure): Figure object in which the plots are created.
df (Pandas DataFrame): DataFrame that contains labels specified in label_cols.
label_cols (list): list of tuples in the form of [('label_1', 'categorical/numerical'),
('label_2', 'categorical/numerical'), ...]
Returns:
None
"""
if len(label_cols) == 1:
lab_trace1 = label_plot(df, label_col=label_cols[0][0], label_type=label_cols[0][1])
figure.append_trace(lab_trace1, 2, 2)
figure.update_yaxes(title_text="Count", row=2, col=2)
elif len(label_cols) == 2:
lab_trace1 = label_plot(df, label_col=label_cols[0][0], label_type=label_cols[0][1])
lab_trace2 = label_plot(df, label_col=label_cols[1][0], label_type=label_cols[1][1])
figure.append_trace(lab_trace1, 2, 2)
figure.append_trace(lab_trace2, 4, 2)
figure.update_yaxes(title_text="Count", row=2, col=2)
figure.update_yaxes(title_text="Count", row=4, col=2)
elif len(label_cols) == 3:
lab_trace1 = label_plot(df, label_col=label_cols[0][0], label_type=label_cols[0][1])
lab_trace2 = label_plot(df, label_col=label_cols[1][0], label_type=label_cols[1][1])
lab_trace3 = label_plot(df, label_col=label_cols[2][0], label_type=label_cols[2][1])
figure.append_trace(lab_trace1, 2, 2)
figure.append_trace(lab_trace2, 4, 2)
figure.append_trace(lab_trace3, 6, 2)
figure.update_yaxes(title_text="Count", row=2, col=2)
figure.update_yaxes(title_text="Count", row=4, col=2)
figure.update_yaxes(title_text="Count", row=6, col=2)
elif len(label_cols) == 4:
lab_trace1 = label_plot(df, label_col=label_cols[0][0], label_type=label_cols[0][1])
lab_trace2 = label_plot(df, label_col=label_cols[1][0], label_type=label_cols[1][1])
lab_trace3 = label_plot(df, label_col=label_cols[2][0], label_type=label_cols[2][1])
lab_trace4 = label_plot(df, label_col=label_cols[3][0], label_type=label_cols[3][1])
figure.append_trace(lab_trace1, 2, 2)
figure.append_trace(lab_trace2, 4, 2)
figure.append_trace(lab_trace3, 6, 2)
figure.append_trace(lab_trace4, 8, 2)
figure.update_yaxes(title_text="Count", row=2, col=2)
figure.update_yaxes(title_text="Count", row=4, col=2)
figure.update_yaxes(title_text="Count", row=6, col=2)
figure.update_yaxes(title_text="Count", row=8, col=2)
def label_plot(df: pandas.DataFrame, label_col: str, label_type: str) -> plotly.graph_objects.Histogram:
"""Create a plot for label_col in df, wrt to label_type.
Args:
df (Pandas DataFrame): DataFrame that contains label_col.
label_col (str): Name of the label column in df that must be plotted.
label_type (str): Represents the type of label and consequently specifies the type of plot.
It can be "numerical" or "categorical".
Returns:
trace (plotly.graph_objects.Histogram)
"""
if label_type == "categorical":
values = df[label_col].unique().tolist() # ['pos', 'neg', 'neutral']
counts = df[label_col].value_counts() # 1212323
x = []
y = []
for v in values:
x.append(v)
y.append(counts[v])
trace = go.Bar(x=x, y=y, name=label_col)
elif label_type == "numerical":
trace = go.Histogram(x=df[label_col], name=label_col)
else:
raise ValueError('label_col input argument must be set to either "categorical" or "numerical".')
return trace
def generate_report(
df: pandas.DataFrame,
out_dir: str,
text_col: str,
label_cols: List = [],
language: str = "english",
skip_stopwords_punc: bool = True,
save_report: bool = False,
) -> None:
"""Generate analysis report and eitherr renders the report via Plotly show api or saves it offline to html.
Args:
df (pandas.DataFrame): DataFrame that contains text and labels.
out_dir (str): Dir where the report is saved. Required only when save_report option is True.
text_col (str): Name of the column that contains a tokenized text content.
label_cols (list): list of tuples in the form of [('label_1', 'categorical/numerical'),
('label_2', 'categorical/numerical'), ...]
language (str): Language of the text in df[text_col]
        skip_stopwords_punc (bool): Whether or not to skip stopwords and punctuation in the analysis. Default: True
        save_report (bool): Whether or not to save the report as an html file. Default: False
Returns:
None
"""
def update_count(items_dic: dict, items: List[str]) -> None:
"""Update the corresponding count for each key in items_dic. w.r.t. terms in items.
Args:
items_dic (dict): Dictionary mapping keys to their count
items (list): List of tokens
Returns:
None
"""
for t in items:
if t in items_dic:
items_dic[t] += 1
else:
items_dic[t] = 1
def get_pos(tagged_tokens: List[Tuple[str, str]], goal_pos: str) -> List:
"""Extracts goal_pos POS tags from tagged_tokens.
Args:
tagged_tokens (List[Tuple(str, str)]): Contains terms and ther pos tags. E.g.
[('cat', 'NN'), ('sat', 'VBD'), ('on', 'IN'), ('mat', 'NN')]
goal_pos (str): Pos tag to look for in tagged_tokens
Returns:
res (List(str)): List of tokens with goal_pos pos tag
"""
res = []
for pt in tagged_tokens:
if pt[1].startswith(goal_pos):
res.append(pt[0])
return res
if len(label_cols) > 4:
raise ValueError("Maximum of 4 labels can be specidied for analysis.")
stop_words = set(stopwords.words(language))
punctuations = set(string.punctuation)
doc_lengths = []
token_to_count = {}
NNs = {}
JJs = {}
Vs = {}
logger.info("Processing text in %s column of the input DataFrame..." % text_col)
for text in tqdm(df[text_col]):
try:
tokens = text.lower().split(" ")
doc_lengths.append(len(tokens))
if skip_stopwords_punc:
tokens = [t for t in tokens if t not in stop_words and t not in punctuations]
update_count(token_to_count, tokens)
except Exception as e:
logger.warning("Processing entry --- %s --- lead to exception: %s" % (text, e.args[0]))
continue
postag_tokens = nltk.pos_tag(tokens)
nouns = get_pos(postag_tokens, "NN")
update_count(NNs, nouns)
verbs = get_pos(postag_tokens, "VB")
update_count(Vs, verbs)
adjectives = get_pos(postag_tokens, "JJ")
update_count(JJs, adjectives)
word_frequencies = [v for _, v in token_to_count.items()]
fig_main = create_adjust_subplots(label_cols)
logger.info("Generating distplots and word cloud for input text")
generate_text_plots(fig_main, doc_lengths, word_frequencies, NNs, JJs, Vs)
logger.info("Generating plots for labels")
generate_label_plots(fig_main, df, label_cols)
logger.info("Rendering plots")
fig_main.update_layout(height=3100, showlegend=False)
if save_report:
plotly.offline.plot(fig_main, filename=os.path.join(out_dir, "report.html"))
else:
fig_main.show()
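# Minimal usage sketch (column names, labels and data are illustrative):
#   import pandas as pd
#   df = pd.DataFrame({"text": ["the cat sat on the mat", "dogs bark loudly"],
#                      "sentiment": ["neutral", "neg"]})
#   generate_report(df, out_dir=".", text_col="text",
#                   label_cols=[("sentiment", "categorical")], save_report=True)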
| python | 16,959 |
"""Support for Ubiquiti's UVC cameras."""
import logging
import socket
import requests
import voluptuous as vol
from homeassistant.const import CONF_PORT, CONF_SSL
from homeassistant.components.camera import Camera, PLATFORM_SCHEMA
import homeassistant.helpers.config_validation as cv
from homeassistant.exceptions import PlatformNotReady
_LOGGER = logging.getLogger(__name__)
CONF_NVR = "nvr"
CONF_KEY = "key"
CONF_PASSWORD = "password"
DEFAULT_PASSWORD = "ubnt"
DEFAULT_PORT = 7080
DEFAULT_SSL = False
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_NVR): cv.string,
vol.Required(CONF_KEY): cv.string,
vol.Optional(CONF_PASSWORD, default=DEFAULT_PASSWORD): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_SSL, default=DEFAULT_SSL): cv.boolean,
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Discover cameras on a Unifi NVR."""
addr = config[CONF_NVR]
key = config[CONF_KEY]
password = config[CONF_PASSWORD]
port = config[CONF_PORT]
ssl = config[CONF_SSL]
from uvcclient import nvr
try:
# Exceptions may be raised in all method calls to the nvr library.
nvrconn = nvr.UVCRemote(addr, port, key, ssl=ssl)
cameras = nvrconn.index()
identifier = "id" if nvrconn.server_version >= (3, 2, 0) else "uuid"
# Filter out airCam models, which are not supported in the latest
# version of UnifiVideo and which are EOL by Ubiquiti
cameras = [
camera
for camera in cameras
if "airCam" not in nvrconn.get_camera(camera[identifier])["model"]
]
except nvr.NotAuthorized:
_LOGGER.error("Authorization failure while connecting to NVR")
return False
except nvr.NvrError as ex:
_LOGGER.error("NVR refuses to talk to me: %s", str(ex))
raise PlatformNotReady
except requests.exceptions.ConnectionError as ex:
_LOGGER.error("Unable to connect to NVR: %s", str(ex))
raise PlatformNotReady
add_entities(
[
UnifiVideoCamera(nvrconn, camera[identifier], camera["name"], password)
for camera in cameras
]
)
return True
class UnifiVideoCamera(Camera):
"""A Ubiquiti Unifi Video Camera."""
def __init__(self, nvr, uuid, name, password):
"""Initialize an Unifi camera."""
super(UnifiVideoCamera, self).__init__()
self._nvr = nvr
self._uuid = uuid
self._name = name
self._password = password
self.is_streaming = False
self._connect_addr = None
self._camera = None
self._motion_status = False
@property
def name(self):
"""Return the name of this camera."""
return self._name
@property
def is_recording(self):
"""Return true if the camera is recording."""
caminfo = self._nvr.get_camera(self._uuid)
return caminfo["recordingSettings"]["fullTimeRecordEnabled"]
@property
def motion_detection_enabled(self):
"""Camera Motion Detection Status."""
caminfo = self._nvr.get_camera(self._uuid)
return caminfo["recordingSettings"]["motionRecordEnabled"]
@property
def brand(self):
"""Return the brand of this camera."""
return "Ubiquiti"
@property
def model(self):
"""Return the model of this camera."""
caminfo = self._nvr.get_camera(self._uuid)
return caminfo["model"]
def _login(self):
"""Login to the camera."""
from uvcclient import camera as uvc_camera
caminfo = self._nvr.get_camera(self._uuid)
if self._connect_addr:
addrs = [self._connect_addr]
else:
addrs = [caminfo["host"], caminfo["internalHost"]]
if self._nvr.server_version >= (3, 2, 0):
client_cls = uvc_camera.UVCCameraClientV320
else:
client_cls = uvc_camera.UVCCameraClient
if caminfo["username"] is None:
caminfo["username"] = "ubnt"
camera = None
for addr in addrs:
try:
camera = client_cls(addr, caminfo["username"], self._password)
camera.login()
_LOGGER.debug(
"Logged into UVC camera %(name)s via %(addr)s",
dict(name=self._name, addr=addr),
)
self._connect_addr = addr
break
except socket.error:
pass
except uvc_camera.CameraConnectError:
pass
except uvc_camera.CameraAuthError:
pass
if not self._connect_addr:
_LOGGER.error("Unable to login to camera")
return None
self._camera = camera
return True
def camera_image(self):
"""Return the image of this camera."""
from uvcclient import camera as uvc_camera
if not self._camera:
if not self._login():
return
def _get_image(retry=True):
try:
return self._camera.get_snapshot()
except uvc_camera.CameraConnectError:
_LOGGER.error("Unable to contact camera")
except uvc_camera.CameraAuthError:
if retry:
self._login()
return _get_image(retry=False)
_LOGGER.error("Unable to log into camera, unable to get snapshot")
raise
return _get_image()
def set_motion_detection(self, mode):
"""Set motion detection on or off."""
from uvcclient.nvr import NvrError
if mode is True:
set_mode = "motion"
else:
set_mode = "none"
try:
self._nvr.set_recordmode(self._uuid, set_mode)
self._motion_status = mode
except NvrError as err:
_LOGGER.error("Unable to set recordmode to %s", set_mode)
_LOGGER.debug(err)
def enable_motion_detection(self):
"""Enable motion detection in camera."""
self.set_motion_detection(True)
def disable_motion_detection(self):
"""Disable motion detection in camera."""
self.set_motion_detection(False)
| python | 6,348 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2018-10-24 07:28
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('core', '0009_auto_20181024_1114'),
]
operations = [
migrations.RemoveField(
model_name='patient',
name='filename',
),
]
| python | 392 |
/usr/lib/python3.6/heapq.py | python | 27 |
# Copyright (c) 2014-2018, NVIDIA Corporation. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
This sample illustrates a simple CUDA source to PTX compiler implemented using
NVRTC. All command-line options are passed along to NVRTC. Arguments that
start with '-' are assumed to be options and are passed along accordingly.
Otherwise, the option is treated as a file name and is read as input.
NOTE: If you get errors about not being able to load nvrtc, please make sure
your [DY]LD_LIBRARY_PATH/PATH environment variable points to the nvrtc binary
in your CUDA installation, e.g.
$ export LD_LIBRARY_PATH=/usr/local/cuda-9.2/lib64:$LD_LIBRARY_PATH
"""
import sys
from pynvrtc.compiler import Program, ProgramException
if len(sys.argv) < 2:
print('Usage: %s [options] <cuda source file>' % sys.argv[0])
sys.exit(1)
try:
src = None
options = []
# Parse all options
for a in sys.argv[1:]:
if a.startswith('-'):
# Treat as compiler option
options.append(a)
else:
# Treat as compiler input
with open(a, 'rb') as f:
src = f.read()
# Create program object
p = Program(src)
# Run the compile
ptx = p.compile(options)
# Dump the output to stdout
print(ptx)
sys.exit(0)
except ProgramException as e:
# An error occurred, dump it to stdout
print('ERROR:\n%s\n' % repr(e))
| python | 2,456 |
'''Locally Linear Embedding for Regression'''
import numpy as np
from scipy.sparse import eye as speye
from scipy.sparse.csgraph import laplacian
from sklearn.manifold.locally_linear import (
barycenter_kneighbors_graph, null_space, LocallyLinearEmbedding)
from sklearn.metrics.pairwise import pairwise_distances, rbf_kernel
from sklearn.neighbors import NearestNeighbors
def ller(X, Y, n_neighbors, n_components, mu=0.5, gamma=None, reg=1e-3,
eigen_solver='auto', tol=1e-6, max_iter=100, random_state=None):
"""
Locally Linear Embedding for Regression (LLER)
Parameters
----------
X : ndarray, 2-dimensional
The data matrix, shape (num_points, num_dims)
Y : ndarray, 1 or 2-dimensional
The response matrix, shape (num_points, num_responses).
n_neighbors : int
Number of neighbors for kNN graph construction.
n_components : int
Number of dimensions for embedding.
mu : float, optional
Influence of the Y-similarity penalty.
gamma : float, optional
Scaling factor for RBF kernel on Y.
Defaults to the inverse of the median distance between rows of Y.
Returns
-------
embedding : ndarray, 2-dimensional
The embedding of X, shape (num_points, n_components)
lle_error : float
The embedding error of X (for a fixed reconstruction matrix W)
ller_error : float
The embedding error of X that takes Y into account.
"""
if eigen_solver not in ('auto', 'arpack', 'dense'):
raise ValueError("unrecognized eigen_solver '%s'" % eigen_solver)
if Y.ndim == 1:
Y = Y[:, None]
if gamma is None:
dists = pairwise_distances(Y)
gamma = 1.0 / np.median(dists)
nbrs = NearestNeighbors(n_neighbors=n_neighbors + 1)
nbrs.fit(X)
X = nbrs._fit_X
Nx, d_in = X.shape
Ny = Y.shape[0]
if n_components > d_in:
raise ValueError("output dimension must be less than or equal "
"to input dimension")
if n_neighbors >= Nx:
raise ValueError("n_neighbors must be less than number of points")
if n_neighbors <= 0:
raise ValueError("n_neighbors must be positive")
if Nx != Ny:
raise ValueError("X and Y must have same number of points")
M_sparse = (eigen_solver != 'dense')
W = barycenter_kneighbors_graph(
nbrs, n_neighbors=n_neighbors, reg=reg)
if M_sparse:
M = speye(*W.shape, format=W.format) - W
M = (M.T * M).tocsr()
else:
M = (W.T * W - W.T - W).toarray()
M.flat[::M.shape[0] + 1] += 1
P = rbf_kernel(Y, gamma=gamma)
L = laplacian(P, normed=False)
M /= np.abs(M).max() # optional scaling step
L /= np.abs(L).max()
omega = M + mu * L
embedding, lle_error = null_space(omega, n_components, k_skip=1,
eigen_solver=eigen_solver, tol=tol,
max_iter=max_iter,
random_state=random_state)
ller_error = np.trace(embedding.T.dot(L).dot(embedding))
return embedding, lle_error, ller_error
class LLER(LocallyLinearEmbedding):
"""Scikit-learn compatible class for LLER."""
def __init__(self, n_neighbors=5, n_components=2, reg=1E-3,
eigen_solver='auto', tol=1E-6, max_iter=100,
neighbors_algorithm='auto', random_state=None,
mu=0.5, gamma=None):
self.n_neighbors = n_neighbors
self.n_components = n_components
self.reg = reg
self.eigen_solver = eigen_solver
self.tol = tol
self.max_iter = max_iter
self.random_state = random_state
self.neighbors_algorithm = neighbors_algorithm
self.mu = mu
self.gamma = gamma
def fit_transform(self, X, Y):
self.fit(X, Y)
return self.embedding_
def fit(self, X, Y):
self.nbrs_ = NearestNeighbors(self.n_neighbors,
algorithm=self.neighbors_algorithm)
self.nbrs_.fit(X)
self.embedding_, self.lle_error_, self.ller_error_ = ller(
self.nbrs_, Y, self.n_neighbors, self.n_components,
eigen_solver=self.eigen_solver, tol=self.tol,
max_iter=self.max_iter, random_state=self.random_state,
mu=self.mu, gamma=self.gamma)
return self
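# --- Hedged usage sketch (added for illustration, not part of the original) ----
# Fits LLER on synthetic data; the shapes and hyper-parameters are arbitrary and
# the example assumes the legacy scikit-learn API imported above
# (sklearn.manifold.locally_linear) is available.
def _example_ller_usage():
    """Fit LLER on random data and return the embedding shape (expected (100, 2))."""
    rng = np.random.RandomState(0)
    X = rng.rand(100, 10)              # 100 samples in 10 dimensions
    Y = X[:, 0] + 0.1 * rng.rand(100)  # a response loosely tied to the first feature
    model = LLER(n_neighbors=10, n_components=2, mu=0.5)
    embedding = model.fit_transform(X, Y)
    return embedding.shape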
| python | 4,413 |
from .models import db
def get_all(model):
data = model.query.all()
return data
def add_instance(model, **kwargs):
instance = model(**kwargs)
db.session.add(instance)
commit_changes()
def delete_instance(model, id):
model.query.filter_by(id=id).delete()
commit_changes()
def edit_instances(model, id, **kwargs):
instance = model.query.filter_by(id=id).all()[0]
    for attr, new_value in kwargs.items():
setattr(instance, attr, new_value)
commit_changes()
def commit_changes():
db.session.commit()
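# Hedged usage sketch (hypothetical model and fields; requires an active
# Flask-SQLAlchemy application context):
#     add_instance(User, username="alice")
#     edit_instances(User, 1, username="bob")
#     delete_instance(User, 1)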
| python | 548 |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from .. import _utilities, _tables
__all__ = ['GcmChannel']
class GcmChannel(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
api_key: Optional[pulumi.Input[str]] = None,
application_id: Optional[pulumi.Input[str]] = None,
enabled: Optional[pulumi.Input[bool]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Provides a Pinpoint GCM Channel resource.
## Example Usage
```python
import pulumi
import pulumi_aws as aws
app = aws.pinpoint.App("app")
gcm = aws.pinpoint.GcmChannel("gcm",
application_id=app.application_id,
api_key="api_key")
```
## Import
Pinpoint GCM Channel can be imported using the `application-id`, e.g.
```sh
$ pulumi import aws:pinpoint/gcmChannel:GcmChannel gcm application-id
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] api_key: Platform credential API key from Google.
:param pulumi.Input[str] application_id: The application ID.
:param pulumi.Input[bool] enabled: Whether the channel is enabled or disabled. Defaults to `true`.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
if api_key is None and not opts.urn:
raise TypeError("Missing required property 'api_key'")
__props__['api_key'] = api_key
if application_id is None and not opts.urn:
raise TypeError("Missing required property 'application_id'")
__props__['application_id'] = application_id
__props__['enabled'] = enabled
super(GcmChannel, __self__).__init__(
'aws:pinpoint/gcmChannel:GcmChannel',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
api_key: Optional[pulumi.Input[str]] = None,
application_id: Optional[pulumi.Input[str]] = None,
enabled: Optional[pulumi.Input[bool]] = None) -> 'GcmChannel':
"""
Get an existing GcmChannel resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] api_key: Platform credential API key from Google.
:param pulumi.Input[str] application_id: The application ID.
:param pulumi.Input[bool] enabled: Whether the channel is enabled or disabled. Defaults to `true`.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["api_key"] = api_key
__props__["application_id"] = application_id
__props__["enabled"] = enabled
return GcmChannel(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="apiKey")
def api_key(self) -> pulumi.Output[str]:
"""
Platform credential API key from Google.
"""
return pulumi.get(self, "api_key")
@property
@pulumi.getter(name="applicationId")
def application_id(self) -> pulumi.Output[str]:
"""
The application ID.
"""
return pulumi.get(self, "application_id")
@property
@pulumi.getter
def enabled(self) -> pulumi.Output[Optional[bool]]:
"""
Whether the channel is enabled or disabled. Defaults to `true`.
"""
return pulumi.get(self, "enabled")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| python | 5,379 |
import networkx as nx
with open("input.txt") as f:
data = [l.split("-") for l in f.read().splitlines()]
g = nx.Graph()
for d in data:
g.add_edge(d[0], d[1])
# Original function from https://stackoverflow.com/questions/24471136/how-to-find-all-paths-between-two-graph-nodes
def find_all_paths(graph: nx.Graph, start: str, end: str, path: list[str] = []):
path = path + [start]
if start == end:
return [path]
if start not in graph:
return []
paths = []
node: str
for node in nx.neighbors(graph, start):
if node not in path or node.isupper():
newpaths = find_all_paths(graph, node, end, path)
for newpath in newpaths:
paths.append(newpath)
return paths
print(len(find_all_paths(g, "start", "end")))
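# Hedged note (illustrative, not from the source): each line of input.txt is an
# edge written as "a-b", e.g. "start-A", "A-c", "b-end". Lowercase (small) caves
# may appear at most once per path, while uppercase (big) caves may be revisited,
# which is what the `node.isupper()` check above allows.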
| python | 727 |
from __future__ import print_function
import sys
if len(sys.argv) != 4:
print('Usage:')
print('python train.py datacfg cfgfile weightfile')
exit()
import time
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.backends.cudnn as cudnn
from torchvision import datasets, transforms
from torch.autograd import Variable
import dataset
import random
import math
import os
from utils import *
from cfg import parse_cfg
from region_loss import RegionLoss
from darknet import Darknet
from models.tiny_yolo import TinyYoloNet
# Training settings
datacfg = sys.argv[1]
cfgfile = sys.argv[2]
weightfile = sys.argv[3]
data_options = read_data_cfg(datacfg)
net_options = parse_cfg(cfgfile)[0]
trainlist = data_options['train']
testlist = data_options['valid']
backupdir = data_options['backup']
nsamples = file_lines(trainlist)
gpus = data_options['gpus'] # e.g. 0,1,2,3
ngpus = len(gpus.split(','))
num_workers = int(data_options['num_workers'])
# batch_size = int(net_options['batch'])
batch_size = int(net_options['batch'])
max_batches = int(net_options['max_batches'])
learning_rate = float(net_options['learning_rate'])
momentum = float(net_options['momentum'])
decay = float(net_options['decay'])
steps = [float(step) for step in net_options['steps'].split(',')]
scales = [float(scale) for scale in net_options['scales'].split(',')]
# Train parameters
max_epochs = max_batches * batch_size / nsamples + 1
use_cuda = True
seed = int(time.time())
eps = 1e-5
save_interval = 10  # epochs
dot_interval = 70 # batches
# Test parameters
conf_thresh = 0.25
nms_thresh = 0.4
iou_thresh = 0.5
if not os.path.exists(backupdir):
os.mkdir(backupdir)
###############
torch.manual_seed(seed)
if use_cuda:
os.environ['CUDA_VISIBLE_DEVICES'] = gpus
torch.cuda.manual_seed(seed)
model = Darknet(cfgfile)
region_loss = model.loss
model.load_weights(weightfile)
model.print_network()
region_loss.seen = model.seen
processed_batches = model.seen / batch_size
init_width = model.width
init_height = model.height
init_epoch = model.seen / nsamples
kwargs = {'num_workers': num_workers, 'pin_memory': True} if use_cuda else {}
test_loader = torch.utils.data.DataLoader(
dataset.listDataset(testlist, shape=(init_width, init_height),
shuffle=False,
transform=transforms.Compose([
transforms.ToTensor(),
lambda x: 2 * (x - 0.5)
]), train=False),
batch_size=batch_size, shuffle=False, **kwargs)
if use_cuda:
if ngpus > 1:
model = torch.nn.DataParallel(model).cuda()
else:
model = model.cuda()
params_dict = dict(model.named_parameters())
params = []
for key, value in params_dict.items():
if key.find('.bn') >= 0 or key.find('.bias') >= 0:
params += [{'params': [value], 'weight_decay': 0.0}]
else:
params += [{'params': [value], 'weight_decay': decay * batch_size}]
optimizer = optim.SGD(model.parameters(), lr=learning_rate / batch_size,
momentum=momentum, dampening=0, weight_decay=decay * batch_size)
def adjust_learning_rate(optimizer, batch):
"""Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
lr = learning_rate
for i in range(len(steps)):
scale = scales[i] if i < len(scales) else 1
if batch >= steps[i]:
lr = lr * scale
if batch == steps[i]:
break
else:
break
for param_group in optimizer.param_groups:
param_group['lr'] = lr / batch_size
return lr
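# Hedged worked example (hypothetical cfg values): with learning_rate = 0.001,
# steps = [100, 20000, 30000] and scales = [10, 0.1, 0.1], a call with
# batch = 50 leaves lr at 0.001, batch = 150 yields 0.001 * 10 = 0.01, and
# batch = 25000 yields 0.001 * 10 * 0.1 = 0.001; the value written into each
# param_group is then lr / batch_size.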
def train(epoch):
global processed_batches
t0 = time.time()
if ngpus > 1:
cur_model = model.module
else:
cur_model = model
train_loader = torch.utils.data.DataLoader(
dataset.listDataset(trainlist, shape=(init_width, init_height),
shuffle=True,
transform=transforms.Compose([
transforms.ToTensor(),
lambda x: 2 * (x - 0.5)
]),
train=True,
seen=cur_model.seen,
batch_size=batch_size,
num_workers=num_workers),
batch_size=batch_size, shuffle=False, **kwargs)
lr = adjust_learning_rate(optimizer, processed_batches)
logging('epoch %d, processed %d samples, lr %f' %
(epoch, epoch * len(train_loader.dataset), lr))
model.train()
t1 = time.time()
avg_time = torch.zeros(9)
for batch_idx, (data, target) in enumerate(train_loader):
t2 = time.time()
adjust_learning_rate(optimizer, processed_batches)
processed_batches = processed_batches + 1
# if (batch_idx+1) % dot_interval == 0:
# sys.stdout.write('.')
if use_cuda:
data = data.cuda()
#target= target.cuda()
t3 = time.time()
data, target = Variable(data), Variable(target)
t4 = time.time()
optimizer.zero_grad()
t5 = time.time()
output = model(data)
t6 = time.time()
region_loss.seen = region_loss.seen + data.data.size(0)
loss = region_loss(output, target)
t7 = time.time()
loss.backward()
t8 = time.time()
optimizer.step()
t9 = time.time()
if False and batch_idx > 1:
avg_time[0] = avg_time[0] + (t2 - t1)
avg_time[1] = avg_time[1] + (t3 - t2)
avg_time[2] = avg_time[2] + (t4 - t3)
avg_time[3] = avg_time[3] + (t5 - t4)
avg_time[4] = avg_time[4] + (t6 - t5)
avg_time[5] = avg_time[5] + (t7 - t6)
avg_time[6] = avg_time[6] + (t8 - t7)
avg_time[7] = avg_time[7] + (t9 - t8)
avg_time[8] = avg_time[8] + (t9 - t1)
print('-------------------------------')
print(' load data : %f' % (avg_time[0] / (batch_idx)))
print(' cpu to cuda : %f' % (avg_time[1] / (batch_idx)))
print('cuda to variable : %f' % (avg_time[2] / (batch_idx)))
print(' zero_grad : %f' % (avg_time[3] / (batch_idx)))
print(' forward feature : %f' % (avg_time[4] / (batch_idx)))
print(' forward loss : %f' % (avg_time[5] / (batch_idx)))
print(' backward : %f' % (avg_time[6] / (batch_idx)))
print(' step : %f' % (avg_time[7] / (batch_idx)))
print(' total : %f' % (avg_time[8] / (batch_idx)))
t1 = time.time()
print('')
t1 = time.time()
logging('training with %f samples/s' %
(len(train_loader.dataset) / (t1 - t0)))
if (epoch + 1) % save_interval == 0:
logging('save weights to %s/%06d.weights' % (backupdir, epoch + 1))
cur_model.seen = (epoch + 1) * len(train_loader.dataset)
cur_model.save_weights('%s/%06d.weights' % (backupdir, epoch + 1))
def test(epoch):
def truths_length(truths):
for i in range(50):
if truths[i][1] == 0:
return i
model.eval()
if ngpus > 1:
cur_model = model.module
else:
cur_model = model
num_classes = cur_model.num_classes
anchors = cur_model.anchors
num_anchors = cur_model.num_anchors
total = 0.0
proposals = 0.0
correct = 0.0
for batch_idx, (data, target) in enumerate(test_loader):
if use_cuda:
data = data.cuda()
data = Variable(data, volatile=True)
output = model(data).data
all_boxes = get_region_boxes(
output, conf_thresh, num_classes, anchors, num_anchors)
for i in range(output.size(0)):
boxes = all_boxes[i]
boxes = nms(boxes, nms_thresh)
truths = target[i].view(-1, 5)
num_gts = truths_length(truths)
total = total + num_gts
for i in range(len(boxes)):
if boxes[i][4] > conf_thresh:
proposals = proposals + 1
for i in range(num_gts):
box_gt = [truths[i][1], truths[i][2], truths[i]
[3], truths[i][4], 1.0, 1.0, truths[i][0]]
best_iou = 0
best_j = -1
for j in range(len(boxes)):
iou = bbox_iou(box_gt, boxes[j], x1y1x2y2=False)
if iou > best_iou:
best_j = j
best_iou = iou
if best_iou > iou_thresh and boxes[best_j][6] == box_gt[6]:
correct = correct + 1
precision = 1.0 * correct / (proposals + eps)
recall = 1.0 * correct / (total + eps)
fscore = 2.0 * precision * recall / (precision + recall + eps)
logging("precision: %f, recall: %f, fscore: %f" %
(precision, recall, fscore))
evaluate = False
if evaluate:
logging('evaluating ...')
test(0)
else:
for epoch in range(int(init_epoch), int(max_epochs)):
train(epoch)
test(epoch)
| python | 9,191 |
import shutil
shutil.rmtree("docs")
| python | 36 |
import setuptools
import os
import sys
import distutils.spawn
from setuptools import setup, Extension
from setuptools.command.build_ext import build_ext
from distutils.errors import *
import pickle
__version__ = "0.8"
# Disable hard links, otherwise building distributions fails on OS X
try:
del os.link
except:
pass
# On Windows, we need ws2_32 and iphlpapi
if getattr(sys, 'getwindowsversion', None):
libraries = ['ws2_32', 'iphlpapi']
def_macros = [('WIN32', 1)]
else:
mos = getattr(sys, 'platform', None)
libraries = []
if mos.startswith('sunos'):
libraries = ['socket', 'nsl']
def_macros = []
def_macros.append(("NETIFACES_VERSION", __version__))
iface_mod = Extension('netifaces', sources=['netifaces.c'],
libraries=libraries,
define_macros=def_macros)
#
# There must be a better way to do this...
#
class my_build_ext(build_ext):
def build_extensions(self):
self.check_requirements()
build_ext.build_extensions(self)
def test_build(self, contents, link=True, execute=False, libraries=None,
include_dirs=None, library_dirs=None):
name = os.path.join(self.build_temp, 'conftest-%s.c' % self.conftestidx)
self.conftestidx += 1
if os.path.exists(name):
os.unlink(name)
thefile = open(name, 'w')
print >>thefile, contents
thefile.close()
sys.stdout.flush()
sys.stderr.flush()
mystdout = os.dup(1)
mystderr = os.dup(2)
result = True
try:
os.dup2(self.ctout, 1)
os.dup2(self.ctout, 2)
try:
objects = self.compiler.compile([name],
output_dir=self.build_temp,
include_dirs=include_dirs,
debug=self.debug)
if link:
self.compiler.link_executable(objects,
'conftest',
output_dir=self.build_temp,
library_dirs=library_dirs,
libraries=libraries,
debug=self.debug)
if execute:
abspath = os.path.abspath(os.path.join(self.build_temp,
'conftest'))
pipe = os.popen(abspath, 'r')
result = pipe.read().strip()
status = pipe.close()
if status is None:
status = 0
if result == '':
result = True
if status != 0:
result = False
finally:
os.dup2(mystdout, 1)
os.dup2(mystderr, 2)
except CompileError:
return False
except DistutilsExecError:
return False
return result
def check_requirements(self):
# Load the cached config data from a previous run if possible; compiling
# things to test for features is slow
cache_file = os.path.join(self.build_temp, 'config.cache')
if os.path.exists(cache_file):
myfile = open(cache_file, 'r')
try:
results = pickle.load(myfile)
finally:
myfile.close()
else:
results = {}
self.conftestidx = 0
print "checking for getifaddrs...",
result = results.get('have_getifaddrs', None)
if result is not None:
cached = '(cached)'
else:
cached = ''
if not os.path.exists(self.build_temp):
os.makedirs(self.build_temp)
outname = os.path.join(self.build_temp, 'conftest.out')
self.ctout = os.open(outname, os.O_RDWR | os.O_CREAT | os.O_TRUNC)
testrig = """
#include <sys/types.h>
#include <sys/socket.h>
#include <ifaddrs.h>
int main(void) {
struct ifaddrs *addrs;
int ret;
ret = getifaddrs(&addrs);
freeifaddrs (addrs);
return 0;
}
"""
if self.test_build(testrig):
result = True
else:
result = False
if result:
print "found. %s" % cached
self.compiler.define_macro('HAVE_GETIFADDRS', 1)
else:
print "not found. %s" % cached
results['have_getifaddrs'] = result
print "checking for getnameinfo...",
result = results.get('have_getnameinfo', None)
if result is not None:
cached = '(cached)'
else:
cached = ''
if not os.path.exists(self.build_temp):
os.makedirs(self.build_temp)
outname = os.path.join(self.build_temp, 'conftest2.out')
self.ctout = os.open(outname, os.O_RDWR | os.O_CREAT | os.O_TRUNC)
testrig = """
#include <sys/types.h>
#include <sys/socket.h>
#include <arpa/inet.h>
#include <netdb.h>
#include <stdlib.h>
int main(void) {
struct sockaddr_in sin;
char buffer[256];
int ret;
sin.sin_family = AF_INET;
sin.sin_port = 0;
sin.sin_addr.s_addr = htonl (INADDR_LOOPBACK);
ret = getnameinfo ((struct sockaddr *)&sin, sizeof (sin),
buffer, sizeof (buffer),
NULL, 0,
NI_NUMERICHOST);
return 0;
}
"""
if self.test_build(testrig,libraries=libraries):
result = True
else:
result = False
if result:
print "found. %s" % cached
self.compiler.define_macro('HAVE_GETNAMEINFO', 1)
else:
print "not found. %s" % cached
results['have_getnameinfo'] = result
if not results['have_getifaddrs']:
print "checking for socket IOCTLs...",
result = results.get('have_socket_ioctls', None)
if result is not None:
cached = '(cached)'
else:
cached = ''
if not os.path.exists(self.build_temp):
os.makedirs(self.build_temp)
outname = os.path.join(self.build_temp, 'conftest3.out')
self.ctout = os.open(outname, os.O_RDWR | os.O_CREAT | os.O_TRUNC)
result = []
ioctls = ('SIOCGIFCONF',
'SIOCGSIZIFCONF',
'SIOCGIFHWADDR',
'SIOCGIFADDR',
'SIOCGIFFLAGS',
'SIOCGIFDSTADDR',
'SIOCGIFBRDADDR',
'SIOCGIFNETMASK',
'SIOCGLIFNUM',
'SIOCGLIFCONF',
'SIOCGLIFFLAGS')
added_includes = ""
if mos.startswith('sunos'):
added_includes = """
#include <unistd.h>
#include <stropts.h>
#include <sys/sockio.h>
"""
for ioctl in ioctls:
testrig = """
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <netinet/in.h>
#include <arpa/inet.h>
%(addedinc)s
int main(void) {
int fd = socket (AF_INET, SOCK_DGRAM, IPPROTO_IP);
struct ifreq ifreq;
ioctl(fd, %(ioctl)s, &ifreq);
return 0;
}
""" % { 'ioctl': ioctl , 'addedinc': added_includes}
if self.test_build(testrig,libraries=libraries):
result.append(ioctl)
if result:
print "%r. %s" % (result, cached)
for ioctl in result:
self.compiler.define_macro('HAVE_%s' % ioctl, 1)
self.compiler.define_macro('HAVE_SOCKET_IOCTLS', 1)
else:
print "not found. %s" % cached
results['have_socket_ioctls'] = result
print "checking for optional header files...",
result = results.get('have_headers', None)
if result is not None:
cached = '(cached)'
else:
cached = ''
result =[]
headers = ('net/if_dl.h', 'netash/ash.h',
'netatalk/at.h', 'netax25/ax25.h',
'neteconet/ec.h', 'netipx/ipx.h',
'netpacket/packet.h', 'netrose/rose.h',
'linux/irda.h', 'linux/atm.h',
'linux/llc.h', 'linux/tipc.h',
'linux/dn.h')
for header in headers:
testrig = """
#include <sys/types.h>
#include <sys/socket.h>
#include <net/if.h>
#include <%s>
int main (void) { return 0; }
""" % header
if self.test_build(testrig, link=False):
result.append(header)
if result:
print "%s. %s" % (' '.join(result), cached)
for header in result:
macro = header.upper().replace('.', '_').replace('/', '_')
self.compiler.define_macro('HAVE_%s' % macro, 1)
else:
print "none found. %s" % cached
optional_headers = result
results['have_headers'] = result
print "checking whether struct sockaddr has a length field...",
result = results.get('have_sockaddr_sa_len', None)
if result is not None:
cached = '(cached)'
else:
cached = ''
testrig = """
#include <sys/types.h>
#include <sys/socket.h>
#include <net/if.h>
int main (void) {
struct sockaddr sa;
sa.sa_len = 5;
return 0;
}
"""
result = self.test_build(testrig)
if result:
print 'yes. %s' % cached
self.compiler.define_macro('HAVE_SOCKADDR_SA_LEN', 1)
else:
print 'no. %s' % cached
results['have_sockaddr_sa_len'] = result
if not results['have_sockaddr_sa_len']:
# GAK! On certain stupid platforms (Linux), there's no sa_len.
# Macho Linux programmers apparently think that it's not needed,
# however, unfortunately, getifaddrs() doesn't return the
# lengths, because they're in the sa_len field on just about
# everything but Linux.
print "checking which sockaddr_xxx structs are defined...",
result = results.get('have_sockaddrs', None)
if result is not None:
cached = '(cached)'
else:
cached = ''
if not os.path.exists(self.build_temp):
os.makedirs(self.build_temp)
outname = os.path.join(self.build_temp, 'conftest4.out')
self.ctout = os.open(outname, os.O_RDWR | os.O_CREAT | os.O_TRUNC)
sockaddrs = ('at', 'ax25', 'dl', 'eon', 'in', 'in6',
'inarp', 'ipx', 'iso', 'ns', 'un', 'x25',
'rose', 'ash', 'ec', 'll', 'atmpvc', 'atmsvc',
'dn', 'irda', 'llc')
result = []
for sockaddr in sockaddrs:
testrig = """
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <net/if.h>
#include <netinet/in.h>
%(includes)s
int main (void) {
struct sockaddr_%(sockaddr)s sa;
return 0;
}
""" % { 'includes': '\n'.join(["#include <%s>" % header
for header
in optional_headers]),
'sockaddr': sockaddr }
if self.test_build(testrig):
result.append(sockaddr)
if result:
print '%s. %s' % (' '.join(result), cached)
for sockaddr in result:
self.compiler.define_macro('HAVE_SOCKADDR_%s' \
% sockaddr.upper(), 1)
else:
print 'none! %s' % cached
results['have_sockaddrs'] = result
# Save the results to our config.cache file
myfile = open(cache_file, 'w')
try:
pickle.dump(results, myfile)
finally:
myfile.close()
# Don't bother detecting socket ioctls on Windows
if not getattr(sys, 'getwindowsversion', None):
setuptools.command.build_ext.build_ext = my_build_ext
setup (name='netifaces',
version=__version__,
description="Portable network interface information.",
license="MIT License",
long_description="""\
netifaces provides a (hopefully portable-ish) way for Python programmers to
get access to a list of the network interfaces on the local machine, and to
obtain the addresses of those network interfaces.
The package has been tested on Mac OS X, Windows XP, Windows Vista, Linux
and Solaris.
It should work on other UNIX-like systems provided they implement
either getifaddrs() or support the SIOCGIFxxx socket options, although the
data provided by the socket options is normally less complete.
""",
author='Alastair Houghton',
author_email='[email protected]',
url='http://alastairs-place.net/netifaces',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Topic :: System :: Networking',
],
ext_modules=[iface_mod])
| python | 14,886 |
print('='*50)
print('IDEAL WEIGHT'.center(50))
print('='*50)
linha = '\033[1;96m=\033[m' * 50
def peso():
    sexo = int(input('''Are you:
[ 1 ] Male
[ 2 ] Female
Enter the number of your choice: '''))
print('='*50)
    altura = float(input('\nEnter your height (in meters): '))
print(f'\n{linha}')
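    # The formulas below compute ideal weight from height h in meters:
    # 72.7*h - 58 for men and 62.1*h - 44.7 for women. Worked example (added
    # for illustration): for h = 1.75 m, 72.7*1.75 - 58 = 69.225 kg.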
if sexo == 1:
        print(f'\nYour ideal weight is: {(72.7*altura) - 58:.3f}Kg')
elif sexo == 2:
        print(f'\nYour ideal weight is: {(62.1*altura) - 44.7:.3f}Kg')
print(f'\n{linha}')
peso() | python | 520 |
"""
Copyright (c) 2017-2021 Vanessa Sochat
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from pydicom.dataset import Dataset
from deid.logger import bot
import re
# These filters are based off the the CTP Dicom Filter
# http://mircwiki.rsna.org/index.php?title=The_CTP_DICOM_Filter
# We don't apply them to the tag, as in their examples:
# !ImageType.contains("SECONDARY")
# we apply them to the dataset, with the tag as an argument:
# dicom.contains("ImageType","SECONDARY")
def apply_filter(dicom, field, filter_name, value):
"""essentially a switch statement to apply a filter to a dicom file.
Parameters
==========
dicom: the pydicom.dataset Dataset (pydicom.read_file)
field: the name of the field to apply the filter to
    filter_name: the name of the filter to apply (e.g., contains)
value: the value to set, if filter_name is valid
"""
filter_name = filter_name.lower().strip()
if filter_name == "contains":
return dicom.contains(field, value)
if filter_name == "notcontains":
return dicom.notContains(field, value)
elif filter_name == "equals":
return dicom.equals(field, value)
elif filter_name == "missing":
return dicom.missing(field)
elif filter_name == "present":
return not dicom.missing(field)
elif filter_name == "empty":
return dicom.empty(field)
elif filter_name == "notequals":
return dicom.notEquals(field, value)
bot.warning("%s is not a valid filter name, returning False" % filter_name)
return False
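# Hedged usage sketch (illustrative only): given a Dataset `dicom` loaded with
# pydicom.dcmread, a rule such as
#     apply_filter(dicom, "ImageType", "contains", "SECONDARY")
# returns True when the ImageType field contains "SECONDARY", mirroring the CTP
# expression ImageType.contains("SECONDARY") referenced above.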
################################################################################
# Equals
################################################################################
def equalsBase(self, field, term, ignore_case=True, not_equals=False):
"""base of equals, with variable for ignore case (default True)"""
is_equal = False
contenders = self.get(field)
if not isinstance(contenders, list):
contenders = [contenders]
# In this loop we can only switch to True
for contender in contenders:
if contender is not None:
try:
# both converted to string (handles tags)
contender = str(contender)
term = str(term)
if ignore_case:
contender = contender.lower().strip()
term = term.lower().strip()
if contender == term:
is_equal = True
except AttributeError:
pass # we are dealing with number or sequence
# If we want to know not_equals, reverse
if not_equals is True:
is_equal = not is_equal
return is_equal
def equals(self, field, term):
"""returns true if the value of the identifier exactly
equals the string argument; otherwise, it returns false."""
return self.equalsBase(field, term)
def notEquals(self, field, term):
return self.equalsBase(field=field, term=term, not_equals=True)
Dataset.equalsBase = equalsBase
Dataset.equals = equals
Dataset.notEquals = notEquals
################################################################################
# Empty and Null
#
# missing: means the field is not present (None)
# empty: means the field is present and empty
################################################################################
def missing(self, field):
"""missing returns True if the dicom is missing the field entirely
This means that the entire field is None
"""
content = self.get(field)
if content is None:
return True
return False
def empty(self, field):
"""empty returns True if the value is found to be "". If the field
is not present for the dicom, then we return False (missing != empty)
"""
if field not in self:
return False
content = self.get(field)
# Case 1: No content (empty list or none)
if not content:
return True
if hasattr(content, "_list"):
return len(content) == 0
# This is the case of a data element
elif not isinstance(content, str):
content = content.value
if content == "":
return True
return False
Dataset.empty = empty
Dataset.missing = missing
################################################################################
# Matches and Contains
#
# contains: searches across entire field
# matches: looks for exact match
################################################################################
def compareBase(self, field, expression, func, ignore_case=True):
"""compareBase takes either re.search (for contains) or
re.match (for matches) and returns True if the given regular
expression is contained or matched
"""
is_match = False
contenders = self.get(field)
if not isinstance(contenders, list):
contenders = [contenders]
for contender in contenders:
if contender is not None:
try:
contender = str(contender)
expression = str(expression)
if ignore_case:
contender = contender.lower().strip()
expression = expression.lower().strip()
if func(expression, contender):
is_match = True
except AttributeError:
pass # we are dealing with number or sequence
return is_match
def matches(self, field, expression):
"""matches returns true if the value of the identifier matches
the regular expression specified in the string argument;
otherwise, it returns false.
"""
return self.compareBase(field=field, expression=expression, func=re.match)
def contains(self, field, expression):
"""contains returns true if the value of the identifier
    contains the string argument anywhere within it;
otherwise, it returns false.
"""
return self.compareBase(field=field, expression=expression, func=re.search)
def notContains(self, field, expression):
"""notContains returns true if the value of the identifier
    does not contain the string argument anywhere within it;
    otherwise, it returns false.
    """
return not self.compareBase(field=field, expression=expression, func=re.search)
Dataset.compareBase = compareBase
Dataset.matches = matches
Dataset.contains = contains
Dataset.notContains = notContains
################################################################################
# Starts and Endswith
################################################################################
def startsWith(self, field, term):
"""startsWith returns true if the value of the identifier
starts with the string argument; otherwise, it returns false.
"""
expression = "^%s" % term
return self.compareBase(field=field, expression=expression, func=re.match)
def endsWith(self, field, term):
"""endsWith returns true if the value of the identifier ends with
the string argument; otherwise, it returns false.
"""
expression = "%s$" % term
return self.compareBase(field=field, expression=expression, func=re.match)
Dataset.startsWith = startsWith
Dataset.endsWith = endsWith
| python | 8,169 |
"""Class instance for Transformer
"""
import datetime
import logging
import os
from pyclowder.utils import setup_logging as pyc_setup_logging
import piexif
import configuration
# EXIF tags to look for, see https://www.exiv2.org/tags.html
EXIF_ORIGINAL_TIMESTAMP = 36867 # Capture timestamp
EXIF_TIMESTAMP_OFFSET = 36880  # Timestamp UTC offset (general OffsetTime tag, 0x9010)
EXIF_ORIGINAL_TIMESTAMP_OFFSET = 36881 # Capture timestamp UTC offset
class __internal__():
"""Class containing functions for this file only
"""
def __init__(self):
"""Perform class level initialization
"""
@staticmethod
def fromisoformat(timestamp):
"""Converts YYYY-MM-DDTHH:MI:SS, YYYY-MM-DDTHH:MI:SS.mmmm with or without a timezone offset to a datetime object
Arguments:
timestamp(str): the timestamp to convert
"""
if not timestamp:
return None
try:
logging.debug("Checking for specific characters in timestamp")
if 'T' in timestamp:
base_format = '%Y-%m-%dT%H:%M:%S'
elif '-' in timestamp:
base_format = '%Y-%m-%d %H:%M:%S'
else:
base_format = '%Y:%m:%d %H:%M:%S'
if '.' in timestamp:
base_format = base_format + '.%f'
if '+' in timestamp or (timestamp.rfind('-') > timestamp.rfind(':')):
base_format = base_format + '%z'
logging.info("Converting timestamp: '%s' %s", str(timestamp), base_format)
return datetime.datetime.strptime(timestamp, base_format)
except Exception as ex:
logging.error("Continuing after exception converting timestamp '%s': %s", str(timestamp), str(ex))
return None
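    # Hedged examples of inputs this helper accepts (illustrative values):
    #     "2021-06-01T10:30:00"           ISO style, naive
    #     "2021-06-01 10:30:00.250000"    space-separated with microseconds
    #     "2021:06:01 10:30:00+0200"      EXIF style with a UTC offset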
@staticmethod
def exif_tags_to_timestamp(exif_tags):
"""Looks up the origin timestamp and a timestamp offset in the exit tags and returns
a datetime object
Args:
exif_tags(dict): The exif tags to search for timestamp information
Return:
Returns the origin timestamp when found. The return timestamp is adjusted for UTF if
an offset is found. None is returned if a valid timestamp isn't found.
"""
cur_stamp, cur_offset = (None, None)
def convert_and_clean_tag(value):
"""Internal helper function for handling EXIF tag values. Tests for an empty string after
stripping colons, '+', '-', and whitespace [the spec is unclear if a +/- is needed when
the timestamp offset is unknown (and spaces are used)].
Args:
value(bytes or str): The tag value
Return:
Returns the cleaned up, and converted from bytes, string. Or None if the value is empty
after stripping above characters and whitespace.
"""
if not value:
return None
# Convert bytes to string
if isinstance(value, bytes):
value = value.decode('UTF-8').strip()
else:
value = value.strip()
# Check for an empty string after stripping colons
if value:
if not value.replace(":", "").replace("+:", "").replace("-", "").strip():
value = None
return None if not value else value
# Process the EXIF data
if EXIF_ORIGINAL_TIMESTAMP in exif_tags:
cur_stamp = convert_and_clean_tag(exif_tags[EXIF_ORIGINAL_TIMESTAMP])
if not cur_stamp:
return None
if EXIF_ORIGINAL_TIMESTAMP_OFFSET in exif_tags:
cur_offset = convert_and_clean_tag(exif_tags[EXIF_ORIGINAL_TIMESTAMP_OFFSET])
if not cur_offset and EXIF_TIMESTAMP_OFFSET in exif_tags:
cur_offset = convert_and_clean_tag(exif_tags[EXIF_TIMESTAMP_OFFSET])
# Format the string to a timestamp and return the result
try:
if not cur_offset:
logging.debug("Converting EXIF timestamp without offset: '%s'", str(cur_stamp))
cur_ts = __internal__.fromisoformat(cur_stamp)
else:
logging.debug("Converting EXIF timestamp and offset: '%s' '%s'", str(cur_stamp), str(cur_offset))
cur_offset = cur_offset.replace(":", "")
cur_ts = __internal__.fromisoformat(cur_stamp + cur_offset)
except Exception as ex:
cur_ts = None
logging.debug("Exception caught converting EXIF tag to timestamp: %s", str(ex))
return cur_ts
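    # Hedged example (illustrative tag values): for exif_tags such as
    #     {36867: b"2021:06:01 10:30:00", 36881: b"+02:00"}
    # this returns a timezone-aware datetime for 2021-06-01 10:30:00+02:00;
    # without any offset tag the returned datetime is naive.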
@staticmethod
def get_first_timestamp(file_path, timestamp):
"""Looks for a timestamp in the specified file and returns
the earliest timestamp (when compared to the timestamp parameter)
Arguments:
file_path: the path to the file to check
timestamp: the timestamp to compare against (when specified)
Return:
The earliest found timestamp
"""
logging.debug("Getting first timestamp from timestamp and file: '%s' '%s'", str(timestamp), str(file_path))
first_stamp = __internal__.fromisoformat(timestamp)
try:
tags_dict = piexif.load(file_path)
if tags_dict and "Exif" in tags_dict:
cur_stamp = __internal__.exif_tags_to_timestamp(tags_dict["Exif"])
if cur_stamp:
first_stamp = cur_stamp if first_stamp is None or cur_stamp < first_stamp else first_stamp
except Exception as ex:
logging.debug("Exception caught getting timestamp from file: %s", file_path)
logging.debug(" %s", str(ex))
if first_stamp:
return first_stamp.isoformat()
return timestamp
class Transformer():
"""Generic class for supporting transformers
"""
# pylint: disable=unused-argument
def __init__(self, **kwargs):
"""Performs initialization of class instance
Arguments:
kwargs: additional parameters passed in to Transformer
"""
self.sensor = None
self.args = None
@property
def supported_image_file_exts(self):
"""Returns the list of supported image file extension strings (in lower case)
"""
return ['tif', 'tiff', 'jpg']
def add_parameters(self, parser):
"""Adds processing parameters to existing parameters
Arguments:
parser: instance of argparse
"""
# pylint: disable=no-self-use
parser.add_argument('--logging', '-l', nargs='?', default=os.getenv("LOGGING"),
                            help='file or URL of logging configuration (default=None)')
parser.epilog = configuration.TRANSFORMER_NAME + ' version ' + configuration.TRANSFORMER_VERSION + \
' author ' + configuration.AUTHOR_NAME + ' ' + configuration.AUTHOR_EMAIL
def get_image_files(self, files_folders: list) -> list:
"""Returns a list of image files from the passed in list. Performs a shallow folder check (1 deep)
Arguments:
files_folders: a list of files and folders to parse
Return:
Returns a list of image files
"""
return_files = []
for one_path in files_folders:
if os.path.isdir(one_path):
for dir_path in os.listdir(one_path):
if not os.path.isdir(dir_path):
if os.path.splitext(dir_path)[1].lstrip('.').lower() in self.supported_image_file_exts:
return_files.append(os.path.join(one_path, dir_path))
elif os.path.splitext(one_path)[1].lstrip('.').lower() in self.supported_image_file_exts:
return_files.append(one_path)
return return_files
def get_transformer_params(self, args, metadata_list):
"""Returns a parameter list for processing data
Arguments:
args: result of calling argparse.parse_args
metadata_list: the loaded metadata
"""
# Setup logging
pyc_setup_logging(args.logging)
self.args = args
# Determine if we're using JSONLD
metadata = metadata_list[0]
if 'content' in metadata:
parse_md = metadata['content']
else:
parse_md = metadata
# Get the season, experiment, etc information
timestamp, season_name, experiment_name = None, None, None
logging.debug("Using the following experimental metadata: %s", str(parse_md))
if 'observationTimeStamp' in parse_md:
timestamp = parse_md['observationTimeStamp']
if 'season' in parse_md:
season_name = parse_md['season']
if 'studyName' in parse_md:
experiment_name = parse_md['studyName']
# Get the list of files, if there are some and find the earliest timestamp if a timestamp
# hasn't been specified yet
file_list = []
working_timestamp = timestamp
if args.file_list:
logging.debug("Looking for images in following list: %s", str(args.file_list))
check_list = self.get_image_files(args.file_list)
logging.debug("Found the following files: %s", str(check_list))
for one_file in check_list:
# Filter out arguments that are obviously not files
if not one_file.startswith('-'):
file_list.append(one_file)
# Only bother to get a timestamp if we don't have one specified
if timestamp is None:
working_timestamp = __internal__.get_first_timestamp(one_file, working_timestamp)
if timestamp is None and working_timestamp is not None:
timestamp = working_timestamp
parse_md['observationTimeStamp'] = timestamp
# Check for transformer specific metadata
transformer_md = None
if configuration.TRANSFORMER_NAME in parse_md:
transformer_md = parse_md[configuration.TRANSFORMER_NAME]
# Prepare our parameters
check_md = {'timestamp': timestamp,
'season': season_name,
'experiment': experiment_name,
'container_name': None,
'target_container_name': None,
'trigger_name': None,
'context_md': parse_md,
'working_folder': args.working_space,
'list_files': lambda: file_list
}
return {'check_md': check_md,
'transformer_md': transformer_md,
'full_md': parse_md
}
| python | 10,764 |
"""This script combines all the image datasets into one larger dataset"""
import glob
import os
import random
import shutil
import warnings
import numpy as np
import pandas as pd
from PIL import Image, ImageOps
def prepare_folders():
"""
Simple function creating all folders required for the dataset
"""
folders = [
"data/train/image/train",
"data/train/image/val",
"data/train/image/test",
]
subfolders = [
"angry",
"disgust",
"fear",
"happy",
"neutral",
"sad",
"surprise",
]
for folder in folders:
if not os.path.exists(folder):
os.makedirs(folder)
for subfolder in subfolders:
os.makedirs(os.path.join(folder, subfolder))
def copy_kaggle_dataset(train_split=0.8):
"""
This function copies the images from the kaggle dataset over.
:param train_split: Float between 0 and 1 determining how big training
dataset is relatively.
"""
if not os.path.exists("data/train/image/images"):
warnings.warn("Kaggle Dataset not downloaded. Skipping!")
return
print("Copying kaggle dataset.")
shutil.copytree(
"data/train/image/images/validation",
"data/train/image/test",
dirs_exist_ok=True,
)
subfolders = [
"angry",
"disgust",
"fear",
"happy",
"neutral",
"sad",
"surprise",
]
for subfolder in subfolders:
workdir = os.path.join("data/train/image/images/train", subfolder)
file_list = np.array(os.listdir(workdir))
np.random.shuffle(file_list)
for file in file_list[: int(train_split * file_list.shape[0])]:
shutil.copyfile(
os.path.join(workdir, file),
os.path.join("data/train/image/train", subfolder, file),
)
for file in file_list[int(train_split * file_list.shape[0]) :]:
shutil.copyfile(
os.path.join(workdir, file),
os.path.join("data/train/image/val", subfolder, file),
)
print("Kaggle copying successful.")
def copy_jaffe_dataset():
"""
    This function copies the images from the JAFFE dataset over, splitting the
    images per emotion roughly 60/20/20 into train, validation and test folders.
"""
if not os.path.exists("data/train/image/jaffedbase"):
warnings.warn("JAFFE Dataset not downloaded. Skipping!")
return
print("Copying JAFFE dataset.")
emotions = {
"AN": "angry",
"DI": "disgust",
"FE": "fear",
"HA": "happy",
"NE": "neutral",
"SA": "sad",
"SU": "surprise",
}
images = {}
for emotion in emotions.values():
images[emotion] = []
for image_path in glob.glob("data/train/image/jaffedbase/*.tiff"):
images[emotions[os.path.basename(image_path)[3:5]]].append(image_path)
for emotion, image_list in images.items():
random.shuffle(image_list)
for im in image_list[: int(0.6 * len(image_list))]:
# Copy training
img = Image.open(im)
img = img.convert("RGB")
img = img.resize((48, 48))
img.save(
os.path.join(
"data/train/image/train",
emotion,
os.path.basename(im)[:-4] + "png",
)
)
for im in image_list[
int(0.6 * len(image_list)) : int(0.8 * len(image_list))
]:
# Copy val
img = Image.open(im)
img = img.convert("RGB")
img = img.resize((48, 48))
img.save(
os.path.join(
"data/train/image/val",
emotion,
os.path.basename(im)[:-4] + "png",
)
)
for im in image_list[int(0.8 * len(image_list)) :]:
# Copy test
img = Image.open(im)
img = img.convert("RGB")
img = img.resize((48, 48))
img.save(
os.path.join(
"data/train/image/test",
emotion,
os.path.basename(im)[:-4] + "png",
)
)
print("JAFFE copying successful.")
def copy_fer_dataset(logging=False):
"""
Function that prepares the FER2013 dataset for training classifiers
    :param logging: Activate logging for a correctness check of the data
"""
if not os.path.exists("data/train/image/fer2013"):
warnings.warn("FER2013 Dataset not downloaded. Skipping!")
return
print("Copying FER2013 dataset.")
image_data = pd.read_csv(
"data/train/image/fer2013/fer2013.csv",
delimiter=",",
header=0,
usecols=[1],
)
label_data = pd.read_csv(
"data/train/image/fer2013/fer_labels.csv", delimiter=",", header=0
)
folders = {"Training": "train", "PublicTest": "val", "PrivateTest": "test"}
emotions = [
"neutral",
"happy",
"surprise",
"sad",
"angry",
"disgust",
"fear",
]
all_labels = label_data.to_numpy()[:, 2:12]
for index in range(image_data.shape[0]):
emotion_index = np.argmax(all_labels[index, :])
intermed = all_labels[index, :].copy()
intermed[emotion_index] = 0
if emotion_index < 7 and all_labels[index, emotion_index] > np.max(
intermed
):
emotion = emotions[emotion_index]
image = np.reshape(
np.fromstring(image_data.iloc[index][0], sep=" "), (48, 48)
)
im = Image.fromarray(image)
im = im.convert("RGB")
save_path = os.path.join(
"data/train/image",
folders[label_data.iloc[index, 0]],
emotion,
f"fer_{index}.jpeg",
)
im.save(save_path)
elif logging:
print(f"Skipping index {index}, reason: {all_labels[index, :]}")
print("FER2013 copying successful.")
def copy_affectnet_dataset():
"""
Function that reads the affectnet dataset into the corresponding image
folders and divides it into train, validation and test images.
"""
if not os.path.exists("data/train/image/AffectNet"):
warnings.warn("AffectNet Dataset not downloaded. Skipping!")
return
print("Copying AffectNet dataset.")
image_data = pd.read_csv(
"data/train/image/AffectNet/AffectNet_images_all.csv",
delimiter=",",
header=0,
names=["labels", "images"],
)
labels = image_data.labels.to_numpy()
emotions = [
"angry",
"fear",
"happy",
"sad",
"surprise",
"neutral",
"contempt",
"disgust",
]
for emotion_index in [0, 1, 2, 3, 4, 5, 7]:
emotion_indices = np.where(labels == emotion_index)[0]
emotion = emotions[emotion_index]
for count, image_index in enumerate(emotion_indices):
image = np.reshape(
np.fromstring(image_data.iloc[image_index][1], sep=" "),
(100, 100),
)
im = Image.fromarray(image)
im = im.convert("RGB")
im = im.resize((48, 48))
if count / emotion_indices.shape[0] <= 0.6:
folder = "train"
elif count / emotion_indices.shape[0] <= 0.8:
folder = "val"
else:
folder = "test"
save_path = os.path.join(
"data/train/image",
folder,
emotion,
f"aff_{image_index}.jpeg",
)
im.save(save_path)
print("AffectNet copying successful.")
def copy_ffhq_dataset():
"""
Function that reads the ffhq dataset into the corresponding image folders
and divides it into train, validation and test images.
"""
if not os.path.exists("data/train/image/FFHQ"):
warnings.warn("FFHQ Dataset not downloaded. Skipping!")
return
print("Copying FFHQ dataset.")
label_data = pd.read_csv(
"data/train/image/FFHQ/FFHQ_6033.csv",
delimiter=",",
header=0,
names=["image_id", "labels"],
)
labels = label_data.labels.to_numpy()
emotions = [
"angry",
"fear",
"happy",
"sad",
"surprise",
"neutral",
"contempt",
"disgust",
]
for emotion_index in [0, 1, 2, 3, 4, 5, 7]:
emotion_indices = np.where(labels == emotion_index)[0]
emotion = emotions[emotion_index]
for count, image_index in enumerate(emotion_indices):
image_name = label_data.iloc[image_index][0]
if not os.path.exists(
f"data/train/image/FFHQ/images/{image_name:05d}.png"
):
# print(f"Image {image_name} missing")
continue
if count / emotion_indices.shape[0] <= 0.6:
folder = "train"
elif count / emotion_indices.shape[0] <= 0.8:
folder = "val"
else:
folder = "test"
im = Image.open(
f"data/train/image/FFHQ/images/{image_name:05d}.png"
)
im = ImageOps.grayscale(im)
im = im.resize((48, 48))
save_path = os.path.join(
"data/train/image",
folder,
emotion,
f"ffhq_{image_name:05d}.jpeg",
)
im.save(save_path)
print("FFHQ copying successful.")
def copy_ckplus_dataset():
"""
Function that reads the CK+ dataset into the corresponding image folders
and divides it into train, validation and test images.
"""
if not os.path.exists("data/train/image/CK+"):
warnings.warn("CK+ Dataset not downloaded. Skipping!")
return
print("Copying CK+ dataset.")
emotions = [
"neutral",
"angry",
"contempt",
"disgust",
"fear",
"happy",
"sad",
"surprise",
]
emotion_images = {emotion: [] for emotion in emotions}
emotion_files = glob.glob(
"data/train/image/CK+/Emotion/**/**/*_emotion.txt"
)
for emotion_file in emotion_files:
with open(emotion_file, "r") as file:
emotion = int(float(file.readline().strip()))
if emotion == 2:
continue
image_path = emotion_file.replace("Emotion", "cohn-kanade-images")
image_path = image_path.replace("_emotion.txt", ".png")
im = Image.open(image_path)
width, height = im.size
new_size = min(width, height)
left = (width - new_size) / 2
top = (height - new_size) / 2
right = (width + new_size) / 2
bottom = (height + new_size) / 2
im = im.crop((left, top, right, bottom))
im = im.resize((48, 48))
        im = im.convert("RGB")
emotion_images[emotions[emotion]].append(im)
for emotion in emotions:
for count, im in enumerate(emotion_images[emotion]):
if count / len(emotion_images[emotion]) <= 0.6:
folder = "train"
elif count / len(emotion_images[emotion]) <= 0.8:
folder = "val"
else:
folder = "test"
save_path = os.path.join(
"data/train/image",
folder,
emotion,
f"ckplus_{count:05d}.jpeg",
)
im.save(save_path)
print("CK+ copying successful.")
def copy_bu3dfe_data():
if not os.path.exists("data/train/image/BU_3DFE"):
warnings.warn("BU3DFE Dataset not downloaded. Skipping!")
return
print("Copying BU3DFE dataset.")
emotions = {
"AN": "angry",
"DI": "disgust",
"FE": "fear",
"HA": "happy",
"NE": "neutral",
"SA": "sad",
"SU": "surprise",
}
frontal_files = glob.glob("data/train/image/BU_3DFE/**/*_F2D.bmp")
emotion_images = {emotion: [] for emotion in emotions.values()}
for image_file in frontal_files:
emotion = image_file.split("/")[-1][6:8]
im = Image.open(image_file)
im = im.resize((48, 48))
im = ImageOps.grayscale(im)
        im = im.convert("RGB")
emotion_images[emotions[emotion]].append(im)
for emotion in emotions.values():
for count, im in enumerate(emotion_images[emotion]):
if count / len(emotion_images[emotion]) <= 0.6:
folder = "train"
elif count / len(emotion_images[emotion]) <= 0.8:
folder = "val"
else:
folder = "test"
save_path = os.path.join(
"data/train/image",
folder,
emotion,
f"bu3dfe_f_{count:05d}.jpeg",
)
im.save(save_path)
side_files = glob.glob("data/train/image/BU_3DFE/**/*_F3D.bmp")
emotion_images = {emotion: [] for emotion in emotions.values()}
for image_file in side_files:
emotion = image_file.split("/")[-1][6:8]
im = Image.open(image_file)
im = ImageOps.grayscale(im)
        im = im.convert("RGB")
image_arr = np.asarray(im)
im1, im2 = separate_image(image_arr)
emotion_images[emotions[emotion]].append(im1)
emotion_images[emotions[emotion]].append(im2)
for emotion in emotions.values():
for count, im in enumerate(emotion_images[emotion]):
if count / len(emotion_images[emotion]) <= 0.6:
folder = "train"
elif count / len(emotion_images[emotion]) <= 0.8:
folder = "val"
else:
folder = "test"
save_path = os.path.join(
"data/train/image",
folder,
emotion,
f"bu3dfe_s_{count:05d}.jpeg",
)
im.save(save_path)
print("BU3DFE copying successful.")
def separate_image(image):
"""
Highly inefficient function for splitting the weirdly stored images from
BU3DFE dataset into two separate images.
:param image: The image array to split
:return: Tuple of two image arrays
"""
val0 = image[0, 0]
row_index = 1
while image[row_index, 0] == val0:
row_index += 1
val1 = image[row_index, 0]
height = 1
width = 1
max_width = False
max_height = False
while not (max_width and max_height):
if np.all(image[row_index : row_index + height + 1, 0:width] == val1):
height += 1
else:
max_height = True
if np.all(
image[row_index : row_index + height, 0 : width + 1] == val1
):
width += 1
else:
max_width = True
im1 = image[row_index + height :, 0:width]
im2 = image[row_index:, width:]
return padding(im1[::-1, ::-1]), padding(im2)
def padding(image):
desired_size = max(image.shape)
delta_w = desired_size - image.shape[1]
delta_h = desired_size - image.shape[0]
padding = (
delta_w // 2,
delta_h // 2,
delta_w - (delta_w // 2),
delta_h - (delta_h // 2),
)
image = Image.fromarray(image)
image = ImageOps.expand(image, padding)
image = image.resize((48, 48))
return image
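# Hedged sanity check (not part of the original pipeline): pad a synthetic,
# non-square uint8 array to a square and resize it to 48x48, mirroring how
# `padding` is applied to the split BU3DFE halves above.
def _padding_demo():
    demo = np.zeros((20, 10), dtype=np.uint8)  # taller than wide
    padded = padding(demo)
    # PIL reports size as (width, height); both are 48 after resizing.
    assert padded.size == (48, 48)
    return padded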
if __name__ == "__main__":
prepare_folders()
copy_kaggle_dataset()
copy_jaffe_dataset()
copy_fer_dataset()
copy_affectnet_dataset()
copy_ffhq_dataset()
copy_ckplus_dataset()
copy_bu3dfe_data()
| python | 15,781 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
import logging
from typing import Any, Callable, Dict, List, Optional
import yaml
from flask_appbuilder import Model
from sqlalchemy.orm import Session
from sqlalchemy.orm.session import make_transient
from superset import db
from superset.commands.base import BaseCommand
from superset.commands.importers.exceptions import IncorrectVersionError
from superset.connectors.base.models import BaseColumn, BaseDatasource, BaseMetric
from superset.connectors.druid.models import (
DruidCluster,
DruidColumn,
DruidDatasource,
DruidMetric,
)
from superset.connectors.sqla.models import SqlaTable, SqlMetric, TableColumn
from superset.databases.commands.exceptions import DatabaseNotFoundError
from superset.models.core import Database
from superset.utils.dict_import_export import DATABASES_KEY, DRUID_CLUSTERS_KEY
logger = logging.getLogger(__name__)
def lookup_sqla_table(table: SqlaTable) -> Optional[SqlaTable]:
return (
db.session.query(SqlaTable)
.join(Database)
.filter(
SqlaTable.table_name == table.table_name,
SqlaTable.schema == table.schema,
Database.id == table.database_id,
)
.first()
)
def lookup_sqla_database(table: SqlaTable) -> Optional[Database]:
database = (
db.session.query(Database)
.filter_by(database_name=table.params_dict["database_name"])
.one_or_none()
)
if database is None:
raise DatabaseNotFoundError
return database
def lookup_druid_cluster(datasource: DruidDatasource) -> Optional[DruidCluster]:
return db.session.query(DruidCluster).filter_by(id=datasource.cluster_id).first()
def lookup_druid_datasource(datasource: DruidDatasource) -> Optional[DruidDatasource]:
return (
db.session.query(DruidDatasource)
.filter(
DruidDatasource.datasource_name == datasource.datasource_name,
DruidDatasource.cluster_id == datasource.cluster_id,
)
.first()
)
def import_dataset(
i_datasource: BaseDatasource,
database_id: Optional[int] = None,
import_time: Optional[int] = None,
) -> int:
"""Imports the datasource from the object to the database.
    Metrics, columns and the datasource will be overridden if they exist.
    This function can be used to import/export datasources between multiple
    superset instances. Audit metadata isn't copied over.
"""
lookup_database: Callable[[BaseDatasource], Optional[Database]]
lookup_datasource: Callable[[BaseDatasource], Optional[BaseDatasource]]
if isinstance(i_datasource, SqlaTable):
lookup_database = lookup_sqla_database
lookup_datasource = lookup_sqla_table
else:
lookup_database = lookup_druid_cluster
lookup_datasource = lookup_druid_datasource
return import_datasource(
db.session,
i_datasource,
lookup_database,
lookup_datasource,
import_time,
database_id,
)
def lookup_sqla_metric(session: Session, metric: SqlMetric) -> SqlMetric:
return (
session.query(SqlMetric)
.filter(
SqlMetric.table_id == metric.table_id,
SqlMetric.metric_name == metric.metric_name,
)
.first()
)
def lookup_druid_metric(session: Session, metric: DruidMetric) -> DruidMetric:
return (
session.query(DruidMetric)
.filter(
DruidMetric.datasource_id == metric.datasource_id,
DruidMetric.metric_name == metric.metric_name,
)
.first()
)
def import_metric(session: Session, metric: BaseMetric) -> BaseMetric:
if isinstance(metric, SqlMetric):
lookup_metric = lookup_sqla_metric
else:
lookup_metric = lookup_druid_metric
return import_simple_obj(session, metric, lookup_metric)
def lookup_sqla_column(session: Session, column: TableColumn) -> TableColumn:
return (
session.query(TableColumn)
.filter(
TableColumn.table_id == column.table_id,
TableColumn.column_name == column.column_name,
)
.first()
)
def lookup_druid_column(session: Session, column: DruidColumn) -> DruidColumn:
return (
session.query(DruidColumn)
.filter(
DruidColumn.datasource_id == column.datasource_id,
DruidColumn.column_name == column.column_name,
)
.first()
)
def import_column(session: Session, column: BaseColumn) -> BaseColumn:
if isinstance(column, TableColumn):
lookup_column = lookup_sqla_column
else:
lookup_column = lookup_druid_column
return import_simple_obj(session, column, lookup_column)
def import_datasource( # pylint: disable=too-many-arguments
session: Session,
i_datasource: Model,
lookup_database: Callable[[Model], Optional[Model]],
lookup_datasource: Callable[[Model], Optional[Model]],
import_time: Optional[int] = None,
database_id: Optional[int] = None,
) -> int:
"""Imports the datasource from the object to the database.
    Metrics, columns and the datasource will be overridden if they exist.
    This function can be used to import/export datasources between multiple
    superset instances. Audit metadata isn't copied over.
"""
make_transient(i_datasource)
logger.info("Started import of the datasource: %s", i_datasource.to_json())
i_datasource.id = None
i_datasource.database_id = (
database_id
if database_id
else getattr(lookup_database(i_datasource), "id", None)
)
i_datasource.alter_params(import_time=import_time)
# override the datasource
datasource = lookup_datasource(i_datasource)
if datasource:
datasource.override(i_datasource)
session.flush()
else:
datasource = i_datasource.copy()
session.add(datasource)
session.flush()
for metric in i_datasource.metrics:
new_m = metric.copy()
new_m.table_id = datasource.id
logger.info(
"Importing metric %s from the datasource: %s",
new_m.to_json(),
i_datasource.full_name,
)
imported_m = import_metric(session, new_m)
if imported_m.metric_name not in [m.metric_name for m in datasource.metrics]:
datasource.metrics.append(imported_m)
for column in i_datasource.columns:
new_c = column.copy()
new_c.table_id = datasource.id
logger.info(
"Importing column %s from the datasource: %s",
new_c.to_json(),
i_datasource.full_name,
)
imported_c = import_column(session, new_c)
if imported_c.column_name not in [c.column_name for c in datasource.columns]:
datasource.columns.append(imported_c)
session.flush()
return datasource.id
def import_simple_obj(
session: Session, i_obj: Model, lookup_obj: Callable[[Session, Model], Model]
) -> Model:
make_transient(i_obj)
i_obj.id = None
i_obj.table = None
# find if the column was already imported
existing_column = lookup_obj(session, i_obj)
i_obj.table = None
if existing_column:
existing_column.override(i_obj)
session.flush()
return existing_column
session.add(i_obj)
session.flush()
return i_obj
def import_from_dict(
session: Session, data: Dict[str, Any], sync: Optional[List[str]] = None
) -> None:
"""Imports databases and druid clusters from dictionary"""
if not sync:
sync = []
if isinstance(data, dict):
logger.info("Importing %d %s", len(data.get(DATABASES_KEY, [])), DATABASES_KEY)
for database in data.get(DATABASES_KEY, []):
Database.import_from_dict(session, database, sync=sync)
logger.info(
"Importing %d %s", len(data.get(DRUID_CLUSTERS_KEY, [])), DRUID_CLUSTERS_KEY
)
for datasource in data.get(DRUID_CLUSTERS_KEY, []):
DruidCluster.import_from_dict(session, datasource, sync=sync)
session.commit()
else:
logger.info("Supplied object is not a dictionary.")
class ImportDatasetsCommand(BaseCommand):
"""
Import datasources in YAML format.
This is the original unversioned format used to export and import datasources
in Superset.
"""
# pylint: disable=unused-argument
def __init__(self, contents: Dict[str, str], **kwargs: Any):
self.contents = contents
self._configs: Dict[str, Any] = {}
self.sync = []
if kwargs.get("sync_columns"):
self.sync.append("columns")
if kwargs.get("sync_metrics"):
self.sync.append("metrics")
def run(self) -> None:
self.validate()
# TODO (betodealmeida): add rollback in case of error
for file_name, config in self._configs.items():
logger.info("Importing dataset from file %s", file_name)
if isinstance(config, dict):
import_from_dict(db.session, config, sync=self.sync)
else: # list
for dataset in config:
# UI exports don't have the database metadata, so we assume
# the DB exists and has the same name
params = json.loads(dataset["params"])
database = (
db.session.query(Database)
.filter_by(database_name=params["database_name"])
.one()
)
dataset["database_id"] = database.id
SqlaTable.import_from_dict(db.session, dataset, sync=self.sync)
def validate(self) -> None:
# ensure all files are YAML
for file_name, content in self.contents.items():
try:
config = yaml.safe_load(content)
except yaml.parser.ParserError:
logger.exception("Invalid YAML file")
raise IncorrectVersionError(f"{file_name} is not a valid YAML file")
# CLI export
if isinstance(config, dict):
# TODO (betodealmeida): validate with Marshmallow
if DATABASES_KEY not in config and DRUID_CLUSTERS_KEY not in config:
raise IncorrectVersionError(f"{file_name} has no valid keys")
# UI export
elif isinstance(config, list):
# TODO (betodealmeida): validate with Marshmallow
pass
else:
raise IncorrectVersionError(f"{file_name} is not a valid file")
self._configs[file_name] = config
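# Hedged usage sketch (not part of the original module): within a running
# Superset application context (so that `db.session` is available), the
# command takes a mapping of file names to YAML contents plus optional sync
# flags; `yaml_text` below is a hypothetical placeholder.
#
#     contents = {"datasets.yaml": yaml_text}
#     ImportDatasetsCommand(contents, sync_columns=True, sync_metrics=True).run()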
| python | 11,432 |
# -*- coding: utf-8 -*-
'''
tests for pkgrepo states
'''
# Import Python libs
from __future__ import absolute_import
# Import Salt Testing libs
from tests.support.case import ModuleCase
from tests.support.mixins import SaltReturnAssertsMixin
from tests.support.unit import skipIf
from tests.support.helpers import (
destructiveTest,
requires_system_grains
)
# Import salt libs
import salt.utils
# Import 3rd-party libs
import salt.ext.six as six
class PkgrepoTest(ModuleCase, SaltReturnAssertsMixin):
'''
pkgrepo state tests
'''
@destructiveTest
@skipIf(salt.utils.is_windows(), 'minion is windows')
@requires_system_grains
def test_pkgrepo_01_managed(self, grains):
'''
This is a destructive test as it adds a repository.
'''
os_grain = self.run_function('grains.item', ['os'])['os']
os_release_info = tuple(self.run_function('grains.item', ['osrelease_info'])['osrelease_info'])
if os_grain == 'Ubuntu' and os_release_info >= (15, 10):
self.skipTest(
'The PPA used for this test does not exist for Ubuntu Wily'
' (15.10) and later.'
)
if grains['os_family'] == 'Debian':
try:
from aptsources import sourceslist
except ImportError:
self.skipTest(
'aptsources.sourceslist python module not found'
)
ret = self.run_function('state.sls', mods='pkgrepo.managed', timeout=120)
# If the below assert fails then no states were run, and the SLS in
# tests/integration/files/file/base/pkgrepo/managed.sls needs to be
# corrected.
self.assertReturnNonEmptySaltType(ret)
for state_id, state_result in six.iteritems(ret):
self.assertSaltTrueReturn(dict([(state_id, state_result)]))
@destructiveTest
@skipIf(salt.utils.is_windows(), 'minion is windows')
def test_pkgrepo_02_absent(self):
'''
This is a destructive test as it removes the repository added in the
above test.
'''
os_grain = self.run_function('grains.item', ['os'])['os']
os_release_info = tuple(self.run_function('grains.item', ['osrelease_info'])['osrelease_info'])
if os_grain == 'Ubuntu' and os_release_info >= (15, 10):
self.skipTest(
'The PPA used for this test does not exist for Ubuntu Wily'
' (15.10) and later.'
)
ret = self.run_function('state.sls', mods='pkgrepo.absent', timeout=120)
# If the below assert fails then no states were run, and the SLS in
# tests/integration/files/file/base/pkgrepo/absent.sls needs to be
# corrected.
self.assertReturnNonEmptySaltType(ret)
for state_id, state_result in six.iteritems(ret):
self.assertSaltTrueReturn(dict([(state_id, state_result)]))
| python | 2,938 |
#!/usr/bin/env python3
"""
Copyright (c) Facebook, Inc. and its affiliates.
Calibration plots, both cumulative and traditional, with weighted sampling
Functions
---------
cumulative
Cumulative difference between observed and expected vals of Bernoulli vars
equiprob
Reliability diagram with roughly equispaced average probabilities over bins
equierr
Reliability diagram with similar ratio L2-norm / L1-norm of weights by bin
exactdist
Reliability diagram with exact distributions plotted
This source code is licensed under the MIT license found in the LICENSE file in
the root directory of this source tree.
"""
import math
import os
import random
import numpy as np
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
from matplotlib.ticker import FixedFormatter
def cumulative(r, s, majorticks, minorticks, filename='cumulative.pdf',
title='miscalibration is the slope as a function of $A_k$',
fraction=1, weights=None):
"""
Cumulative difference between observed and expected vals of Bernoulli vars
Saves a plot of the difference between the normalized cumulative
weighted sums of r and the normalized cumulative weighted sums of s,
with majorticks major ticks and minorticks minor ticks on the lower axis,
labeling the major ticks with the corresponding values from s.
Parameters
----------
r : array_like
class labels (0 for incorrect and 1 for correct classification)
s : array_like
success probabilities (must be unique and in strictly increasing order)
majorticks : int
number of major ticks on each of the horizontal axes
minorticks : int
number of minor ticks on the lower axis
filename : string, optional
name of the file in which to save the plot
title : string, optional
title of the plot
fraction : float, optional
proportion of the full horizontal axis to display
weights : array_like, optional
weights of the observations
(the default None results in equal weighting)
Returns
-------
float
Kuiper statistic
float
Kolmogorov-Smirnov statistic
float
quarter of the full height of the isosceles triangle
at the origin in the plot
"""
def histcounts(nbins, a):
# Counts the number of entries of a
# falling into each of nbins equispaced bins.
j = 0
nbin = np.zeros(nbins, dtype=np.int64)
for k in range(len(a)):
if a[k] > (j + 1) / nbins:
j += 1
if j == nbins:
break
nbin[j] += 1
return nbin
assert all(s[k] < s[k + 1] for k in range(len(s) - 1))
# Determine the weighting scheme.
if weights is None:
w = np.ones((len(s)))
else:
w = weights.copy()
assert np.all(w > 0)
w /= w[:int(len(w) * fraction)].sum()
# Create the figure.
plt.figure()
ax = plt.axes()
# Accumulate the weighted r and s, as well as w.
f = np.insert(np.cumsum(w * r), 0, [0])
ft = np.insert(np.cumsum(w * s), 0, [0])
x = np.insert(np.cumsum(w), 0, [0])
# Plot the difference.
plt.plot(
x[:int(len(x) * fraction)], (f - ft)[:int(len(f) * fraction)], 'k')
# Make sure the plot includes the origin.
plt.plot(0, 'k')
# Add an indicator of the scale of 1/sqrt(n) to the vertical axis.
ssub = np.insert(s, 0, [0])[:(int(len(s) * fraction) + 1)]
lenscale = np.sqrt(np.sum(w**2 * ssub[1:] * (1 - ssub[1:])))
plt.plot(2 * lenscale, 'k')
plt.plot(-2 * lenscale, 'k')
kwargs = {
'head_length': 2 * lenscale, 'head_width': fraction / 20, 'width': 0,
'linewidth': 0, 'length_includes_head': True, 'color': 'k'}
plt.arrow(.1e-100, -2 * lenscale, 0, 4 * lenscale, shape='left', **kwargs)
plt.arrow(.1e-100, 2 * lenscale, 0, -4 * lenscale, shape='right', **kwargs)
plt.margins(x=0, y=.1)
# Label the major ticks of the lower axis with the values of s.
ss = ['{:.2f}'.format(a) for a in
ssub[::(len(ssub) // majorticks)].tolist()]
lenxf = int(len(x) * fraction)
plt.xticks(x[:lenxf:(lenxf // majorticks)], ss)
if len(ssub) >= 300 and minorticks >= 50:
# Indicate the distribution of s via unlabeled minor ticks.
plt.minorticks_on()
ax.tick_params(which='minor', axis='x')
ax.tick_params(which='minor', axis='y', left=False)
ax.set_xticks(x[np.cumsum(histcounts(minorticks, ssub[1:]))],
minor=True)
# Label the axes.
plt.xlabel('$S_k$')
plt.ylabel('$F_k - \\tilde{F}_k$')
ax2 = plt.twiny()
plt.xlabel(
'$k/n$ (together with minor ticks at equispaced values of $A_k$)')
ax2.tick_params(which='minor', axis='x', top=True, direction='in', pad=-17)
ax2.set_xticks(np.arange(0, 1 + 1 / majorticks, 1 / majorticks),
minor=True)
ks = ['{:.2f}'.format(a) for a in
np.arange(0, 1 + 1 / majorticks, 1 / majorticks).tolist()]
alist = (lenxf - 1) * np.arange(0, 1 + 1 / majorticks, 1 / majorticks)
alist = alist.tolist()
plt.xticks([x[int(a)] for a in alist], ks)
ax2.xaxis.set_minor_formatter(FixedFormatter(
[r'$A_k\!=\!{:.2f}$'.format(1 / majorticks)]
+ [r'${:.2f}$'.format(k / majorticks) for k in range(2, majorticks)]))
# Title the plot.
plt.title(title)
# Clean up the whitespace in the plot.
plt.tight_layout()
# Save the plot.
plt.savefig(filename, bbox_inches='tight')
plt.close()
# Calculate summary statistics.
fft = (f - ft)[:int(len(f) * fraction)]
kuiper = np.max(fft) - np.min(fft)
kolmogorov_smirnov = np.max(np.abs(fft))
return kuiper, kolmogorov_smirnov, lenscale
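# Hedged usage sketch (not part of the original module): build a synthetic,
# well-calibrated sample and save a cumulative-difference plot. The helper
# name and output file name below are illustrative only.
def _cumulative_demo(n=1000, filename='demo_cumulative.pdf'):
    rng = np.random.default_rng(0)
    # Scores constructed to be strictly increasing and in [0, 1).
    s = (np.arange(n) + rng.uniform(size=n)) / n
    # Labels drawn so that P(r = 1) equals s, i.e. perfectly calibrated.
    r = (rng.uniform(size=n) <= s).astype(float)
    # Returns the Kuiper statistic, Kolmogorov-Smirnov statistic and lenscale.
    return cumulative(r, s, majorticks=10, minorticks=100, filename=filename)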
def equiprob(r, s, nbins, filename='equiprob.pdf', n_resamp=0, weights=None):
"""
Reliability diagram with roughly equispaced average probabilities over bins
Plots a reliability diagram with roughly equispaced average probabilities
for the bins.
Parameters
----------
r : array_like
class labels (0 for incorrect and 1 for correct classification)
s : array_like
success probabilities (must be in non-decreasing order)
nbins : int
number of bins
filename : string, optional
name of the file in which to save the plot
n_resamp : int, optional
number of times to resample and plot an extra line for error bars
weights : array_like, optional
weights of the observations
(the default None results in equal weighting)
Returns
-------
None
"""
def bintwo(nbins, a, b, q, w):
# Determines the total weight of entries of q falling into each
# of nbins equispaced bins, and calculates the weighted average per bin
# of the arrays a and b, returning np.nan as the "average"
# for any bin that is empty.
j = 0
bina = np.zeros(nbins)
binb = np.zeros(nbins)
wbin = np.zeros(nbins)
for k in range(len(q)):
if q[k] > (j + 1) / nbins:
j += 1
if j == nbins:
break
bina[j] += w[k] * a[k]
binb[j] += w[k] * b[k]
wbin[j] += w[k]
# Normalize the sum for each bin to compute the arithmetic average.
bina = np.divide(bina, wbin, where=wbin != 0)
bina[np.where(wbin == 0)] = np.nan
binb = np.divide(binb, wbin, where=wbin != 0)
binb[np.where(wbin == 0)] = np.nan
return wbin, bina, binb
assert all(s[k] <= s[k + 1] for k in range(len(s) - 1))
# Determine the weighting scheme.
if weights is None:
w = np.ones((len(s)))
else:
w = weights.copy()
assert np.all(w > 0)
w /= w.sum()
# Create the figure.
plt.figure()
for _ in range(n_resamp):
# Resample from s, preserving the pairing of s with r and w.
srw = np.asarray([[s[k], r[k], w[k]] for k in
np.random.randint(0, len(s), (len(s)))])
perm = np.argsort(srw[:, 0])
ss = srw[perm, 0]
rs = srw[perm, 1]
ws = srw[perm, 2]
_, binrs, binss = bintwo(nbins, rs, ss, ss, ws)
# Use the light gray, "gainsboro".
plt.plot(binss, binrs, 'gainsboro')
_, binr, bins = bintwo(nbins, r, s, s, w)
# Use the solid black, "k".
plt.plot(bins, binr, 'k*:')
zeroone = np.asarray((0, 1))
plt.plot(zeroone, zeroone, 'k')
plt.xlim((0, 1))
plt.ylim((0, 1))
plt.xlabel('weighted average of $S_k$ for $k$ in the bin')
plt.ylabel('weighted average of $R_k$ for $k$ in the bin')
plt.title('reliability diagram')
plt.tight_layout()
plt.savefig(filename, bbox_inches='tight')
plt.close()
def equierr(r, s, nbins, filename='equierr.pdf', n_resamp=0, weights=None):
"""
Reliability diagram with similar ratio L2-norm / L1-norm of weights by bin
Plots a reliability diagram with the ratio of the L2 norm of the weights
to the L1 norm of the weights being roughly the same for every bin.
The L2 norm is the square root of the sum of the squares, while the L1 norm
is the sum of the absolute values.
Parameters
----------
r : array_like
class labels (0 for incorrect and 1 for correct classification)
s : array_like
success probabilities (must be in non-decreasing order)
nbins : int
rough number of bins to construct
filename : string, optional
name of the file in which to save the plot
n_resamp : int, optional
number of times to resample and plot an extra line for error bars
weights : array_like, optional
weights of the observations
(the default None results in equal weighting)
Returns
-------
int
number of bins constructed
"""
def inbintwo(a, b, inbin, w):
# Determines the total weight falling into the bins given by inbin,
# and calculates the weighted average per bin of the arrays a and b,
# returning np.nan as the "average" for any bin that is empty.
wbin = [w[inbin[k]:inbin[k + 1]].sum() for k in range(len(inbin) - 1)]
bina = [(w[inbin[k]:inbin[k + 1]] * a[inbin[k]:inbin[k + 1]]).sum()
for k in range(len(inbin) - 1)]
binb = [(w[inbin[k]:inbin[k + 1]] * b[inbin[k]:inbin[k + 1]]).sum()
for k in range(len(inbin) - 1)]
# Normalize the sum for each bin to compute the weighted average.
bina = np.divide(bina, wbin, where=wbin != 0)
bina[np.where(wbin == 0)] = np.nan
binb = np.divide(binb, wbin, where=wbin != 0)
binb[np.where(wbin == 0)] = np.nan
return wbin, bina, binb
def binbounds(nbins, w):
# Partitions w into around nbins bins, each with roughly equal ratio
# of the L2 norm of w in the bin to the L1 norm of w in the bin,
# returning the indices defining the bins in the list inbin.
proxy = len(w) // nbins
v = w[np.sort(np.random.permutation(len(w))[:proxy])]
# t is a heuristic threshold.
t = np.square(v).sum() / v.sum()**2
inbin = []
k = 0
while k < len(w) - 1:
inbin.append(k)
k += 1
s = w[k]
ss = w[k]**2
while ss / s**2 > t and k < len(w) - 1:
k += 1
s += w[k]
ss += w[k]**2
if len(w) - inbin[-1] < (inbin[-1] - inbin[-2]) / 2:
inbin[-1] = len(w)
else:
inbin.append(len(w))
return inbin
assert all(s[k] <= s[k + 1] for k in range(len(s) - 1))
# Determine the weighting scheme.
if weights is None:
w = np.ones((len(s)))
else:
w = weights.copy()
assert np.all(w > 0)
w /= w.sum()
inbin = binbounds(nbins, w)
# Create the figure.
plt.figure()
for _ in range(n_resamp):
# Resample from s, preserving the pairing of s with r and w.
srw = np.asarray([[s[k], r[k], w[k]] for k in
np.random.randint(0, len(s), (len(s)))])
perm = np.argsort(srw[:, 0])
ss = srw[perm, 0]
rs = srw[perm, 1]
ws = srw[perm, 2]
_, binrs, binss = inbintwo(rs, ss, inbin, ws)
# Use the light gray, "gainsboro".
plt.plot(binss, binrs, 'gainsboro')
_, binr, bins = inbintwo(r, s, inbin, w)
# Use the solid black, "k".
plt.plot(bins, binr, 'k*:')
zeroone = np.asarray((0, 1))
plt.plot(zeroone, zeroone, 'k')
plt.xlim((0, 1))
plt.ylim((0, 1))
plt.xlabel('weighted average of $S_k$ for $k$ in the bin')
plt.ylabel('weighted average of $R_k$ for $k$ in the bin')
title = r'reliability diagram'
title += r' ($\Vert W \Vert_2 / \Vert W \Vert_1$ is similar for every bin)'
plt.title(title)
plt.tight_layout()
plt.savefig(filename, bbox_inches='tight')
plt.close()
return len(inbin) - 1
def exactdist(r, s, filename='exact.pdf'):
"""
Reliability diagram with exact distributions plotted
Plots a reliability diagram at full resolution with fractional numbers.
The entries of r should be the expected values of class labels,
not necessarily just 0s and 1s.
Parameters
----------
r : array_like
expected value of class labels
s : array_like
success probabilities (must be in non-decreasing order)
filename : string, optional
name of the file in which to save the plot
Returns
-------
None
"""
assert all(s[k] <= s[k + 1] for k in range(len(s) - 1))
plt.figure()
plt.plot(s, r, 'k*:')
zeroone = np.asarray((0, 1))
plt.plot(zeroone, zeroone, 'k')
plt.xlim((0, 1))
plt.ylim((0, 1))
plt.xlabel('score $S_k$')
plt.ylabel('expected value ($P_k$) of outcome $R_k$')
plt.title('exact expectations')
plt.tight_layout()
plt.savefig(filename, bbox_inches='tight')
plt.close()
if __name__ == '__main__':
#
# Generate directories with plots as specified via the code below,
# with each directory named n_nbins_inds_indt
# (where n, nbins, inds, and indt are defined in the code below).
#
# Set parameters.
# minorticks is the number of minor ticks on the lower axis.
minorticks = 100
# majorticks is the number of major ticks on the lower axis.
majorticks = 10
# n is the number of observations.
for n in [100, 1000, 10000]:
# Construct weights.
weights = 4 - np.cos(9 * np.arange(n) / n)
# nbins is the number of bins for the reliability diagrams.
for nbins in [10, 40]:
if n == 100 and nbins == 40:
nbins = 4
# nbins must divide n evenly.
assert n % nbins == 0
# Construct predicted success probabilities.
sl = np.arange(0, 1, 1 / n) + 1 / (2 * n)
# ss is a list of predicted probabilities for 3 kinds of examples.
ss = [sl, np.square(sl), np.sqrt(sl)]
for inds, s in enumerate(ss):
# The probabilities must be in non-decreasing order.
s = np.sort(s)
# Construct true underlying probabilities for sampling.
d = .4
tl = -np.arange(-d, d, 2 * d / n) - d / n
to = 1 - s - np.sqrt(s * (1 - s)) * (1 - np.sin(5 / (1.1 - s)))
# ts is a list of true probabilities for 4 kinds of examples.
ts = [tl, to, d - np.abs(tl), np.zeros(n)]
ts[2][(n // 2 - n // 50):(n // 2 + n // 50)] = 0
for indt, t in enumerate(ts):
# Limit consideration to only a third of the ss and ts.
if indt != 3 and (inds + indt) % 3 != 0:
continue
if indt == 3 and inds != 1:
continue
# Set a unique directory for each collection of experiments
# (creating the directory if necessary).
dir = 'weighted'
try:
os.mkdir(dir)
except FileExistsError:
pass
dir = 'weighted/' + str(n) + '_' + str(nbins)
dir = dir + '_' + str(inds)
dir = dir + '_' + str(indt)
try:
os.mkdir(dir)
except FileExistsError:
pass
dir = dir + '/'
# Generate a sample of classifications into two classes,
# correct (class 1) and incorrect (class 0),
# avoiding numpy's random number generators
# that are based on random bits --
# they yield strange results for many seeds.
random.seed(987654321)
uniform = np.asarray([random.random() for _ in range(n)])
r = (uniform <= s + t).astype(float)
print(f'./{dir} is under construction....')
# Generate five plots and a text file reporting metrics.
filename = dir + 'cumulative.pdf'
kuiper, kolmogorov_smirnov, lenscale = cumulative(
r, s, majorticks, minorticks, filename,
weights=weights)
filename = dir + 'metrics.txt'
with open(filename, 'w') as f:
f.write('n:\n')
f.write(f'{n}\n')
f.write('lenscale:\n')
f.write(f'{lenscale}\n')
f.write('Kuiper:\n')
f.write(f'{kuiper:.4}\n')
f.write('Kolmogorov-Smirnov:\n')
f.write(f'{kolmogorov_smirnov:.4}\n')
f.write('Kuiper / lenscale:\n')
f.write(f'{(kuiper / lenscale):.4}\n')
f.write('Kolmogorov-Smirnov / lenscale:\n')
f.write(f'{(kolmogorov_smirnov / lenscale):.4}\n')
filename = dir + 'equiprob.pdf'
equiprob(r, s, nbins, filename, 20, weights)
filename = dir + 'equierr.pdf'
equierr(r, s, nbins, filename, 20, weights)
filename = dir + 'cumulative_exact.pdf'
_, _, _ = cumulative(
s + t, s, majorticks, minorticks, filename,
title='exact expectations', weights=weights)
filename = dir + 'exact.pdf'
exactdist(s + t, s, filename)
| python | 18,856 |
# Copyright 2020 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
# Lint as: python3
"""Module for probabilistic programming transformations.
## Probabilistic programs
A probabilistic program is defined as a JAX function that takes in a
`jax.random.PRNGKey` as its first input, and any number of subsequent
conditioning arguments. The output of the function is the output of the
probabilistic program.
### A simple program:
```python
def f(key):
return random.normal(key)
```
In this program, we sample a random normal variable and return it. Conceptually,
this program represents the distribution `p(x) = Normal(0, 1)`.
### A conditional program:
```python
def f(key, z):
return z * random.normal(key)
```
In this program we sample a distribution conditional on `z` (i.e. a distribution
`p(x | z)`).
## Function transformations
The goal of the probabilistic programming package is to enable writing simple
programs and to use program transformations to create complexity. Here we
outline some of the transformations available in the module.
### `random_variable`
`random_variable` is a general purpose function that can be used to 1) tag
values for use in downstream transforms and 2) convert objects into
probabilistic programs. In implementation, `random_variable` is a
single-dispatch function whose implementation for functions and objects is
already registered. By default, it will tag a value with a name and will only
work on JAX types (e.g. DeviceArrays and tracers). We also register an
implementation for function types, where it returns the original function but
when provided the name, tags the output of the function. The registry enables
objects such as TensorFlow Probability distributions to register as
random-variable-like with Oryx.
Tagging a value in a probabilistic program as a random variable enables it to
be used by downstream transforms described below, such as `joint_sample`,
`conditional`, `intervene`, and `graph_replace`.
### `log_prob`
`log_prob` takes a probabilistic program and returns a function that computes
the log probability of a sample. It relies on the fact that certain sampling
primitives have been registered with the transformation. Specifically, it
returns a program that when provided an output from the program attempts to
compute the log-probability of *all* random samples in the program.
Examples:
```python
def f1(key):
return random_normal(key)
log_prob(f1)(0.) # ==> -0.9189385
log_prob(f1)(1.) # ==> -1.4189385
def f2(key):
k1, k2 = random.split(key)
return [random_normal(k1), random_normal(k2)]
log_prob(f2)([0., 0.]) # ==> -1.837877
```
For programs that sample variables that aren't returned as part of the output of
the program (latent variables), the `log_prob` of the program will error,
because there is insufficient information from the output of the program to
compute the log probabilities of all the random samples in the program.
```python
def f(key):
k1, k2 = random.split(key)
z = random_normal(k1)
return random_normal(k2) + z
log_prob(f)(0.) # ==> Error!
```
In this case, we can use `joint_sample` to transform it into one that returns
values for all the latent variables, and `log_prob` will compute the joint
log-probability of the function.
`log_prob` is also able to invert bijective functions and compute the
change-of-variables formula for probabilistic programs. For more details,
see `oryx.core.interpreters.log_prob`.
```python
def f(key):
return np.exp(random_normal(key))
log_prob(f)(np.exp(0.)) # ==> -0.9189385
log_prob(f)(np.exp(1.)) # ==> -2.4189386
```
### `joint_sample`
`joint_sample` takes a probabilistic program and returns another one that
returns a dictionary mapping named latent variables (tagged by
`random_variable`) to their latent values during execution.
Example:
```python
def f(key):
k1, k2 = random.split(key)
z = random_variable(random.normal, name='z')(k1)
return z + random_variable(random.normal, name='x')(k2)
joint_sample(f)(random.PRNGKey(0)) # ==> {'z': -0.0495, 'x': 0.193}
```
### `joint_log_prob`
`joint_log_prob` takes a probabilistic program and returns a function that
computes a log probability of dictionary mapping names to values corresponding
to random variables during the program's execution. It is the composition of
`log_prob` and `joint_sample`.
Example:
```python
def f(key):
k1, k2 = random.split(key)
z = random_variable(random.normal, name='z')(k1)
return z + random_variable(random.normal, name='x')(k2)
joint_log_prob(f)({'z': 0., 'x': 0.}) # ==> -1.837877
```
### `block`
`block` takes a probabilistic program and a sequence of string names and returns
the same program except that downstream transformations will ignore the provided
names.
Example:
```python
def f(key):
k1, k2 = random.split(key)
z = random_variable(random.normal, name='z')(k1)
return z + random_variable(random.normal, name='x')(k2)
joint_sample(block(f, names=['x']))(random.PRNGKey(0)) # ==> {'z': -0.0495}
```
### `intervene`
`intervene` takes a probabilistic program and a dictionary mapping names to
values of intervened random variables, and returns a new probabilistic program.
The new program runs the original, but when sampling a tagged random variable
whose name is present in the dictionary, it instead substitutes in the provided
value.
```python
def f1(key):
return random_variable(random.normal, name='x')(key)
intervene(f1, x=1.)(random.PRNGKey(0)) # => 1.
def f2(key):
k1, k2 = random.split(key)
z = random_variable(random.normal, name='z')(k1)
return z + random_variable(random.normal, name='x')(k2)
intervene(f2, z=1., x=1.)(random.PRNGKey(0)) # => 2.
```
### `conditional`
`conditional` is similar to `intervene`, except instead of taking a dictionary
of observations, it takes a list of names and returns a conditional
probabilistic program which takes additional arguments corresponding to random
variables with the aforementioned list of names.
Example:
```python
def f(key):
k1, k2 = random.split(key)
z = random_variable(random.normal, name='z')(k1)
return z + random_variable(random.normal, name='x')(k2)
conditional(f, ['z'])(random.PRNGKey(0), 0.) # => -1.25153887
conditional(f, ['z'])(random.PRNGKey(0), 1.) # => -0.25153887
conditional(f, ['z', 'x'])(random.PRNGKey(0), 1., 2.) # => 3.
```
### `graph_replace`
`graph_replace` is a transformation that executes the original program but
with new inputs and outputs specified by random variable names. Input names
allow injecting values for random variables in the program, and the values of
random variables corresponding to output names are returned.
Example:
```python
def f(key):
k1, k2, k3 = random.split(key, 3)
z = random_variable(random_normal, name='z')(k1)
x = random_variable(lambda key: random_normal(key) + z, name='x')(k2)
y = random_variable(lambda key: random_normal(key) + x, name='y')(k3)
return y
graph_replace(f, 'z', 'y') # returns a program p(y | z) with a latent variable x
graph_replace(f, 'z', 'x') # returns a program p(x | z)
graph_replace(f, 'x', 'y') # returns a program p(y | x)
```
"""
import functools
import types
from typing import Any, Callable, Dict, List, Optional, Sequence, Union
from jax import util as jax_util
from oryx.core import primitive
from oryx.core.interpreters import harvest
from oryx.core.interpreters import log_prob as lp
from oryx.core.ppl import plate_util
__all__ = [
'block',
'random_variable',
'rv',
'nest',
'log_prob',
'joint_sample',
'joint_log_prob',
'intervene',
'conditional',
'graph_replace',
]
Program = Callable[..., Any]
Scalar = Any
LogProbFunction = Callable[..., Scalar]
RANDOM_VARIABLE = 'random_variable'
safe_zip = jax_util.safe_zip
nest = harvest.nest
@functools.singledispatch
def random_variable(obj,
*,
name: Optional[str] = None,
plate: Optional[str] = None) -> Program: # pylint: disable=redefined-outer-name
"""A single-dispatch function used to tag values and the outputs of programs.
`random_variable` is a single-dispatch function that enables registering
custom types. Its default implementation is to tag input value with a name
and return it.
Args:
obj: A JAX type to be tagged.
name (str): A string name to tag input value, cannot be `None`.
plate (str): A string named axis for this random variable's plate.
Returns:
The input value.
"""
if name is None:
raise ValueError(f'Cannot call `random_variable` on {type(obj)} '
'without passing in a name.')
if plate is not None:
raise ValueError(f'Cannot call `random_variable` on {type(obj)} '
'with a plate.')
return harvest.sow(obj, tag=RANDOM_VARIABLE, name=name, mode='strict')
def plate(f: Optional[Program] = None, name: Optional[str] = None):
"""Transforms a program into one that draws samples on a named axis.
In graphical model parlance, a plate designates independent random variables.
The `plate` transformation follows this idea, where a `plate`-ed program
draws independent samples. Unlike `jax.vmap`-ing a program, which also
produces independent samples with positional batch dimensions, `plate`
produces samples with implicit named axes. Named axis support is useful for
other JAX transformations like `pmap` and `xmap`.
Specifically, a `plate`-ed program creates a different key for each axis
of the named axis. `log_prob` reduces over the named axis to produce a single
value.
Example usage:
```python
@ppl.plate(name='foo')
def model(key):
return random_variable(random.normal)(key)
# We can't call model directly because there are implicit named axes present
try:
model(random.PRNGKey(0))
except NameError:
print('No named axis present!')
# If we vmap with a named axis, we produce independent samples.
vmap(model, axis_name='foo')(random.split(random.PRNGKey(0), 3)) #
```
Args:
f: a `Program` to transform. If `f` is `None`, `plate` returns a decorator.
name: a `str` name for the plate which can used as a name axis in JAX
functions and transformations.
Returns:
A decorator if `f` is `None` or a transformed program if `f` is provided.
    The transformed program produces samples that are independent across a
    named axis with name `name`.
"""
def transform(f: Program) -> Program:
return plate_util.make_plate(f, name=name)
if f is not None:
return transform(f)
return transform
# Alias for random_variable
rv = random_variable
@random_variable.register(types.FunctionType)
@random_variable.register(functools.partial)
def function_random_variable(f: Program,
*,
name: Optional[str] = None,
plate: Optional[str] = None) -> Program: # pylint: disable=redefined-outer-name
"""Registers functions with the `random_variable` single dispatch function.
Args:
f: A probabilistic program.
name (str): A string name that is used to when tagging the output of `f`.
plate (str): A string named axis for this random variable's plate.
Returns:
A probabilistic program whose output is tagged with `name`.
"""
def wrapped(*args, **kwargs):
fun = f
if plate is not None:
fun = plate_util.make_plate(fun, name=plate)
if name is not None:
return random_variable(nest(fun, scope=name)(*args, **kwargs), name=name)
return fun(*args, **kwargs)
return wrapped
@functools.singledispatch
def log_prob(obj: object) -> LogProbFunction:
"""Returns a function that computes the log probability of a sample."""
raise NotImplementedError(f'`log_prob` not implemented for type: {type(obj)}')
@log_prob.register(types.FunctionType)
@log_prob.register(functools.partial)
def function_log_prob(f: Program) -> LogProbFunction:
"""Registers the `log_prob` for probabilistic programs.
See `core.interpreters.log_prob` for details of this function's
implementation.
Args:
f: A probabilitic program.
Returns:
A function that computes the log probability of a sample from the program.
"""
return lp.log_prob(f)
def joint_sample(f: Program) -> Program:
"""Returns a program that outputs a dictionary of latent random variable samples."""
return harvest.reap(f, tag=RANDOM_VARIABLE)
def joint_log_prob(f: Program) -> Scalar:
"""Returns a function that computes the log probability of all of a program's random variables."""
return log_prob(joint_sample(f))
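# Hedged usage sketch (mirrors the examples in the module docstring above):
#
#   def model(key):
#     k1, k2 = random.split(key)
#     z = random_variable(random.normal, name='z')(k1)
#     return z + random_variable(random.normal, name='x')(k2)
#
#   joint_sample(model)(random.PRNGKey(0))     # ==> {'z': ..., 'x': ...}
#   joint_log_prob(model)({'z': 0., 'x': 0.})  # ==> -1.837877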
def block(f: Program, names: Sequence[str]) -> Program:
"""Returns a program that removes the provided names from transformations."""
def program(key, *args, **kwargs):
return harvest.plant(
f,
tag=RANDOM_VARIABLE,
blocklist=names)({}, key, *args, **kwargs)
return program
def intervene(f: Program, **observations: Dict[str, Any]) -> Program:
"""Transforms a program into one where provided random variables are fixed.
`intervene` is a probabilistic program transformation that fixes the values
for certain random samples in an input program. A probabilistic program may
sample intermediate latent random variables while computing its output.
Observing those random variables converts them into deterministic constants
that are just used in the forward computation.
Random variables that are intervened are *no longer random variables*. This
  means that if a variable `x` is intervened on, it will no longer appear in the
`joint_sample` of a program and its `log_prob` will no longer be computed as
part of a program's `log_prob`.
## Examples:
### Simple usage:
```python
def model(key):
return random_variable(random.normal, name='x')(key)
intervene(model, x=1.)(random.PRNGKey(0)) # => 1.
```
### Multiple random variables:
```python
def model(key):
k1, k2 = random.split(key)
z = random_variable(random.normal, name='z')(k1)
return z + random_variable(random.normal, name='x')(k2)
intervene(model, z=1., x=1.)(random.PRNGKey(0)) # => 2.
```
Args:
f: A probabilistic program.
**observations: A dictionary mapping string names for random variables to
values.
Returns:
A probabilistic program that executes its input program with provided
variables fixed to their values.
"""
def wrapped(*args, **kwargs):
return harvest.plant(
f, tag=RANDOM_VARIABLE)(observations, *args, **kwargs)
return wrapped
def conditional(f: Program, names: Union[List[str], str]) -> Program:
"""Conditions a probabilistic program on random variables.
`conditional` is a probabilistic program transformation that converts latent
random variables into conditional inputs to the program. The random variables
that are moved to the input are specified via a list of names that correspond
to tagged random samples from the program. The final arguments to the output
program correspond to the list of names passed into `conditional`.
Random variables that are conditioned are *no longer random variables*. This
means that if a variable `x` is conditioned on, it will no longer appear in
the `joint_sample` of a program and its `log_prob` will no longer be computed
as part of a program's `log_prob`.
## Example:
```python
def model(key):
k1, k2 = random.split(key)
z = random_variable(random.normal, name='z')(k1)
return z + random_variable(random.normal, name='x')(k2)
conditional(model, ['z'])(random.PRNGKey(0), 0.) # => -1.25153887
conditional(model, ['z'])(random.PRNGKey(0), 1.) # => -0.25153887
  conditional(model, ['z', 'x'])(random.PRNGKey(0), 1., 2.) # => 3.
```
Args:
f: A probabilistic program.
names: A string or list of strings correspond to random variable names in
`f`.
Returns:
A probabilistic program with additional conditional inputs.
"""
if isinstance(names, str):
names = [names]
num_conditions = len(names)
def wrapped(*args, **kwargs):
if num_conditions > 0:
args, condition_values = args[:-num_conditions], args[-num_conditions:]
conditions = dict(safe_zip(names, condition_values))
else:
conditions = {}
return intervene(f, **conditions)(*args, **kwargs)
return wrapped
def graph_replace(f: Program, input_names: Union[List[str], str],
output_names: Union[List[str], str]) -> Program:
"""Transforms a program to one with new inputs and outputs.
`graph_replace` enables redefining the inputs and outputs of a probabilistic
program that samples latent random variables. It takes a program, along
with a list of input names and output names, and returns a function from
the random variables corresponding to the input names to the ones
corresponding to the output names.
Args:
f: A probabilistic program.
input_names: A string or list of strings that correspond to random
variables.
output_names: A string or list of strings that correspond to random
variables.
Returns:
A probabilistic program that maps the random variables corresponding to the
input names to those of the output names.
"""
if isinstance(output_names, str):
output_names = [output_names]
single_output = True
else:
single_output = False
def wrapped(*args, **kwargs):
latents = harvest.reap(
conditional(f, input_names), tag=RANDOM_VARIABLE)(*args, **kwargs)
outputs = [latents[name] for name in output_names]
latents = {
name: harvest.sow(value, tag=RANDOM_VARIABLE, name=name, mode='strict')
for name, value in latents.items()
if name not in output_names
}
if single_output:
outputs = outputs[0]
return primitive.tie_in(latents, outputs)
return wrapped
| python | 18,422 |
from dash.dependencies import Input, Output, State
from app import app
from carrotlayout import *
import carrotmysql
@app.callback(Output('tabs-content', 'children'),
[Input('tabs', 'value')])
def render_content(tab):
if tab == 'loss':
return loss_display()
elif tab == 'accuracy':
return accuracy_display()
elif tab == 'model':
return model_display()
elif tab == 'parameters':
return parameters_display()
elif tab == 'gradients':
return gradients_display()
# Reset the slider to its initial value of 0 when the dropdown changes.
@app.callback(Output('parameters-slider', 'value'),
[Input('parameters-dropdown', 'value')])
def update_output(input1):
return 0
@app.callback(Output('parameters-graph', 'figure'),
[Input('parameters-slider', 'value')],
[State('parameters-dropdown', 'value')])
def update_output(input1, input2):
arr = carrotmysql.query.batch_seq(input1)
return {
'data': [
{'x': arr[0],
'y': arr[1],
'z': arr[2],
'type': 'scatter3d', 'name': input2}
],
'layout': {
'title': input2
}
}
@app.callback(Output('parameters-slider', 'max'),
[Input('parameters-dropdown', 'value')])
def update_output(input1):
carrotmysql.query.search_layer(input1, parameter=True)
return carrotmysql.query.batch_size
# Reset the slider to its initial value of 0 when the dropdown changes.
@app.callback(Output('gradients-slider', 'value'),
[Input('gradients-dropdown', 'value')])
def update_output(input1):
return 0
@app.callback(Output('gradients-graph', 'figure'),
[Input('gradients-slider', 'value')],
[State('gradients-dropdown', 'value')])
def update_output(input1, input2):
arr = carrotmysql.query.batch_seq(input1)
return {
'data': [
{'x': arr[0],
'y': arr[1],
'z': arr[2],
'type': 'scatter3d', 'name': input2}
],
'layout': {
'title': input2
}
}
@app.callback(Output('gradients-slider', 'max'),
[Input('gradients-dropdown', 'value')])
def update_output(input1):
carrotmysql.query.search_layer(input1, parameter=False)
    return carrotmysql.query.batch_size
| python | 2,340 |
from time import perf_counter
from functools import wraps
from cachetools import TTLCache
from threading import RLock
from autumn import (
DEL_CMDS,
DEV_USERS,
DRAGONS,
SUPPORT_CHAT,
DEMONS,
TIGERS,
WOLVES,
dispatcher,
)
from telegram import Chat, ChatMember, ParseMode, Update
from telegram.ext import CallbackContext
# stores admemes in memory for 10 min.
ADMIN_CACHE = TTLCache(maxsize=512, ttl=60 * 10, timer=perf_counter)
THREAD_LOCK = RLock()
def is_whitelist_plus(chat: Chat, user_id: int, member: ChatMember = None) -> bool:
return any(user_id in user for user in [WOLVES, TIGERS, DEMONS, DRAGONS, DEV_USERS])
def is_support_plus(chat: Chat, user_id: int, member: ChatMember = None) -> bool:
return user_id in DEMONS or user_id in DRAGONS or user_id in DEV_USERS
def is_sudo_plus(chat: Chat, user_id: int, member: ChatMember = None) -> bool:
return user_id in DRAGONS or user_id in DEV_USERS
def is_user_admin(chat: Chat, user_id: int, member: ChatMember = None) -> bool:
if (
chat.type == "private"
or user_id in DRAGONS
or user_id in DEV_USERS
or chat.all_members_are_administrators
or user_id in [777000, 1087968824]
): # Count telegram and Group Anonymous as admin
return True
if not member:
with THREAD_LOCK:
# try to fetch from cache first.
try:
return user_id in ADMIN_CACHE[chat.id]
except KeyError:
                # A KeyError means the cache entry was evicted or expired,
                # so query the bot API again and return the user's status,
                # saving the admin list in the cache for future use.
chat_admins = dispatcher.bot.getChatAdministrators(chat.id)
admin_list = [x.user.id for x in chat_admins]
ADMIN_CACHE[chat.id] = admin_list
return user_id in admin_list
else:
return member.status in ("administrator", "creator")
def is_bot_admin(chat: Chat, bot_id: int, bot_member: ChatMember = None) -> bool:
if chat.type == "private" or chat.all_members_are_administrators:
return True
if not bot_member:
bot_member = chat.get_member(bot_id)
return bot_member.status in ("administrator", "creator")
def can_delete(chat: Chat, bot_id: int) -> bool:
return chat.get_member(bot_id).can_delete_messages
def is_user_ban_protected(chat: Chat, user_id: int, member: ChatMember = None) -> bool:
if (
chat.type == "private"
or user_id in DRAGONS
or user_id in DEV_USERS
or user_id in WOLVES
or user_id in TIGERS
or chat.all_members_are_administrators
or user_id in [777000, 1087968824]
): # Count telegram and Group Anonymous as admin
return True
if not member:
member = chat.get_member(user_id)
return member.status in ("administrator", "creator")
def is_user_in_chat(chat: Chat, user_id: int) -> bool:
member = chat.get_member(user_id)
return member.status not in ("left", "kicked")
def dev_plus(func):
@wraps(func)
def is_dev_plus_func(update: Update, context: CallbackContext, *args, **kwargs):
bot = context.bot
user = update.effective_user
if user.id in DEV_USERS:
return func(update, context, *args, **kwargs)
elif not user:
pass
elif DEL_CMDS and " " not in update.effective_message.text:
try:
update.effective_message.delete()
except:
pass
else:
update.effective_message.reply_text(
"This is a developer restricted command."
" You do not have permissions to run this.",
)
return is_dev_plus_func
def sudo_plus(func):
@wraps(func)
def is_sudo_plus_func(update: Update, context: CallbackContext, *args, **kwargs):
bot = context.bot
user = update.effective_user
chat = update.effective_chat
if user and is_sudo_plus(chat, user.id):
return func(update, context, *args, **kwargs)
elif not user:
pass
elif DEL_CMDS and " " not in update.effective_message.text:
try:
update.effective_message.delete()
except:
pass
else:
update.effective_message.reply_text(
"Who dis non-admin telling me what to do? unataka ngumi?",
)
return is_sudo_plus_func
def support_plus(func):
@wraps(func)
def is_support_plus_func(update: Update, context: CallbackContext, *args, **kwargs):
bot = context.bot
user = update.effective_user
chat = update.effective_chat
if user and is_support_plus(chat, user.id):
return func(update, context, *args, **kwargs)
elif DEL_CMDS and " " not in update.effective_message.text:
try:
update.effective_message.delete()
except:
pass
return is_support_plus_func
def whitelist_plus(func):
@wraps(func)
def is_whitelist_plus_func(
update: Update, context: CallbackContext, *args, **kwargs,
):
bot = context.bot
user = update.effective_user
chat = update.effective_chat
if user and is_whitelist_plus(chat, user.id):
return func(update, context, *args, **kwargs)
else:
update.effective_message.reply_text(
f"You don't have access to use this.\nVisit @{SUPPORT_CHAT}",
)
return is_whitelist_plus_func
def user_admin(func):
@wraps(func)
def is_admin(update: Update, context: CallbackContext, *args, **kwargs):
bot = context.bot
user = update.effective_user
chat = update.effective_chat
if user and is_user_admin(chat, user.id):
return func(update, context, *args, **kwargs)
elif not user:
pass
elif DEL_CMDS and " " not in update.effective_message.text:
try:
update.effective_message.delete()
except:
pass
else:
update.effective_message.reply_text(
"Who dis non-admin telling me what to do? unataka ngumi?",
)
return is_admin
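# Hedged usage sketch (not part of the original module): these decorators wrap
# ordinary python-telegram-bot handlers; `promote` below is a hypothetical
# handler name.
#
#     @user_admin
#     def promote(update: Update, context: CallbackContext):
#         ...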
def user_admin_no_reply(func):
@wraps(func)
def is_not_admin_no_reply(
update: Update, context: CallbackContext, *args, **kwargs,
):
bot = context.bot
user = update.effective_user
chat = update.effective_chat
if user and is_user_admin(chat, user.id):
return func(update, context, *args, **kwargs)
elif not user:
pass
elif DEL_CMDS and " " not in update.effective_message.text:
try:
update.effective_message.delete()
except:
pass
return is_not_admin_no_reply
def user_not_admin(func):
@wraps(func)
def is_not_admin(update: Update, context: CallbackContext, *args, **kwargs):
bot = context.bot
user = update.effective_user
chat = update.effective_chat
if user and not is_user_admin(chat, user.id):
return func(update, context, *args, **kwargs)
elif not user:
pass
return is_not_admin
def bot_admin(func):
@wraps(func)
def is_admin(update: Update, context: CallbackContext, *args, **kwargs):
bot = context.bot
chat = update.effective_chat
update_chat_title = chat.title
message_chat_title = update.effective_message.chat.title
if update_chat_title == message_chat_title:
not_admin = "I'm not admin! - REEEEEE"
else:
not_admin = f"I'm not admin in <b>{update_chat_title}</b>! - REEEEEE"
if is_bot_admin(chat, bot.id):
return func(update, context, *args, **kwargs)
else:
update.effective_message.reply_text(not_admin, parse_mode=ParseMode.HTML)
return is_admin
def bot_can_delete(func):
@wraps(func)
def delete_rights(update: Update, context: CallbackContext, *args, **kwargs):
bot = context.bot
chat = update.effective_chat
update_chat_title = chat.title
message_chat_title = update.effective_message.chat.title
if update_chat_title == message_chat_title:
            cant_delete = "I can't delete messages here!\nMake sure I'm admin and can delete other users' messages."
        else:
            cant_delete = f"I can't delete messages in <b>{update_chat_title}</b>!\nMake sure I'm admin and can delete other users' messages there."
if can_delete(chat, bot.id):
return func(update, context, *args, **kwargs)
else:
update.effective_message.reply_text(cant_delete, parse_mode=ParseMode.HTML)
return delete_rights
def can_pin(func):
@wraps(func)
def pin_rights(update: Update, context: CallbackContext, *args, **kwargs):
bot = context.bot
chat = update.effective_chat
update_chat_title = chat.title
message_chat_title = update.effective_message.chat.title
if update_chat_title == message_chat_title:
cant_pin = (
"I can't pin messages here!\nMake sure I'm admin and can pin messages."
)
else:
cant_pin = f"I can't pin messages in <b>{update_chat_title}</b>!\nMake sure I'm admin and can pin messages there."
if chat.get_member(bot.id).can_pin_messages:
return func(update, context, *args, **kwargs)
else:
update.effective_message.reply_text(cant_pin, parse_mode=ParseMode.HTML)
return pin_rights
def can_promote(func):
@wraps(func)
def promote_rights(update: Update, context: CallbackContext, *args, **kwargs):
bot = context.bot
chat = update.effective_chat
update_chat_title = chat.title
message_chat_title = update.effective_message.chat.title
if update_chat_title == message_chat_title:
cant_promote = "I can't promote/demote people here!\nMake sure I'm admin and can appoint new admins."
else:
cant_promote = (
f"I can't promote/demote people in <b>{update_chat_title}</b>!\n"
f"Make sure I'm admin there and can appoint new admins."
)
if chat.get_member(bot.id).can_promote_members:
return func(update, context, *args, **kwargs)
else:
update.effective_message.reply_text(cant_promote, parse_mode=ParseMode.HTML)
return promote_rights
def can_restrict(func):
@wraps(func)
def restrict_rights(update: Update, context: CallbackContext, *args, **kwargs):
bot = context.bot
chat = update.effective_chat
update_chat_title = chat.title
message_chat_title = update.effective_message.chat.title
if update_chat_title == message_chat_title:
cant_restrict = "I can't restrict people here!\nMake sure I'm admin and can restrict users."
else:
cant_restrict = f"I can't restrict people in <b>{update_chat_title}</b>!\nMake sure I'm admin there and can restrict users."
if chat.get_member(bot.id).can_restrict_members:
return func(update, context, *args, **kwargs)
else:
update.effective_message.reply_text(
cant_restrict, parse_mode=ParseMode.HTML,
)
return restrict_rights
def user_can_ban(func):
@wraps(func)
def user_is_banhammer(update: Update, context: CallbackContext, *args, **kwargs):
bot = context.bot
user = update.effective_user.id
member = update.effective_chat.get_member(user)
if (
not (member.can_restrict_members or member.status == "creator")
and user not in DRAGONS
and user not in [777000, 1087968824]
):
update.effective_message.reply_text(
"Sorry son, but you're not worthy to wield the banhammer.",
)
return ""
return func(update, context, *args, **kwargs)
return user_is_banhammer
def connection_status(func):
@wraps(func)
def connected_status(update: Update, context: CallbackContext, *args, **kwargs):
conn = connected(
context.bot,
update,
update.effective_chat,
update.effective_user.id,
need_admin=False,
)
if conn:
chat = dispatcher.bot.getChat(conn)
update.__setattr__("_effective_chat", chat)
return func(update, context, *args, **kwargs)
else:
if update.effective_message.chat.type == "private":
update.effective_message.reply_text(
"Send /connect in a group that you and I have in common first.",
)
return connected_status
return func(update, context, *args, **kwargs)
return connected_status
# Workaround for circular import with connection.py
from autumn.modules import connection
connected = connection.connected
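# Hedged usage sketch (added for illustration; the handler name below is a
# placeholder, not part of this module). The decorators above are meant to
# wrap python-telegram-bot handler callbacks, e.g.:
#
#   @connection_status
#   @bot_can_delete
#   @user_admin
#   def purge(update: Update, context: CallbackContext):
#       ...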
| python | 13,223 |
import sys
from pyspark import SparkContext
from pyspark.conf import SparkConf
# Configuration
conf = SparkConf().setMaster("local[2]").setAppName("My app")
sc = SparkContext(conf=conf)
# Return the content of the files, line by line, as an RDD
lines = sc.textFile(sys.argv[1])
# Extract the month, the temperature and the quality
values = (lines.map(lambda lamb: (lamb[19:21], lamb[87:92], lamb[92]))
          # Filter on the temperature and the quality
          .filter(lambda (mois, temp, qual): int(temp) != 9999 and int(qual) in (0, 1, 4, 5, 9))
          # Keep only the month and the temperature
          .map(lambda (mois, temp, qual): (mois, temp))
          # Reduce by month, keeping the maximum temperature
          .reduceByKey(max)
          # Sort by key
          .sortByKey()
          .collect())
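# Note (added): the tuple-unpacking lambdas above (and the `print` statement
# below) are Python 2 only syntax. A Python 3 sketch of the filter step would
# index the tuple instead, e.g.
#   .filter(lambda rec: int(rec[1]) != 9999 and int(rec[2]) in (0, 1, 4, 5, 9))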
print values | python | 761 |
import os
from setuptools import setup, find_packages
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name="openbaton-cli",
version="6.0.0",
author="Open Baton",
author_email="[email protected]",
description="The Open Baton CLI",
license="Apache 2",
keywords="python vnfm nfvo open baton openbaton sdk cli rest",
url="http://openbaton.github.io/",
packages=find_packages(),
scripts=["openbaton", "openbaton-v2"],
install_requires=[
'requests',
'tabulate',
'argcomplete',
'configparser',
'asyncio',
'cliff',
],
long_description=read('README.rst'),
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
'Topic :: Software Development :: Build Tools',
"Topic :: Utilities",
"License :: OSI Approved :: Apache Software License",
],
entry_points={
'console_scripts': [
'openbaton = org.openbaton.cli.openbaton:start',
],
'openbaton.cmd': [
'nsd = org.openbaton.v2.nsd:Nsd',
'nsr = org.openbaton.v2.nsr:Nsr',
'project = org.openbaton.v2.projects:Projects',
'vim = org.openbaton.v2.vims:Vims',
'event = org.openbaton.v2.events:Events',
'package = org.openbaton.v2.packages:VnfPackages',
'user = org.openbaton.v2.users:Users',
'service = org.openbaton.v2.services:Services',
'market = org.openbaton.v2.market:Market',
'script = org.openbaton.v2.scripts:Scripts',
'key = org.openbaton.v2.keys:Keys',
]
}
)
| python | 1,710 |
#! /usr/bin/env python2
"""
I/O script to save and load the data coming with the MPI-Sintel low-level
computer vision benchmark.
For more details about the benchmark, please visit www.mpi-sintel.de
CHANGELOG:
v1.0 (2015/02/03): First release
Copyright (c) 2015 Jonas Wulff
Max Planck Institute for Intelligent Systems, Tuebingen, Germany
"""
# Requirements: Numpy and PIL/Pillow
import numpy as np
try:
import png
has_png = True
except:
has_png = False
png=None
# Check for endianness, based on Daniel Scharstein's optical flow code.
# Using little-endian architecture, these two should be equal.
TAG_FLOAT = 202021.25
TAG_CHAR = 'PIEH'
def flow_read(filename):
""" Read optical flow from file, return (U,V) tuple.
Original code by Deqing Sun, adapted from Daniel Scharstein.
"""
f = open(filename,'rb')
check = np.fromfile(f,dtype=np.float32,count=1)[0]
assert check == TAG_FLOAT, ' flow_read:: Wrong tag in flow file (should be: {0}, is: {1}). Big-endian machine? '.format(TAG_FLOAT,check)
width = np.fromfile(f,dtype=np.int32,count=1)[0]
height = np.fromfile(f,dtype=np.int32,count=1)[0]
size = width*height
assert width > 0 and height > 0 and size > 1 and size < 100000000, ' flow_read:: Wrong input size (width = {0}, height = {1}).'.format(width,height)
tmp = np.fromfile(f,dtype=np.float32,count=-1).reshape((height,width*2))
u = tmp[:,np.arange(width)*2]
v = tmp[:,np.arange(width)*2 + 1]
return u,v
def flow_write(filename,uv,v=None):
""" Write optical flow to file.
If v is None, uv is assumed to contain both u and v channels,
stacked in depth.
Original code by Deqing Sun, adapted from Daniel Scharstein.
"""
nBands = 2
if v is None:
uv_ = np.array(uv)
assert(uv_.ndim==3)
if uv_.shape[0] == 2:
u = uv_[0,:,:]
v = uv_[1,:,:]
elif uv_.shape[2] == 2:
u = uv_[:,:,0]
v = uv_[:,:,1]
else:
            raise ValueError('Wrong format for flow input')
else:
u = uv
assert(u.shape == v.shape)
height,width = u.shape
f = open(filename,'wb')
# write the header
f.write(TAG_CHAR)
np.array(width).astype(np.int32).tofile(f)
np.array(height).astype(np.int32).tofile(f)
# arrange into matrix form
tmp = np.zeros((height, width*nBands))
tmp[:,np.arange(width)*2] = u
tmp[:,np.arange(width)*2 + 1] = v
tmp.astype(np.float32).tofile(f)
f.close()
def flow_read_png(fpath):
"""
Read KITTI optical flow, returns u,v,valid mask
"""
if not has_png:
print('Error. Please install the PyPNG library')
return
R = png.Reader(fpath)
width,height,data,_ = R.asDirect()
I = np.array(map(lambda x:x,data)).reshape((height,width,3))
u_ = I[:,:,0]
v_ = I[:,:,1]
valid = I[:,:,2]
u = (u_.astype('float64')-2**15)/64.0
v = (v_.astype('float64')-2**15)/64.0
return u,v,valid
def flow_write_png(u,v,fpath,valid=None):
"""
Write KITTI optical flow.
"""
if not has_png:
print('Error. Please install the PyPNG library')
return
    if valid is None:
valid_ = np.ones(u.shape,dtype='uint16')
else:
valid_ = valid.astype('uint16')
u_ = ((u*64.0)+2**15).astype('uint16')
v_ = ((v*64.0)+2**15).astype('uint16')
I = np.dstack((u_,v_,valid_))
W = png.Writer(width=u.shape[1],
height=u.shape[0],
bitdepth=16,
planes=3)
with open(fpath,'wb') as fil:
W.write(fil,I.reshape((-1,3*u.shape[1])))
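# --- Hedged usage sketch (added for illustration, not in the original) ------
# Round-trips a small random flow field through the .flo writer/reader above.
# The temporary file path is an assumption made only for this example; the
# module targets Python 2, per the shebang at the top.
if __name__ == '__main__':
    import os
    import tempfile
    u0 = np.random.rand(4, 5).astype(np.float32)
    v0 = np.random.rand(4, 5).astype(np.float32)
    path = os.path.join(tempfile.gettempdir(), 'example_flow.flo')
    flow_write(path, u0, v0)
    u1, v1 = flow_read(path)
    print('max abs round-trip error: %g' % max(np.abs(u1 - u0).max(), np.abs(v1 - v0).max()))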
| python | 3,662 |
from anime_downloader import config
from anime_downloader.sites import get_anime_class
import os
import sys
import json
import logging
import click
import warnings
from time import time
# Don't warn if not using fuzzywuzzy[speedup]
with warnings.catch_warnings():
warnings.simplefilter('ignore')
from fuzzywuzzy import process
class Watcher:
WATCH_FILE = os.path.join(config.APP_DIR, 'watch.json')
def __init__(self):
pass
def new(self, url):
AnimeInfo = self._get_anime_info_class(url)
anime = AnimeInfo(url, timestamp=time())
self._append_to_watch_file(anime)
logging.info('Added {:.50} to watch list.'.format(anime.title))
return anime
def list(self):
animes = self._read_from_watch_file()
click.echo('{:>5} | {:^35} | {:^8} | {:^10}'.format(
'SlNo', 'Name', 'Eps', 'Type'
))
click.echo('-'*65)
fmt_str = '{:5} | {:35.35} | {:3}/{:<3} | {meta:10.10}'
for idx, anime in enumerate(animes):
meta = anime.meta
click.echo(fmt_str.format(idx+1, anime.title,
*anime.progress(),
meta=meta.get('Type', '')))
def anime_list(self):
return self._read_from_watch_file()
def get(self, anime_name):
animes = self._read_from_watch_file()
if isinstance(anime_name, int):
return animes[anime_name]
match = process.extractOne(anime_name, animes, score_cutoff=40)
if match:
anime = match[0]
logging.debug('Anime: {!r}, episodes_done: {}'.format(
anime, anime.episodes_done))
if (time() - anime._timestamp) > 4*24*60*60:
anime = self.update_anime(anime)
return anime
def update_anime(self, anime):
if not hasattr(anime, 'meta') or not anime.meta.get('Status') or \
anime.meta['Status'].lower() == 'airing':
logging.info('Updating anime {}'.format(anime.title))
AnimeInfo = self._get_anime_info_class(anime.url)
newanime = AnimeInfo(anime.url, episodes_done=anime.episodes_done,
timestamp=time())
newanime.title = anime.title
self.update(newanime)
return newanime
return anime
def add(self, anime):
self._append_to_watch_file(anime)
def remove(self, anime):
anime_name = anime.title
animes = self._read_from_watch_file()
animes = [anime for anime in animes if anime.title != anime_name]
self._write_to_watch_file(animes)
def update(self, changed_anime):
animes = self._read_from_watch_file()
animes = [anime for anime in animes
if anime.title != changed_anime.title]
animes = [changed_anime] + animes
self._write_to_watch_file(animes)
def _append_to_watch_file(self, anime):
if not os.path.exists(self.WATCH_FILE):
self._write_to_watch_file([anime])
return
data = self._read_from_watch_file()
data = [anime] + data
self._write_to_watch_file(data)
def _write_to_watch_file(self, animes):
animes = [anime.__dict__ for anime in animes]
with open(self.WATCH_FILE, 'w') as watch_file:
json.dump(animes, watch_file)
def _read_from_watch_file(self):
if not os.path.exists(self.WATCH_FILE):
logging.error('Add something to watch list first.')
sys.exit(1)
with open(self.WATCH_FILE, 'r') as watch_file:
data = json.load(watch_file)
ret = []
for anime_dict in data:
# For backwards compatibility
if '_episodeIds' in anime_dict:
anime_dict['_episode_urls'] = anime_dict['_episodeIds']
AnimeInfo = self._get_anime_info_class(anime_dict['url'])
anime = AnimeInfo(_skip_online_data=True)
anime.__dict__ = anime_dict
ret.append(anime)
return ret
def _get_anime_info_class(self, url):
cls = get_anime_class(url)
# TODO: Maybe this is better off as a mixin
class AnimeInfo(cls):
def __init__(self, *args, **kwargs):
self.episodes_done = kwargs.pop('episodes_done', 0)
self._timestamp = kwargs.pop('timestamp', 0)
super(cls, self).__init__(*args, **kwargs)
def progress(self):
return (self.episodes_done, len(self))
return AnimeInfo
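# Hedged usage sketch (added for illustration; the URL is a placeholder only):
#   watcher = Watcher()
#   anime = watcher.new('https://example.site/anime/some-show')
#   watcher.list()
#   watcher.remove(anime)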
| python | 4,632 |
# coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 5
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import isi_sdk_8_1_0
from isi_sdk_8_1_0.models.cluster_time_extended_extended import ClusterTimeExtendedExtended # noqa: E501
from isi_sdk_8_1_0.rest import ApiException
class TestClusterTimeExtendedExtended(unittest.TestCase):
"""ClusterTimeExtendedExtended unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testClusterTimeExtendedExtended(self):
"""Test ClusterTimeExtendedExtended"""
# FIXME: construct object with mandatory attributes with example values
# model = isi_sdk_8_1_0.models.cluster_time_extended_extended.ClusterTimeExtendedExtended() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| python | 1,002 |
"""
Django settings for expressive project.
Generated by 'django-admin startproject' using Django 2.0.1.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '6b#!*eyo9r3p^31f0h*n2#&^yxzrifc(%=ohtct&imt4d7mm-='
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'lexicon.apps.LexiconConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'expressive.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'expressive.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
| python | 3,137 |
# Copyright 2019, Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for creating and removing optimizer-specific flags."""
import collections
import contextlib
import inspect
from typing import Any, Callable, Dict, Iterable, List, Optional
from absl import flags
from absl import logging
import tensorflow as tf
import tensorflow_addons.optimizers as tfao
from utils.optimizers import lars
from utils.optimizers import shampoo
from utils.optimizers import yogi
_all_hparam_flags = []
@contextlib.contextmanager
def record_hparam_flags():
"""A context manager that yields all flags created in its scope.
This is useful for defining hyperparameter flags of an experiment, especially
when the flags are partitioned across a number of modules. The total list of
flags defined across modules can then be accessed via get_hparam_flags().
Example usage:
```python
with record_hparam_flags() as optimizer_hparam_flags:
flags.DEFINE_string('optimizer', 'sgd', 'Optimizer for training.')
with record_hparam_flags() as evaluation_hparam_flags:
flags.DEFINE_string('eval_metric', 'accuracy', 'Metric for evaluation.')
experiment_hparam_flags = get_hparam_flags().
```
Check `research/optimization/emnist/run_emnist.py` for more usage details.
Yields:
A list of all newly created flags.
"""
old_flags = set(iter(flags.FLAGS))
new_flags = []
yield new_flags
new_flags.extend([f for f in flags.FLAGS if f not in old_flags])
_all_hparam_flags.extend(new_flags)
def lookup_flag_values(flag_list: Iterable[str]) -> collections.OrderedDict:
"""Returns a dictionary of (flag_name, flag_value) for a list of flags."""
flag_odict = collections.OrderedDict()
for flag_name in flag_list:
if not isinstance(flag_name, str):
raise ValueError(
'All flag names must be strings. Flag {} was of type {}.'.format(
flag_name, type(flag_name)))
if flag_name not in flags.FLAGS:
raise ValueError('"{}" is not a defined flag.'.format(flag_name))
flag_odict[flag_name] = flags.FLAGS[flag_name].value
return flag_odict
def _optimizer_canonical_name(optimizer_cls):
  """Return a short, canonical name for an optimizer for use in flags."""
return optimizer_cls.__name__.lower()
# List of optimizers currently supported.
_SUPPORTED_OPTIMIZERS_CLS = [
tf.keras.optimizers.SGD,
tf.keras.optimizers.Adagrad,
tf.keras.optimizers.Adam,
yogi.Yogi,
lars.LARS,
tfao.lamb.LAMB,
shampoo.Shampoo,
]
_SUPPORTED_OPTIMIZERS = {
_optimizer_canonical_name(cls): cls for cls in _SUPPORTED_OPTIMIZERS_CLS
}
def define_optimizer_flags(prefix: str) -> None:
"""Defines flags with `prefix` to configure an optimizer.
  This method is intended to be paired with `create_optimizer_fn_from_flags`
  using the same `prefix`, to allow Python binaries to construct TensorFlow
  optimizers parameterized by commandline flags.
This creates two new flags:
* `--<prefix>_optimizer=<optimizer name>`
* `--<prefix>_learning_rate`
In addition to a suite of flags for each optimizer:
* `--<prefix>_<optimizer name>_<constructor_argument>`
For example, given the prefix "client" this will create flags (non-exhaustive
list):
* `--client_optimizer`
* `--client_learning_rate`
* `--client_sgd_momentum`
* `--client_sgd_nesterov`
* `--client_adam_beta_1`
* `--client_adam_beta_2`
* `--client_adam_epsilon`
  Then calls to `create_optimizer_fn_from_flags('client')` will construct an
optimizer of the type named in `--client_optimizer`, parameterized by the
flags prefixed with the matching optimizer name. For example, if
`--client_optimizer=sgd`, `--client_sgd_*` flags will be used.
IMPORTANT: For flags to be correctly parsed from the commandline, this method
  must be called before `absl.app.run(main)`, and is recommended to be called
next to other flag definitions at the top of a py_binary.
Args:
prefix: A string (possibly empty) indicating which optimizer is being
configured.
"""
# Create top-level, non-optimizer specific flags for picking the optimizer
# type and the learning rate.
flags.DEFINE_enum(
name='{!s}_optimizer'.format(prefix),
default=None,
enum_values=list(_SUPPORTED_OPTIMIZERS.keys()),
help='The type of optimizer to construct for `{!s}`'.format(prefix))
logging.info('Defined new flag: [%s]', '{!s}_optimizer'.format(prefix))
flags.DEFINE_float(
name='{!s}_learning_rate'.format(prefix),
default=None,
help='Base learning rate for optimizer `{!s}`'.format(prefix))
logging.info('Defined new flag: [%s]', '{!s}_learning_rate'.format(prefix))
for optimizer_name, optimizer_cls in _SUPPORTED_OPTIMIZERS.items():
# Pull out the constructor parameters except for `self`.
constructor_signature = inspect.signature(optimizer_cls.__init__)
constructor_params = list(constructor_signature.parameters.values())[1:]
def prefixed(basename, optimizer_name=optimizer_name):
if prefix:
return '{!s}_{!s}_{!s}'.format(prefix, optimizer_name, basename)
else:
return '{!s}_{!s}'.format(optimizer_name, basename)
def is_param_of_type(param, typ):
return (param.default is None and param.annotation == Optional[typ] or
isinstance(param.default, typ))
for param in constructor_params:
if param.name in ['kwargs', 'args', 'learning_rate']:
continue
if is_param_of_type(param, bool):
define_flag_fn = flags.DEFINE_bool
elif is_param_of_type(param, float):
define_flag_fn = flags.DEFINE_float
elif is_param_of_type(param, int):
define_flag_fn = flags.DEFINE_integer
elif is_param_of_type(param, str):
define_flag_fn = flags.DEFINE_string
elif is_param_of_type(param, List[str]):
define_flag_fn = flags.DEFINE_multi_string
else:
raise NotImplementedError('Cannot define flag [{!s}] '
'for parameter [{!s}] of type [{!s}] '
'(default value type [{!s}]) '
'on optimizer [{!s}]'.format(
prefixed(param.name),
param.name, param.annotation,
type(param.default), optimizer_name))
define_flag_fn(
name=prefixed(param.name),
default=param.default,
help='{!s} argument for the {!s} optimizer.'.format(
param.name, optimizer_name))
logging.info('Defined new flag: [%s]', prefixed(param.name))
def create_optimizer_fn_from_flags(
prefix: str) -> Callable[..., tf.keras.optimizers.Optimizer]:
"""Returns an optimizer function based on prefixed flags.
  This method is intended to be paired with `define_optimizer_flags` using the
  same `prefix`, to allow Python binaries to construct TensorFlow optimizers
parameterized by commandline flags.
This method expects at least two flags to have been defined and set:
* `--<prefix>_optimizer=<optimizer name>`
* `--<prefix>_learning_rate`
In addition to suites of flags for each optimizer:
* `--<prefix>_<optimizer name>_<constructor_argument>`
For example, if `prefix='client'` this method first reads the flags:
* `--client_optimizer`
* `--client_learning_rate`
If the optimizer flag is `'sgd'`, then a `tf.keras.optimizer.SGD` optimizer is
constructed using the values in the flags prefixed with `--client_sgd_`.
Args:
prefix: The same string prefix passed to `define_optimizer_flags`.
Returns:
A 1-arg function that accepts a learning rate and returns a
`tf.keras.optimizers.Optimizer`.
"""
def prefixed(basename):
return '{}_{}'.format(prefix, basename) if prefix else basename
optimizer_flag_name = prefixed('optimizer')
if flags.FLAGS[optimizer_flag_name] is None:
raise ValueError('Must specify flag --{!s}'.format(optimizer_flag_name))
optimizer_name = flags.FLAGS[optimizer_flag_name].value
optimizer_cls = _SUPPORTED_OPTIMIZERS.get(optimizer_name)
if optimizer_cls is None:
# To support additional optimizers, implement it as a
# `tf.keras.optimizers.Optimizer` and add to the `_SUPPORTED_OPTIMIZERS`
# dict.
logging.error(
        'Unknown optimizer [%s], known optimizers are [%s]. To add '
        'support for an optimizer, add the optimizer class to the '
'flag_utils._SUPPORTED_OPTIMIZERS list.', optimizer_name,
list(_SUPPORTED_OPTIMIZERS.keys()))
raise ValueError('`{!s}` is not a valid optimizer for flag --{!s}, must be '
'one of {!s}. See error log for details.'.format(
optimizer_name, optimizer_flag_name,
list(_SUPPORTED_OPTIMIZERS.keys())))
def _has_user_value(flag):
"""Check if a commandline flag has a user set value."""
return flag.present or flag.value != flag.default
# Validate that the optimizers that weren't picked don't have flag values set.
# Settings that won't be used likely means there is an expectation gap between
# the user and the system and we should notify them.
unused_flag_prefixes = [
prefixed(k) for k in _SUPPORTED_OPTIMIZERS.keys() if k != optimizer_name
]
mistakenly_set_flags = []
for flag_name in flags.FLAGS:
if not _has_user_value(flags.FLAGS[flag_name]):
# Flag was not set by the user, skip it.
continue
# Otherwise the flag has a value set by the user.
for unused_prefix in unused_flag_prefixes:
if flag_name.startswith(f'{unused_prefix}_'):
mistakenly_set_flags.append(flag_name)
break
if mistakenly_set_flags:
raise ValueError('Commandline flags for optimizers other than [{!s}] '
'(value of --{!s}) are set. These would be ignored, '
'were the flags set by mistake? Flags: {!s}'.format(
optimizer_name, optimizer_flag_name,
mistakenly_set_flags))
lr_flag_name = prefixed('learning_rate')
lr_flag = flags.FLAGS[lr_flag_name]
if _has_user_value(lr_flag):
default_lr = lr_flag.value
else:
raise ValueError(
'Learning rate for {!s} must be set by the flag --{!s} .'.format(
prefix, lr_flag_name))
flag_prefix = prefixed(optimizer_name)
prefix_len = len(flag_prefix) + 1
kwargs = {}
for flag_name in flags.FLAGS:
if not flag_name.startswith(f'{flag_prefix}_'):
continue
arg_name = flag_name[prefix_len:]
kwargs[arg_name] = flags.FLAGS[flag_name].value
if 'learning_rate' in kwargs:
kwargs.pop('learning_rate')
def optimizer_fn(learning_rate=default_lr):
return optimizer_cls(learning_rate=learning_rate, **kwargs)
return optimizer_fn
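# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration, not part of the original
# module). It assumes this file is importable as `flag_utils`, the name used
# in the error message above; the 'client' prefix is only an example.
#
#   from absl import app
#   from utils.optimizers import flag_utils
#
#   with flag_utils.record_hparam_flags():
#       flag_utils.define_optimizer_flags('client')
#
#   def main(_):
#       # e.g. run with:
#       #   --client_optimizer=sgd --client_learning_rate=0.1 --client_sgd_momentum=0.9
#       optimizer_fn = flag_utils.create_optimizer_fn_from_flags('client')
#       optimizer = optimizer_fn()  # a tf.keras.optimizers.SGD in this example
#
#   if __name__ == '__main__':
#       app.run(main)
# ---------------------------------------------------------------------------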
def remove_unused_optimizer_flags(
prefix: str, hparam_dict: Dict[str, Any]) -> collections.OrderedDict:
"""Removes unused optimizer flags with a given prefix.
This method is intended to be used with `define_optimizer_flags`, and is used
to remove elements of hparam_dict associated with unused optimizer flags.
For example, given the prefix "client", define_optimizer_flags will create
flags including:
* `--client_optimizer`
* `--client_learning_rate`
* `--client_sgd_momentum`
* `--client_sgd_nesterov`
* `--client_adam_beta_1`
* `--client_adam_beta_2`
* `--client_adam_epsilon`
and other such flags.
However, for purposes of recording hyperparameters, we would like to only keep
those that correspond to the optimizer selected in the flag
--client_optimizer. This method is intended to remove the unused flags.
For example, if `--client_optimizer=sgd` was set, then calling this method
with the prefix `client` will remove all pairs in hparam_dict except those
associated with the flags:
* `--client_optimizer`
* `--client_learning_rate`
* `--client_sgd_momentum`
* `--client_sgd_nesterov`
Args:
prefix: The prefix used to define optimizer flags, such as via
`flag_utils.define_optimizer_flags(prefix)`. Standard examples include
`prefix=client` and `prefix=server`.
hparam_dict: A dictionary of (string, value) pairs corresponding to
experiment hyperparameters.
Returns:
An ordered dictionary of (string, value) pairs from hparam_dict that omits
any pairs where string = "<prefix>_<optimizer>*" but <optimizer> is not the
one set via the flag --<prefix>_optimizer=...
"""
def prefixed(basename):
return '{}_{}'.format(prefix, basename) if prefix else basename
if prefixed('optimizer') not in hparam_dict.keys():
raise ValueError('The flag {!s} was not defined.'.format(
prefixed('optimizer')))
optimizer_name = hparam_dict[prefixed('optimizer')]
if not optimizer_name:
raise ValueError('The flag {!s} was not set. Unable to determine the '
'relevant optimizer.'.format(prefixed('optimizer')))
unused_optimizer_flag_prefixes = [
prefixed(k) for k in _SUPPORTED_OPTIMIZERS.keys() if k != optimizer_name
]
def _is_used_flag(flag_name):
# We filter by whether the flag contains an unused optimizer prefix.
# This retains any flag not of the form <prefix>_<optimizer>_*.
for unused_flag_prefix in unused_optimizer_flag_prefixes:
if flag_name.startswith(f'{unused_flag_prefix}_'):
return False
return True
used_flags = collections.OrderedDict()
for (flag_name, flag_value) in hparam_dict.items():
if _is_used_flag(flag_name):
used_flags[flag_name] = flag_value
return used_flags
| python | 14,205 |
from flask import Blueprint, abort
from flask_login import current_user
from catchat.extensions import db
from catchat.models import User
admin_bp = Blueprint('admin', __name__)
@admin_bp.route('/block/<int:user_id>', methods=['DELETE'])
def block_user(user_id):
if not current_user.is_admin:
abort(403)
user = User.query.get_or_404(user_id)
if user.is_admin:
        abort(400)  # invalid request
db.session.delete(user)
db.session.commit()
    return '', 204  # no message body, only indicates the request succeeded | python | 495
from flask import Blueprint
auth_bp = Blueprint('auth', __name__)
| python | 68 |
# 4874
# .+\.([^.]+)$
# POLYNOMIAL
# nums:5
# POLYNOMIAL AttackString:""+"1"*20000+"◎@! _1SLQ_1"
import re2 as re
from time import perf_counter
regex = r""".+\.([^.]+)$"""
REGEX = re.compile(regex)
for i in range(0, 150000):
ATTACK = "" + "1" * i * 10000 + "◎@! _1SLQ_1"
LEN = len(ATTACK)
BEGIN = perf_counter()
m = REGEX.search(ATTACK)
# m = REGEX.match(ATTACK)
DURATION = perf_counter() - BEGIN
print(f"{i *10000}: took {DURATION} seconds!") | python | 472 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import sys,os
import mxnet as mx
import numpy as np
import time
import math
import data_helpers
from collections import namedtuple
import logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)  # get a logger so that accuracies are printed
logs = sys.stderr
CNNModel = namedtuple("CNNModel", ['cnn_exec', 'symbol', 'data', 'label', 'param_blocks'])
def make_text_cnn(sentence_size, num_embed, batch_size, vocab_size,
num_label=2, filter_list=[3, 4, 5], num_filter=100,
dropout=0., with_embedding=True):
input_x = mx.sym.Variable('data') # placeholder for input
input_y = mx.sym.Variable('softmax_label') # placeholder for output
# embedding layer
if not with_embedding:
embed_layer = mx.sym.Embedding(data=input_x, input_dim=vocab_size, output_dim=num_embed, name='vocab_embed')
conv_input = mx.sym.Reshape(data=embed_layer, target_shape=(batch_size, 1, sentence_size, num_embed))
else:
conv_input = input_x
# create convolution + (max) pooling layer for each filter operation
pooled_outputs = []
for i, filter_size in enumerate(filter_list):
convi = mx.sym.Convolution(data=conv_input, kernel=(filter_size, num_embed), num_filter=num_filter)
relui = mx.sym.Activation(data=convi, act_type='relu')
pooli = mx.sym.Pooling(data=relui, pool_type='max', kernel=(sentence_size - filter_size + 1, 1), stride=(1,1))
pooled_outputs.append(pooli)
# combine all pooled outputs
total_filters = num_filter * len(filter_list)
concat = mx.sym.Concat(*pooled_outputs, dim=1)
h_pool = mx.sym.Reshape(data=concat, target_shape=(batch_size, total_filters))
# dropout layer
if dropout > 0.0:
h_drop = mx.sym.Dropout(data=h_pool, p=dropout)
else:
h_drop = h_pool
# fully connected
cls_weight = mx.sym.Variable('cls_weight')
cls_bias = mx.sym.Variable('cls_bias')
fc = mx.sym.FullyConnected(data=h_drop, weight=cls_weight, bias=cls_bias, num_hidden=num_label)
# softmax output
sm = mx.sym.SoftmaxOutput(data=fc, label=input_y, name='softmax')
return sm
def setup_cnn_model(ctx, batch_size, sentence_size, num_embed, vocab_size,
dropout=0.5, initializer=mx.initializer.Uniform(0.1), with_embedding=True):
cnn = make_text_cnn(sentence_size, num_embed, batch_size=batch_size,
vocab_size=vocab_size, dropout=dropout, with_embedding=with_embedding)
arg_names = cnn.list_arguments()
input_shapes = {}
if with_embedding:
input_shapes['data'] = (batch_size, 1, sentence_size, num_embed)
else:
input_shapes['data'] = (batch_size, sentence_size)
arg_shape, out_shape, aux_shape = cnn.infer_shape(**input_shapes)
arg_arrays = [mx.nd.zeros(s, ctx) for s in arg_shape]
args_grad = {}
for shape, name in zip(arg_shape, arg_names):
if name in ['softmax_label', 'data']: # input, output
continue
args_grad[name] = mx.nd.zeros(shape, ctx)
cnn_exec = cnn.bind(ctx=ctx, args=arg_arrays, args_grad=args_grad, grad_req='add')
param_blocks = []
arg_dict = dict(zip(arg_names, cnn_exec.arg_arrays))
for i, name in enumerate(arg_names):
if name in ['softmax_label', 'data']: # input, output
continue
initializer(name, arg_dict[name])
param_blocks.append( (i, arg_dict[name], args_grad[name], name) )
out_dict = dict(zip(cnn.list_outputs(), cnn_exec.outputs))
data = cnn_exec.arg_dict['data']
label = cnn_exec.arg_dict['softmax_label']
return CNNModel(cnn_exec=cnn_exec, symbol=cnn, data=data, label=label, param_blocks=param_blocks)
def train_cnn(model, X_train_batch, y_train_batch, X_dev_batch, y_dev_batch, batch_size,
optimizer='rmsprop', max_grad_norm=5.0, learning_rate=0.0005, epoch=200):
m = model
# create optimizer
opt = mx.optimizer.create(optimizer)
opt.lr = learning_rate
updater = mx.optimizer.get_updater(opt)
for iteration in range(epoch):
tic = time.time()
num_correct = 0
num_total = 0
for begin in range(0, X_train_batch.shape[0], batch_size):
batchX = X_train_batch[begin:begin+batch_size]
batchY = y_train_batch[begin:begin+batch_size]
if batchX.shape[0] != batch_size:
continue
m.data[:] = batchX
m.label[:] = batchY
# forward
m.cnn_exec.forward(is_train=True)
# backward
m.cnn_exec.backward()
# eval on training data
num_correct += sum(batchY == np.argmax(m.cnn_exec.outputs[0].asnumpy(), axis=1))
num_total += len(batchY)
# update weights
norm = 0
for idx, weight, grad, name in m.param_blocks:
grad /= batch_size
l2_norm = mx.nd.norm(grad).asscalar()
norm += l2_norm * l2_norm
norm = math.sqrt(norm)
for idx, weight, grad, name in m.param_blocks:
if norm > max_grad_norm:
grad *= (max_grad_norm / norm)
updater(idx, grad, weight)
# reset gradient to zero
grad[:] = 0.0
# decay learning rate
if iteration % 50 == 0 and iteration > 0:
opt.lr *= 0.5
print('reset learning rate to %g' % opt.lr,file=logs)
# end of training loop
toc = time.time()
train_time = toc - tic
train_acc = num_correct * 100 / float(num_total)
# saving checkpoint
if (iteration + 1) % 10 == 0:
prefix = 'cnn'
m.symbol.save('checkpoint/%s-symbol.json' % prefix)
save_dict = {('arg:%s' % k) :v for k, v in m.cnn_exec.arg_dict.items()}
save_dict.update({('aux:%s' % k) : v for k, v in m.cnn_exec.aux_dict.items()})
param_name = 'checkpoint/%s-%04d.params' % (prefix, iteration)
mx.nd.save(param_name, save_dict)
print('Saved checkpoint to %s' % param_name,file=logs)
# evaluate on dev set
num_correct = 0
num_total = 0
for begin in range(0, X_dev_batch.shape[0], batch_size):
batchX = X_dev_batch[begin:begin+batch_size]
batchY = y_dev_batch[begin:begin+batch_size]
if batchX.shape[0] != batch_size:
continue
m.data[:] = batchX
m.cnn_exec.forward(is_train=False)
num_correct += sum(batchY == np.argmax(m.cnn_exec.outputs[0].asnumpy(), axis=1))
num_total += len(batchY)
dev_acc = num_correct * 100 / float(num_total)
print('Iter [%d] Train: Time: %.3fs, Training Accuracy: %.3f \
--- Dev Accuracy thus far: %.3f' % (iteration, train_time, train_acc, dev_acc), file=logs)
def main():
print('Loading data...')
# word2vec = data_helpers.load_google_word2vec('data/GoogleNews-vectors-negative300.bin')
word2vec = data_helpers.load_pretrained_word2vec('data/rt.vec')
x, y = data_helpers.load_data_with_word2vec(word2vec)
# randomly shuffle data
np.random.seed(10)
shuffle_indices = np.random.permutation(np.arange(len(y)))
x_shuffled = x[shuffle_indices]
y_shuffled = y[shuffle_indices]
# split train/dev set
x_train, x_dev = x_shuffled[:-1000], x_shuffled[-1000:]
y_train, y_dev = y_shuffled[:-1000], y_shuffled[-1000:]
print('Train/Dev split: %d/%d' % (len(y_train), len(y_dev)))
print('train shape:', x_train.shape)
print('dev shape:', x_dev.shape)
    # reshape for convolution input
x_train = np.reshape(x_train, (x_train.shape[0], 1, x_train.shape[1], x_train.shape[2]))
x_dev = np.reshape(x_dev, (x_dev.shape[0], 1, x_dev.shape[1], x_dev.shape[2]))
num_embed = x_train.shape[-1]
sentence_size = x_train.shape[2]
print('sentence max words', sentence_size)
print('embedding size', num_embed)
batch_size = 50
    # vocab_size is not needed here: with_embedding defaults to True, so the embedding layer is skipped
    cnn_model = setup_cnn_model(mx.gpu(1), batch_size, sentence_size, num_embed, vocab_size=None, dropout=0.5)
train_cnn(cnn_model, x_train, y_train, x_dev, y_dev, batch_size)
def train_without_pretrained_embedding():
x, y, vocab, vocab_inv = data_helpers.load_data()
vocab_size = len(vocab)
# randomly shuffle data
np.random.seed(10)
shuffle_indices = np.random.permutation(np.arange(len(y)))
x_shuffled = x[shuffle_indices]
y_shuffled = y[shuffle_indices]
# split train/dev set
x_train, x_dev = x_shuffled[:-1000], x_shuffled[-1000:]
y_train, y_dev = y_shuffled[:-1000], y_shuffled[-1000:]
print('Train/Dev split: %d/%d' % (len(y_train), len(y_dev)))
print('train shape:', x_train.shape)
print('dev shape:', x_dev.shape)
print('vocab_size', vocab_size)
batch_size = 50
num_embed = 300
sentence_size = x_train.shape[1]
print('batch size', batch_size)
print('sentence max words', sentence_size)
print('embedding size', num_embed)
cnn_model = setup_cnn_model(mx.gpu(0), batch_size, sentence_size, num_embed, vocab_size, dropout=0.5, with_embedding=False)
train_cnn(cnn_model, x_train, y_train, x_dev, y_dev, batch_size)
if __name__ == '__main__':
if not os.path.exists("checkpoint"):
        os.mkdir("checkpoint")
train_without_pretrained_embedding()
| python | 9,449 |
import csv
import os
MINIMAL_RATING_THRESHOLD = 30
SOURCE_DATA_ROOT = "C:\\RS\\Amazon\\All\\"
def check_duplicate_ratings(file_to_check):
users_items = set()
f = open(file_to_check, 'rt')
try:
reader = csv.reader(f)
for row in reader:
            key = row[0] + row[1]
            if key not in users_items:
                users_items.add(key)
            else:
                raise ValueError("DUPLICATE FOUND: " + key)
finally:
f.close()
print('no duplicates')
for filename in os.listdir(SOURCE_DATA_ROOT):
if filename.endswith(".csv") and filename.startswith('ratings_'):
print('checking {}:'.format(filename))
check_duplicate_ratings(os.path.join(SOURCE_DATA_ROOT, filename))
else:
continue
| python | 757 |
from .core.constraint import CompoundConstraint
from .core.problem import Problem
from .core.problem import Objective
from .core.constants import Optimization
from .core.solver import AbsSolver
__import__('pkg_resources').declare_namespace(__name__)
| python | 252 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from instancehook import InstanceHook
from dhcpdns import DhcpDns
| python | 858 |
"""
PyTorch Mobile PR builds (use linux host toolchain + mobile build options)
"""
import cimodel.lib.miniutils as miniutils
import cimodel.data.simple.util.branch_filters
from cimodel.data.simple.util.docker_constants import (
DOCKER_IMAGE_ASAN,
DOCKER_REQUIREMENT_ASAN,
DOCKER_IMAGE_NDK,
DOCKER_REQUIREMENT_NDK
)
class MobileJob:
def __init__(
self,
docker_image,
docker_requires,
variant_parts,
is_master_only=False):
self.docker_image = docker_image
self.docker_requires = docker_requires
self.variant_parts = variant_parts
self.is_master_only = is_master_only
def gen_tree(self):
non_phase_parts = [
"pytorch",
"linux",
"xenial",
"py3",
"clang5",
"mobile",
] + self.variant_parts
full_job_name = "_".join(non_phase_parts)
build_env_name = "-".join(non_phase_parts)
props_dict = {
"build_environment": build_env_name,
"build_only": miniutils.quote(str(int(True))),
"docker_image": self.docker_image,
"requires": self.docker_requires,
"name": full_job_name,
}
if self.is_master_only:
props_dict["filters"] = cimodel.data.simple.util.branch_filters.gen_filter_dict()
return [{"pytorch_linux_build": props_dict}]
WORKFLOW_DATA = [
MobileJob(
DOCKER_IMAGE_ASAN,
[DOCKER_REQUIREMENT_ASAN],
["build"]
),
# Use LLVM-DEV toolchain in android-ndk-r19c docker image
MobileJob(
DOCKER_IMAGE_NDK,
[DOCKER_REQUIREMENT_NDK],
["custom", "build", "dynamic"]
),
MobileJob(
DOCKER_IMAGE_NDK,
[DOCKER_REQUIREMENT_NDK],
["custom", "build", "static"]
),
# Use LLVM-DEV toolchain in android-ndk-r19c docker image
# Most of this CI is already covered by "mobile-custom-build-dynamic" job
MobileJob(
DOCKER_IMAGE_NDK,
[DOCKER_REQUIREMENT_NDK],
["code", "analysis"],
True
),
]
def get_workflow_jobs():
return [item.gen_tree() for item in WORKFLOW_DATA]
| python | 2,223 |
from collections import OrderedDict
params = OrderedDict()
###
# Name of this file
settings_name = 'Skyrmions'
settings_file = __file__
###
# Executable arguments
# key will be name of parameter
# 'def' is the default value
# 'descr' is the description
params['Nx'] = {'def': '30', 'descr': 'cell width [atoms]'}
params['Ny'] = {'def': '30', 'descr': 'cell height [atoms]'}
params['steps'] = {'def': '1e5', 'descr': 'simulation steps number'}
params['every_step'] = {'def': '1000', 'descr': 'periods to write on disk. -1 means only last. -2 means not at all'}
params['J1'] = {'def': '5.7', 'descr': '[meV] see PRL 117 207202'}
params['J2'] = {'def': '-0.84', 'descr': '[meV]'}
params['J3'] = {'def': '-1.45', 'descr': '[meV]'}
params['J4'] = {'def': '-0.06', 'descr': '[meV]'}
params['J5'] = {'def': '0.2', 'descr': '[meV]'}
params['J6'] = {'def': '0.2', 'descr': '[meV]'}
params['J7'] = {'def': '-0.2', 'descr': '[meV]'}
params['J8'] = {'def': '0.5', 'descr': '[meV]'}
params['K4'] = {'def': '-1.8', 'descr': '[meV]'}
params['D'] = {'def': '-1.05', 'descr': '[meV]'}
params['B'] = {'def': '-0.2', 'descr': '[meV]'}
params['K'] = {'def': '-0.8', 'descr': '[meV]'}
params['T'] = {'def': '1', 'descr': '[K]'}
params['seed'] = {'def': '0', 'descr': 'seed for PRNG. 0 means random'}
###
# Below are keys for GNU Parallel/executable choosing/etc = not executable arguments
# Their number is `launch_parameters_count`
params['iterations'] = {'def': '1', 'descr': ''}
###
# Number of jobs to run simultaneously.
# Will be passed to GNU Parallel. Empty means utilize every CPU.
params['jobs'] = {'def': '', 'descr': ''}
###
# If 'y' then --shuf will be passed to GNU Parallel to shuffle jobs
params['shuffle'] = {'def': 'n', 'descr': ''}
###
# If 'y' then an ampersand (&) will be added on the end of the command to run in background
# If 'n' then progressbar and ETA will be shown (--eta and --progress)
params['background'] = {'def': 'y', 'descr': 'work in bg? y / n'}
###
# Correct order of all parameters that go into the command
# Only main executable arguments are allowed here
right_order = ['Nx', 'Ny', 'steps', 'every_step', 'J1', 'J2', 'J3', 'J4', 'J5', 'J6', 'J7', 'J8', 'K4', 'D', 'B', 'K', 'T', 'seed']
###
# List of parameters that will need expansion during command constructing
# Expansion means a:b:c --> a a+b a+2b ... c
# Example: '10:5:50' --> '10 15 20 25 30 35 40 45 50'
need_expansion = ['J1', 'J2', 'J3', 'J4', 'J5', 'J6', 'J7', 'J8', 'K4', 'D', 'B', 'K', 'T']
###
# Main executable file to call
executable = './main'
###
# Additional keys to pass to GNU Parallel
additional_parallel_keys = ''
###
# Name of filename to write the command to
out_filename = 'command.sh'
###
# Simple check that lists above were updated on adding new parameter
# Number of parameters that don't go into the command
launch_parameters_count = 4
try:
assert len(params.keys()) == len(right_order) + launch_parameters_count
except AssertionError as e:
print('*** You may have forgotten to update parameters in', __file__)
print('If not then review launch_parameters_count inside that file\n')
raise e
| python | 3,600 |
'''This file configures the training procedure, because handling arguments in every single function is too
exhausting for research purposes. Don't try this code if you are a software engineer.'''
# device settings
device = 'cuda' # or 'cpu'
# data settings
dataset_path = "data/images" # parent directory of datasets
class_name = "dummy_data" # dataset subdirectory
modelname = "dummy_test" # export evaluations/logs with this name
pre_extracted = True # were the features pre-extracted with extract_features?
img_size = (768, 768) # image size of highest scale, others are //2, //4
img_dims = [3] + list(img_size)
# transformation settings
norm_mean, norm_std = [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]
# network hyperparameters
n_scales = 3 # number of scales at which features are extracted, img_size is the highest - others are //2, //4,...
clamp = 3 # clamping parameter
max_grad_norm = 1e0 # clamp gradients to this norm
n_coupling_blocks = 4 # higher = more flexible = more unstable
fc_internal = 1024 # * 4 # number of neurons in hidden layers of s-t-networks
lr_init = 2e-4 # initial learning rate
use_gamma = True
extractor = "effnetB5" # feature dataset name (which was used in 'extract_features.py' as 'export_name')
n_feat = {"effnetB5": 304}[extractor] # depends on the feature extractor
map_size = (img_size[0] // 12, img_size[1] // 12)
# dataloader parameters
batch_size = 16 # actual batch size is this value multiplied by n_transforms(_test)
kernel_sizes = [3] * (n_coupling_blocks - 1) + [5]
# total epochs = meta_epochs * sub_epochs
# evaluation after <sub_epochs> epochs
meta_epochs = 4 # total epochs = meta_epochs * sub_epochs
sub_epochs = 60 # evaluate after this number of epochs
# output settings
verbose = True
hide_tqdm_bar = True
save_model = True
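# Illustration (added): with the defaults above, the training loop runs
# meta_epochs * sub_epochs = 4 * 60 = 240 epochs in total and evaluates
# after every 60 epochs.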
| python | 1,842 |
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="din_model", # Replace with your own username
version="1.0.0",
author="Reza Adibniar",
author_email="[email protected]",
description="All the packages required for running predictor pipeline",
long_description=long_description,
    long_description_content_type="text/markdown",
url="https://github.com/Futurewei-io/blue-marlin",
packages=setuptools.find_packages(exclude=['data','datagen']),
license="Apache License 2.0",
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
],
python_requires='>=3.6',
)
| python | 752 |
#!/usr/bin/env python3
import numpy as np
Normal = __import__('normal').Normal
np.random.seed(0)
data = np.random.normal(70, 10, 100).tolist()
n1 = Normal(data)
print('PSI(90):', n1.pdf(90))
n2 = Normal(mean=70, stddev=10)
print('PSI(90):', n2.pdf(90))
| python | 256 |
#!/usr/bin/env python3
'''An alternative way to draw parallels and meridians with basemap.
Basemap is a toolkit of matplotlib used to plot geographic maps.
With this function you can:
* Draw the latitude/longitude grid easily in one line of code, specifying the
lat/lon intervals.
* Use rcParams, so all your figures will look more consistent.
* Specify the label pad in points (instead of projection units).
* Place the labels indicating which margins will be used.
'''
import numpy as np
import matplotlib.pyplot as plt
def latlon_grid(bmap, lon_int, lat_int, labels='lb', **kwargs):
'''Draws a lat-lon grid in an easy way.
Some default values are taken from rcParams instead of 'black' (color) and
1.0 (linewidth) which is the default in Basemap.
In Basemap, the label pad is computed in projection units. Now you can use
the keyword argument 'labelpad' to control this separation in points. If
not specified then this value is taken from rcParams.
Arguments:
bmap -- Basemap object.
lon_int, lat_int -- Difference in degrees from one longitude or latitude to
the next.
labels -- String specifying which margins will be used to write the labels.
If None, no label will be shown.
             It is assumed that left/right margins (i.e. Y axes) correspond
to latitudes and top/bottom (X axes) to longitudes. It is valid
every combination of the characters 't' | 'b' | 'l' | 'r'
(top|bottom|left|right).
Ex: 'lrb' means that the longitude values will appear in bottom
margin and latitudes in left and right.
**kwargs -- Other arguments to drawparallels, drawmeridians and plt.text.
labelpad has units of points.
'''
    # Processes arguments and rcParams for default values
if 'color' not in kwargs:
kwargs['color'] = plt.rcParams['grid.color']
if 'linewidth' not in kwargs:
kwargs['linewidth'] = plt.rcParams['grid.linewidth']
if 'labelpad' in kwargs:
padx = pady = kwargs['labelpad']
del kwargs['labelpad']
else:
pady = plt.rcParams['xtick.major.pad']
padx = plt.rcParams['ytick.major.pad']
if 'size' in kwargs:
xfontsize = yfontsize = kwargs['size']
del kwargs['size']
elif 'fontsize' in kwargs:
xfontsize = yfontsize = kwargs['fontsize']
del kwargs['fontsize']
else:
xfontsize = plt.rcParams['xtick.labelsize']
yfontsize = plt.rcParams['ytick.labelsize']
# Vectors of coordinates
lon0 = bmap.lonmin // lon_int * lon_int
lat0 = bmap.latmin // lat_int * lat_int
lon1 = bmap.lonmax // lon_int * lon_int
lat1 = bmap.latmax // lat_int * lat_int
nlons = (lon1 - lon0) / lon_int + 1
nlats = (lat1 - lat0) / lat_int + 1
assert nlons / int(nlons) == 1, nlons
assert nlats / int(nlats) == 1, nlats
lons = np.linspace(lon0, lon1, int(nlons))
lats = np.linspace(lat0, lat1, int(nlats))
    # If not specified then compute the label offset from 'labelpad'
xos = yos = None
if 'xoffset' in kwargs:
xos = kwargs['xoffset']
if 'yoffset' in kwargs:
yos = kwargs['yoffset']
if xos is None and yos is None:
# Page size in inches and axes limits
fig_w, fig_h = plt.gcf().get_size_inches()
points = plt.gca().get_position().get_points()
x1, y1 = tuple(points[0])
x2, y2 = tuple(points[1])
# Width and height of axes in points
w = (x2 - x1) * fig_w * 72
h = (y2 - y1) * fig_h * 72
# If the aspect relation is fixed then compute the real values
if bmap.fix_aspect:
aspect = bmap.aspect * w / h
if aspect > 1:
w = h / bmap.aspect
elif aspect < 1:
h = w * bmap.aspect
# Offset in projection units (meters or degrees)
xos = padx * (bmap.urcrnrx - bmap.llcrnrx) / w
yos = pady * (bmap.urcrnry - bmap.llcrnry) / h
# Set the labels
latlabels = [False] * 4
lonlabels = [False] * 4
if labels is not None:
pst = {'l': 0, 'r': 1, 't': 2, 'b': 3}
lst = {'l': latlabels, 'r': latlabels, 't': lonlabels, 'b': lonlabels}
for i in labels.lower():
lst[i][pst[i]] = True
# Draws the grid
bmap.drawparallels(lats, labels=latlabels, fontsize=yfontsize,
xoffset=xos, yoffset=yos, **kwargs)
bmap.drawmeridians(lons, labels=lonlabels, fontsize=xfontsize,
xoffset=xos, yoffset=yos, **kwargs)
# TEST
if __name__ == '__main__':
import matplotlib.pyplot as plt
import mpl_toolkits.basemap as basemap
# Some rcParams (example)
plt.rc('grid', linewidth=0.5, color='g')
plt.rcParams['xtick.major.pad'] = 6
plt.rcParams['ytick.major.pad'] = 12
plt.rcParams['xtick.labelsize'] = 'small'
plt.rcParams['ytick.labelsize'] = 'x-small'
# Basemap
m = basemap.Basemap(projection='merc', llcrnrlon=-120, llcrnrlat=30,
urcrnrlon=120, urcrnrlat=70)
# Plots a figure to compare
plt.subplot(211)
x = np.linspace(-10, 10, 10000)
y = np.sin(x)
plt.plot(x, y)
plt.grid()
plt.title('Example of figure using only rc values')
# Draws the grid (using the function)
plt.subplot(212)
latlon_grid(m, 30, 10, labels='lb', dashes=[1, 3])
m.drawcoastlines()
plt.title('Using latlon_grid()')
plt.show()
| python | 5,483 |
import importlib.util
import os
import pykeops.config
from pykeops.common.compile_routines import compile_generic_routine
from pykeops.common.utils import module_exists, create_and_lock_build_folder
from pykeops.common.set_path import create_name, set_build_folder
class LoadKeOps:
"""
Load the keops shared library that corresponds to the given formula, aliases, dtype and lang.
If the shared library cannot be loaded, it will be compiled.
Note: This function is thread/process safe by using a file lock.
:return: The Python function that corresponds to the loaded Keops kernel.
"""
def __init__(self, formula, aliases, dtype, lang, optional_flags=[]):
self.formula = formula
self.aliases = aliases
self.dtype = dtype
self.lang = lang
self.optional_flags = optional_flags
# create the name from formula, aliases and dtype.
self.dll_name = create_name(
self.formula, self.aliases, self.dtype, self.lang, self.optional_flags
)
if (not module_exists(self.dll_name)) or (pykeops.config.build_type == "Debug"):
self.build_folder = set_build_folder(
pykeops.config.bin_folder, self.dll_name
)
self._safe_compile()
@create_and_lock_build_folder()
def _safe_compile(self):
compile_generic_routine(
self.formula,
self.aliases,
self.dll_name,
self.dtype,
self.lang,
self.optional_flags,
self.build_folder,
)
def import_module(self):
# if not os.path.samefile(os.path.dirname(importlib.util.find_spec(self.dll_name).origin),
# pykeops.config.bin_folder):
# raise ImportError(
# "[pyKeOps]: Current pykeops.config.bin_folder is {} but keops module {} is loaded from {} folder. "
# "This may happened when changing bin_folder **after** loading a keops module. Please check everything "
# "is fine.".format(
# pykeops.config.bin_folder, self.dll_name,
# os.path.dirname(importlib.util.find_spec(self.dll_name).origin)))
return importlib.import_module(self.dll_name)
| python | 2,286 |
# Generated by Django 3.0.1 on 2020-03-09 00:59
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('favorApp', '0006_favor_pendingusers'),
]
operations = [
migrations.RemoveField(
model_name='favor',
name='giver_signed',
),
migrations.RemoveField(
model_name='favor',
name='requester_signed',
),
migrations.RemoveField(
model_name='favor',
name='volunteer_event',
),
]
| python | 557 |
import asyncio
from typing import Literal
import discord
from redbot.core import commands
from redbot.core.bot import Red
from redbot.core.config import Config
from redbot.core.utils.chat_formatting import box, pagify
from redbot.core.utils.menus import (DEFAULT_CONTROLS, menu,
start_adding_reactions)
from redbot.core.utils.predicates import ReactionPredicate
RequestType = Literal["discord_deleted_user", "owner", "user", "user_strict"]
class Todo(commands.Cog):
"""
A simple todo list
"""
def __init__(self, bot: Red) -> None:
self.bot = bot
self.config = Config.get_conf(
self,
identifier=6732102719277,
force_registration=True,
)
self.config.register_user(todos=[])
self.config.register_global(embeds=True, menus=True)
@commands.group(invoke_without_command=True)
async def todo(self, ctx, id_: int):
"""Contains a list of commands to set and retrieve todo tasks \n Use todo <id> to get a specific todo"""
todos = await self.config.user(ctx.author).todos()
if -len(todos) < id_ < len(todos):
await ctx.send(todos[id_])
else:
await ctx.send(f"Invalid ID: {id_}")
@commands.is_owner()
@todo.command()
async def embedset(self, ctx, toggle: bool):
"""Enable/Disable embeds for todos"""
if toggle:
await self.config.embeds.set(True)
else:
await self.config.embeds.set(False)
        await ctx.send(f'Successfully {"Enabled" if toggle else "Disabled"} embeds for todo lists')
@commands.is_owner()
@todo.command()
async def menuset(self, ctx, toggle: bool):
"""Enable/Disable menus for todos"""
if toggle:
await self.config.menus.set(True)
else:
await self.config.menus.set(False)
        await ctx.send(f'Successfully {"Enabled" if toggle else "Disabled"} menus for todo lists')
@todo.command()
async def add(self, ctx, *, task: str):
"""Add a new task to your todo list, DO NOT STORE SENSITIVE INFO HERE"""
async with self.config.user(ctx.author).todos() as todos:
todo_id = len(todos)
todos.append(task)
await ctx.send(f"Your todo has been added successfully with the id: **{todo_id}**")
@todo.command(name="list")
async def list_todos(self, ctx):
"""List all your todos"""
todos = await self.config.user(ctx.author).todos()
if not todos:
await ctx.send("Currently, you have no TODOs")
else:
todo_text = "\n".join([f"{i} - {x}" for i, x in enumerate(todos)])
if await self.config.embeds():
pagified = tuple(pagify(todo_text, page_length=1004, shorten_by=0))
# embeds and menus
if await self.config.menus():
emb_pages = [
discord.Embed(
title="Your TODO List",
description=f"Page:{num}/{len(pagified)}\n\n{page}",
)
for num, page in enumerate(pagified, 1)
]
await menu(ctx, emb_pages, DEFAULT_CONTROLS, timeout=120)
# embeds and not menus
else:
for page in pagified:
await ctx.send(
embed=discord.Embed(
title="Your TODO List",
description=page,
)
)
else:
pagified = tuple(pagify(todo_text))
# not embeds and menus
if await self.config.menus():
await menu(ctx, pagified, DEFAULT_CONTROLS, timeout=120)
# not embeds and not menus
else:
for page in pagified:
await ctx.send(page)
@todo.command(aliases=["rearrange"])
    async def reorder(self, ctx, from_: int, to: int):
        """Swap two of your todos given their ids"""
        async with self.config.user(ctx.author).todos() as todos:
            if -len(todos) < from_ < len(todos):
                if -len(todos) < to < len(todos):
                    todos[from_], todos[to] = todos[to], todos[from_]
                    await ctx.send(f"Successfully swapped {from_} and {to}")
                else:
                    await ctx.send(f"Invalid ID: {to}")
            else:
                await ctx.send(f"Invalid ID: {from_}")
@todo.command()
async def remove(self, ctx, *indices: int):
"""Remove your todo tasks, supports multiple id removals as well\n eg:[p]todo remove 1 2 3"""
if len(indices) == 1:
async with self.config.user(ctx.author).todos() as todos:
x = todos.pop(indices[0])
await ctx.send(f"Succesfully removed: {x}")
return
removed = []
async with self.config.user(ctx.author).todos() as todos:
temp = []
for j, i in enumerate(todos):
if j not in indices:
temp.append(i)
else:
removed.append(i)
todos[:] = temp
for page in pagify(
"Succesfully removed:\n" + "\n".join([f"{i}. {x}" for i, x in enumerate(removed, 1)]),
page_length=1970,
):
await ctx.send(page)
@todo.command()
    async def removeall(self, ctx):
        """Remove all your todo tasks"""
        msg = await ctx.send("Are you sure you want to remove all of your todos?")
        start_adding_reactions(msg, ReactionPredicate.YES_OR_NO_EMOJIS)
        pred = ReactionPredicate.yes_or_no(msg, ctx.author)
        try:
            await ctx.bot.wait_for("reaction_add", check=pred, timeout=30)
except asyncio.TimeoutError:
pass
if pred.result is True:
await self.config.user_from_id(ctx.author.id).clear()
await ctx.send("Successfully removed all your TODOs")
else:
await ctx.send("Cancelled.")
async def red_delete_data_for_user(self, *, requester: RequestType, user_id: int) -> None:
# should I add anything more here?
await self.config.user_from_id(user_id).clear()
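# Hedged sketch (editor addition, not part of the original cog): the Config layout the
# commands above rely on is a plain per-user list of todo strings under "todos", plus two
# global toggles ("embeds", "menus") that default to True. The values in the comments are
# invented examples.
async def _demo_todo_config(cog: Todo, user: discord.abc.User):
    todos = await cog.config.user(user).todos()    # e.g. ["buy milk", "fix bug"]
    use_embeds = await cog.config.embeds()         # True unless toggled via `todo embedset`
    use_menus = await cog.config.menus()           # True unless toggled via `todo menuset`
    return todos, use_embeds, use_menus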
| python | 6,383 |
import torch.nn as nn
import torch
class InitialBlock(nn.Module):
"""The initial block is composed of two branches:
1. a main branch which performs a regular convolution with stride 2;
2. an extension branch which performs max-pooling.
    Doing both operations in parallel and concatenating their results
    allows for efficient downsampling and expansion. In the original ENet the
    main branch outputs 13 feature maps and the extension branch 3, for a
    total of 16 after concatenation; in this variant the max-pooling extension
    branch is disabled (commented out), so the main convolution produces all
    ``out_channels`` feature maps by itself.
Keyword arguments:
- in_channels (int): the number of input channels.
    - out_channels (int): the number of output channels.
- kernel_size (int, optional): the kernel size of the filters used in
the convolution layer. Default: 3.
- padding (int, optional): zero-padding added to both sides of the
input. Default: 0.
- bias (bool, optional): Adds a learnable bias to the output if
``True``. Default: False.
- relu (bool, optional): When ``True`` ReLU is used as the activation
function; otherwise, PReLU is used. Default: True.
"""
def __init__(self,
in_channels,
out_channels,
kernel_size=3,
padding=0,
bias=False,
relu=True):
super().__init__()
if relu:
activation = nn.ReLU()
else:
activation = nn.PReLU()
        # Main branch - in the original ENet this convolution would output
        # out_channels - 3 feature maps (the remaining 3 coming from the
        # max-pooling extension branch); with the extension branch disabled
        # below, it outputs all out_channels feature maps.
self.main_branch = nn.Conv2d(
in_channels,
out_channels,
# out_channels - 3,
kernel_size=kernel_size,
stride=2,
padding=padding,
bias=bias)
# Extension branch
# self.ext_branch = nn.MaxPool2d(kernel_size, stride=2, padding=padding)
# Initialize batch normalization to be used after concatenation
self.batch_norm = nn.BatchNorm2d(out_channels)
# PReLU layer to apply after concatenating the branches
self.out_prelu = activation
def forward(self, x):
main = self.main_branch(x)
# ext = self.ext_branch(x)
# Concatenate branches
# out = torch.cat((main, ext), 1)
# Apply batch normalization
out = self.batch_norm(main)
return self.out_prelu(out)
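# Hedged usage sketch (editor addition, not part of the original module): a quick shape
# check for InitialBlock as it is configured in ENet below (3 -> 16 channels, padding=1).
# Wrapped in a helper so importing this file stays side-effect free.
def _demo_initial_block_shapes():
    block = InitialBlock(3, 16, padding=1)
    out = block(torch.randn(1, 3, 512, 512))
    # The stride-2 3x3 convolution with padding=1 halves the spatial size.
    assert out.shape == (1, 16, 256, 256)
    return out.shape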
class RegularBottleneck(nn.Module):
"""Regular bottlenecks are the main building block of ENet.
Main branch:
1. Shortcut connection.
Extension branch:
1. 1x1 convolution which decreases the number of channels by
``internal_ratio``, also called a projection;
2. regular, dilated or asymmetric convolution;
3. 1x1 convolution which increases the number of channels back to
``channels``, also called an expansion;
4. dropout as a regularizer.
Keyword arguments:
- channels (int): the number of input and output channels.
- internal_ratio (int, optional): a scale factor applied to
``channels`` used to compute the number of
channels after the projection. eg. given ``channels`` equal to 128 and
internal_ratio equal to 2 the number of channels after the projection
is 64. Default: 4.
- kernel_size (int, optional): the kernel size of the filters used in
the convolution layer described above in item 2 of the extension
branch. Default: 3.
- padding (int, optional): zero-padding added to both sides of the
input. Default: 0.
- dilation (int, optional): spacing between kernel elements for the
convolution described in item 2 of the extension branch. Default: 1.
    - asymmetric (bool, optional): flags if the convolution described in
item 2 of the extension branch is asymmetric or not. Default: False.
- dropout_prob (float, optional): probability of an element to be
zeroed. Default: 0 (no dropout).
- bias (bool, optional): Adds a learnable bias to the output if
``True``. Default: False.
- relu (bool, optional): When ``True`` ReLU is used as the activation
function; otherwise, PReLU is used. Default: True.
"""
def __init__(self,
channels,
internal_ratio=4,
kernel_size=3,
padding=0,
dilation=1,
asymmetric=False,
dropout_prob=0,
bias=False,
relu=True):
super().__init__()
        # Check if the internal_ratio parameter is within the expected range
        # [1, channels]
        if internal_ratio <= 1 or internal_ratio > channels:
            raise RuntimeError("Value out of range. Expected value in the "
                               "interval [1, {0}], got internal_ratio={1}."
                               .format(channels, internal_ratio))
internal_channels = channels // internal_ratio
if relu:
activation = nn.ReLU()
else:
activation = nn.PReLU()
# Main branch - shortcut connection
# Extension branch - 1x1 convolution, followed by a regular, dilated or
# asymmetric convolution, followed by another 1x1 convolution, and,
# finally, a regularizer (spatial dropout). Number of channels is constant.
# 1x1 projection convolution
self.ext_conv1 = nn.Sequential(
nn.Conv2d(
channels,
internal_channels,
kernel_size=1,
stride=1,
bias=bias), nn.BatchNorm2d(internal_channels), activation)
# If the convolution is asymmetric we split the main convolution in
        # two. E.g. for a 5x5 asymmetric convolution we have two convolutions:
# the first is 5x1 and the second is 1x5.
if asymmetric:
self.ext_conv2 = nn.Sequential(
nn.Conv2d(
internal_channels,
internal_channels,
kernel_size=(kernel_size, 1),
stride=1,
padding=(padding, 0),
dilation=dilation,
bias=bias), nn.BatchNorm2d(internal_channels), activation,
nn.Conv2d(
internal_channels,
internal_channels,
kernel_size=(1, kernel_size),
stride=1,
padding=(0, padding),
dilation=dilation,
bias=bias), nn.BatchNorm2d(internal_channels), activation)
else:
self.ext_conv2 = nn.Sequential(
nn.Conv2d(
internal_channels,
internal_channels,
kernel_size=kernel_size,
stride=1,
padding=padding,
dilation=dilation,
bias=bias), nn.BatchNorm2d(internal_channels), activation)
# 1x1 expansion convolution
self.ext_conv3 = nn.Sequential(
nn.Conv2d(
internal_channels,
channels,
kernel_size=1,
stride=1,
bias=bias), nn.BatchNorm2d(channels), activation)
self.ext_regul = nn.Dropout2d(p=dropout_prob)
# PReLU layer to apply after adding the branches
self.out_prelu = activation
def forward(self, x):
# Main branch shortcut
main = x
# Extension branch
ext = self.ext_conv1(x)
ext = self.ext_conv2(ext)
ext = self.ext_conv3(ext)
ext = self.ext_regul(ext)
# Add main and extension branches
out = main + ext
return self.out_prelu(out)
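# Hedged usage sketch (editor addition): the two RegularBottleneck flavours used in the
# encoder stages below. Both keep channels and spatial size unchanged, which is what
# makes the residual addition in forward() valid.
def _demo_regular_bottleneck_shapes():
    x = torch.randn(1, 128, 64, 64)
    dilated = RegularBottleneck(128, dilation=2, padding=2, dropout_prob=0.1)
    asym = RegularBottleneck(128, kernel_size=5, padding=2, asymmetric=True, dropout_prob=0.1)
    assert dilated(x).shape == x.shape
    assert asym(x).shape == x.shape
    return x.shape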
class DownsamplingBottleneck(nn.Module):
"""Downsampling bottlenecks further downsample the feature map size.
Main branch:
1. max pooling with stride 2; indices are saved to be used for
unpooling later.
Extension branch:
1. 2x2 convolution with stride 2 that decreases the number of channels
by ``internal_ratio``, also called a projection;
2. regular convolution (by default, 3x3);
3. 1x1 convolution which increases the number of channels to
``out_channels``, also called an expansion;
    4. dropout as a regularizer.
    Note: in this variant the max-pooling main branch is disabled (commented
    out), so the block returns only the extension branch output.
Keyword arguments:
- in_channels (int): the number of input channels.
- out_channels (int): the number of output channels.
- internal_ratio (int, optional): a scale factor applied to ``channels``
used to compute the number of channels after the projection. eg. given
``channels`` equal to 128 and internal_ratio equal to 2 the number of
channels after the projection is 64. Default: 4.
- kernel_size (int, optional): the kernel size of the filters used in
the convolution layer described above in item 2 of the extension branch.
Default: 3.
- padding (int, optional): zero-padding added to both sides of the
input. Default: 0.
- return_indices (bool, optional): if ``True``, will return the max
indices along with the outputs. Useful when unpooling later.
- dropout_prob (float, optional): probability of an element to be
zeroed. Default: 0 (no dropout).
- bias (bool, optional): Adds a learnable bias to the output if
``True``. Default: False.
- relu (bool, optional): When ``True`` ReLU is used as the activation
function; otherwise, PReLU is used. Default: True.
"""
def __init__(self,
in_channels,
out_channels,
internal_ratio=4,
kernel_size=3,
padding=0,
return_indices=False,
dropout_prob=0,
bias=False,
relu=True):
super().__init__()
# Store parameters that are needed later
self.return_indices = return_indices
        # Check if the internal_ratio parameter is within the expected range
        # [1, channels]
        if internal_ratio <= 1 or internal_ratio > in_channels:
            raise RuntimeError("Value out of range. Expected value in the "
                               "interval [1, {0}], got internal_ratio={1}."
                               .format(in_channels, internal_ratio))
internal_channels = in_channels // internal_ratio
if relu:
activation = nn.ReLU()
else:
activation = nn.PReLU()
# Main branch - max pooling followed by feature map (channels) padding
# self.main_max1 = nn.MaxPool2d(
# kernel_size,
# stride=2,
# padding=padding,
# return_indices=return_indices)
        # Extension branch - 2x2 strided projection convolution, followed by a
        # regular convolution, followed by a 1x1 expansion convolution. The
        # number of channels is increased to out_channels.
# 2x2 projection convolution with stride 2
self.ext_conv1 = nn.Sequential(
nn.Conv2d(
in_channels,
internal_channels,
kernel_size=2,
stride=2,
bias=bias), nn.BatchNorm2d(internal_channels), activation)
# Convolution
self.ext_conv2 = nn.Sequential(
nn.Conv2d(
internal_channels,
internal_channels,
kernel_size=kernel_size,
stride=1,
padding=padding,
bias=bias), nn.BatchNorm2d(internal_channels), activation)
# 1x1 expansion convolution
self.ext_conv3 = nn.Sequential(
nn.Conv2d(
internal_channels,
out_channels,
kernel_size=1,
stride=1,
bias=bias), nn.BatchNorm2d(out_channels), activation)
self.ext_regul = nn.Dropout2d(p=dropout_prob)
# PReLU layer to apply after concatenating the branches
self.out_prelu = activation
def forward(self, x):
# Main branch shortcut
# if self.return_indices:
# main, max_indices = self.main_max1(x)
# else:
# main = self.main_max1(x)
# Extension branch
ext = self.ext_conv1(x)
ext = self.ext_conv2(ext)
ext = self.ext_conv3(ext)
ext = self.ext_regul(ext)
# Main branch channel padding
# n, ch_ext, h, w = ext.size()
# ch_main = main.size()[1]
# padding = torch.zeros(n, ch_ext - ch_main, h, w)
# # Before concatenating, check if main is on the CPU or GPU and
# # convert padding accordingly
# if main.is_cuda:
# padding = padding.cuda()
# # Concatenate
# main = torch.cat((main, padding), 1)
# # Add main and extension branches
# out = main + ext
# return self.out_prelu(out), max_indices
return self.out_prelu(ext)
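# Hedged usage sketch (editor addition): with the max-pooling main branch disabled, the
# block reduces to its extension branch, so the output simply has half the spatial size
# and out_channels feature maps.
def _demo_downsampling_bottleneck_shapes():
    block = DownsamplingBottleneck(16, 64, padding=1, dropout_prob=0.01)
    out = block(torch.randn(1, 16, 256, 256))
    # The 2x2 stride-2 projection halves H and W; the 1x1 expansion sets channels to 64.
    assert out.shape == (1, 64, 128, 128)
    return out.shape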
class UpsamplingBottleneck(nn.Module):
"""The upsampling bottlenecks upsample the feature map resolution using max
pooling indices stored from the corresponding downsampling bottleneck.
Main branch:
1. 1x1 convolution with stride 1 that decreases the number of channels by
``internal_ratio``, also called a projection;
2. max unpool layer using the max pool indices from the corresponding
downsampling max pool layer.
Extension branch:
1. 1x1 convolution with stride 1 that decreases the number of channels by
``internal_ratio``, also called a projection;
2. transposed convolution (by default, 3x3);
3. 1x1 convolution which increases the number of channels to
``out_channels``, also called an expansion;
    4. dropout as a regularizer.
    Note: in this variant the max-unpooling path is disabled (commented out);
    the main branch instead uses a strided transposed convolution, so no
    pooling indices are needed in ``forward``.
Keyword arguments:
- in_channels (int): the number of input channels.
- out_channels (int): the number of output channels.
- internal_ratio (int, optional): a scale factor applied to ``in_channels``
used to compute the number of channels after the projection. eg. given
``in_channels`` equal to 128 and ``internal_ratio`` equal to 2 the number
of channels after the projection is 64. Default: 4.
- kernel_size (int, optional): the kernel size of the filters used in the
convolution layer described above in item 2 of the extension branch.
Default: 3.
- padding (int, optional): zero-padding added to both sides of the input.
Default: 0.
- dropout_prob (float, optional): probability of an element to be zeroed.
Default: 0 (no dropout).
- bias (bool, optional): Adds a learnable bias to the output if ``True``.
Default: False.
- relu (bool, optional): When ``True`` ReLU is used as the activation
function; otherwise, PReLU is used. Default: True.
"""
def __init__(self,
in_channels,
out_channels,
internal_ratio=4,
kernel_size=3,
padding=0,
dropout_prob=0,
bias=False,
relu=True):
super().__init__()
        # Check if the internal_ratio parameter is within the expected range
        # [1, channels]
        if internal_ratio <= 1 or internal_ratio > in_channels:
            raise RuntimeError("Value out of range. Expected value in the "
                               "interval [1, {0}], got internal_ratio={1}."
                               .format(in_channels, internal_ratio))
internal_channels = in_channels // internal_ratio
if relu:
activation = nn.ReLU()
else:
activation = nn.PReLU()
        # Main branch - a strided transposed convolution (the original ENet's
        # 1x1 convolution + max-unpooling path is left commented out)
self.main_conv1 = nn.Sequential(
# nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=bias),
nn.ConvTranspose2d(
in_channels,
out_channels,
kernel_size=kernel_size,
stride=2,
padding=padding,
output_padding=1,
bias=bias),
nn.BatchNorm2d(out_channels))
# Remember that the stride is the same as the kernel_size, just like
# the max pooling layers
# self.main_unpool1 = nn.MaxUnpool2d(kernel_size=2)
        # Extension branch - 1x1 projection convolution, followed by a
        # transposed convolution, followed by a 1x1 expansion convolution.
        # The number of channels is reduced to out_channels.
# 1x1 projection convolution with stride 1
self.ext_conv1 = nn.Sequential(
nn.Conv2d(
in_channels, internal_channels, kernel_size=1, bias=bias),
nn.BatchNorm2d(internal_channels), activation)
# Transposed convolution
self.ext_conv2 = nn.Sequential(
nn.ConvTranspose2d(
internal_channels,
internal_channels,
kernel_size=kernel_size,
stride=2,
padding=padding,
output_padding=1,
bias=bias), nn.BatchNorm2d(internal_channels), activation)
# 1x1 expansion convolution
self.ext_conv3 = nn.Sequential(
nn.Conv2d(
internal_channels, out_channels, kernel_size=1, bias=bias),
nn.BatchNorm2d(out_channels), activation)
self.ext_regul = nn.Dropout2d(p=dropout_prob)
# PReLU layer to apply after concatenating the branches
self.out_prelu = activation
# def forward(self, x, max_indices):
def forward(self, x):
# Main branch shortcut
main = self.main_conv1(x)
# main = self.main_unpool1(main, max_indices)
# Extension branch
ext = self.ext_conv1(x)
ext = self.ext_conv2(ext)
ext = self.ext_conv3(ext)
ext = self.ext_regul(ext)
# Add main and extension branches
out = main + ext
return self.out_prelu(out)
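# Hedged usage sketch (editor addition): both branches use a stride-2 transposed
# convolution with output_padding=1, so the spatial size doubles while the channel count
# drops to out_channels, matching what the decoder stages below expect.
def _demo_upsampling_bottleneck_shapes():
    block = UpsamplingBottleneck(128, 64, padding=1, dropout_prob=0.1)
    out = block(torch.randn(1, 128, 64, 64))
    assert out.shape == (1, 64, 128, 128)
    return out.shape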
class ENet(nn.Module):
"""Generate the ENet model.
Keyword arguments:
- num_classes (int): the number of classes to segment.
- encoder_relu (bool, optional): When ``True`` ReLU is used as the
activation function in the encoder blocks/layers; otherwise, PReLU
is used. Default: False.
- decoder_relu (bool, optional): When ``True`` ReLU is used as the
activation function in the decoder blocks/layers; otherwise, PReLU
is used. Default: True.
"""
def __init__(self, num_classes, encoder_relu=False, decoder_relu=True):
super().__init__()
self.initial_block = InitialBlock(3, 16, padding=1, relu=encoder_relu)
# Stage 1 - Encoder
self.downsample1_0 = DownsamplingBottleneck(
16,
64,
padding=1,
return_indices=False,
dropout_prob=0.01,
relu=encoder_relu)
self.regular1_1 = RegularBottleneck(
64, padding=1, dropout_prob=0.01, relu=encoder_relu)
self.regular1_2 = RegularBottleneck(
64, padding=1, dropout_prob=0.01, relu=encoder_relu)
self.regular1_3 = RegularBottleneck(
64, padding=1, dropout_prob=0.01, relu=encoder_relu)
self.regular1_4 = RegularBottleneck(
64, padding=1, dropout_prob=0.01, relu=encoder_relu)
# Stage 2 - Encoder
self.downsample2_0 = DownsamplingBottleneck(
64,
128,
padding=1,
return_indices=False,
dropout_prob=0.1,
relu=encoder_relu)
self.regular2_1 = RegularBottleneck(
128, padding=1, dropout_prob=0.1, relu=encoder_relu)
self.dilated2_2 = RegularBottleneck(
128, dilation=2, padding=2, dropout_prob=0.1, relu=encoder_relu)
self.asymmetric2_3 = RegularBottleneck(
128,
kernel_size=5,
padding=2,
asymmetric=True,
dropout_prob=0.1,
relu=encoder_relu)
self.dilated2_4 = RegularBottleneck(
128, dilation=4, padding=4, dropout_prob=0.1, relu=encoder_relu)
self.regular2_5 = RegularBottleneck(
128, padding=1, dropout_prob=0.1, relu=encoder_relu)
self.dilated2_6 = RegularBottleneck(
128, dilation=8, padding=8, dropout_prob=0.1, relu=encoder_relu)
self.asymmetric2_7 = RegularBottleneck(
128,
kernel_size=5,
asymmetric=True,
padding=2,
dropout_prob=0.1,
relu=encoder_relu)
self.dilated2_8 = RegularBottleneck(
128, dilation=16, padding=16, dropout_prob=0.1, relu=encoder_relu)
# Stage 3 - Encoder
self.regular3_0 = RegularBottleneck(
128, padding=1, dropout_prob=0.1, relu=encoder_relu)
self.dilated3_1 = RegularBottleneck(
128, dilation=2, padding=2, dropout_prob=0.1, relu=encoder_relu)
self.asymmetric3_2 = RegularBottleneck(
128,
kernel_size=5,
padding=2,
asymmetric=True,
dropout_prob=0.1,
relu=encoder_relu)
self.dilated3_3 = RegularBottleneck(
128, dilation=4, padding=4, dropout_prob=0.1, relu=encoder_relu)
self.regular3_4 = RegularBottleneck(
128, padding=1, dropout_prob=0.1, relu=encoder_relu)
self.dilated3_5 = RegularBottleneck(
128, dilation=8, padding=8, dropout_prob=0.1, relu=encoder_relu)
self.asymmetric3_6 = RegularBottleneck(
128,
kernel_size=5,
asymmetric=True,
padding=2,
dropout_prob=0.1,
relu=encoder_relu)
self.dilated3_7 = RegularBottleneck(
128, dilation=16, padding=16, dropout_prob=0.1, relu=encoder_relu)
# Stage 4 - Decoder
self.upsample4_0 = UpsamplingBottleneck(
128, 64, padding=1, dropout_prob=0.1, relu=decoder_relu)
self.regular4_1 = RegularBottleneck(
64, padding=1, dropout_prob=0.1, relu=decoder_relu)
self.regular4_2 = RegularBottleneck(
64, padding=1, dropout_prob=0.1, relu=decoder_relu)
# Stage 5 - Decoder
self.upsample5_0 = UpsamplingBottleneck(
64, 16, padding=1, dropout_prob=0.1, relu=decoder_relu)
self.regular5_1 = RegularBottleneck(
16, padding=1, dropout_prob=0.1, relu=decoder_relu)
self.transposed_conv = nn.ConvTranspose2d(
16,
num_classes,
kernel_size=3,
stride=2,
padding=1,
output_padding=1,
bias=False)
def forward(self, x):
# Initial block
x = self.initial_block(x)
# Stage 1 - Encoder
# x, max_indices1_0 = self.downsample1_0(x)
x = self.downsample1_0(x)
x = self.regular1_1(x)
x = self.regular1_2(x)
x = self.regular1_3(x)
x = self.regular1_4(x)
# Stage 2 - Encoder
# x, max_indices2_0 = self.downsample2_0(x)
x = self.downsample2_0(x)
x = self.regular2_1(x)
x = self.dilated2_2(x)
x = self.asymmetric2_3(x)
x = self.dilated2_4(x)
x = self.regular2_5(x)
x = self.dilated2_6(x)
x = self.asymmetric2_7(x)
x = self.dilated2_8(x)
# Stage 3 - Encoder
x = self.regular3_0(x)
x = self.dilated3_1(x)
x = self.asymmetric3_2(x)
x = self.dilated3_3(x)
x = self.regular3_4(x)
x = self.dilated3_5(x)
x = self.asymmetric3_6(x)
x = self.dilated3_7(x)
# Stage 4 - Decoder
# x = self.upsample4_0(x, max_indices2_0)
x = self.upsample4_0(x)
x = self.regular4_1(x)
x = self.regular4_2(x)
# Stage 5 - Decoder
# x = self.upsample5_0(x, max_indices1_0)
x = self.upsample5_0(x)
x = self.regular5_1(x)
x = self.transposed_conv(x)
return x
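# Hedged end-to-end sketch (editor addition): the encoder downsamples by a factor of 8
# (initial block plus two downsampling bottlenecks) and the decoder plus the final
# transposed convolution restore the input resolution, so the logits come back at the
# input size with num_classes channels. num_classes=19 is only an assumed example value.
def _demo_enet_shapes(num_classes=19):
    model = ENet(num_classes)
    logits = model(torch.randn(1, 3, 512, 512))
    assert logits.shape == (1, num_classes, 512, 512)
    return logits.shape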
| python | 24,293 |
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Command-line interface"""
import argparse
import json
import os
import textwrap
from argparse import Action, ArgumentError, RawTextHelpFormatter
from typing import Callable, Dict, Iterable, List, NamedTuple, Optional, Set, Union
from tabulate import tabulate_formats
from airflow import settings
from airflow.cli.commands.legacy_commands import check_legacy_command
from airflow.configuration import conf
from airflow.exceptions import AirflowException
from airflow.executors.executor_loader import ExecutorLoader
from airflow.utils.cli import ColorMode
from airflow.utils.helpers import partition
from airflow.utils.module_loading import import_string
from airflow.utils.timezone import parse as parsedate
DAGS_FOLDER = settings.DAGS_FOLDER
BUILD_DOCS = "BUILDING_AIRFLOW_DOCS" in os.environ
if BUILD_DOCS:
DAGS_FOLDER = '[AIRFLOW_HOME]/dags'
def lazy_load_command(import_path: str) -> Callable:
"""Create a lazy loader for command"""
_, _, name = import_path.rpartition('.')
def command(*args, **kwargs):
func = import_string(import_path)
return func(*args, **kwargs)
command.__name__ = name
return command
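# Hedged illustration (editor addition): lazy_load_command defers the heavy command-module
# import until the wrapped function is actually called, which keeps building this large
# parser cheap. The dotted path below is one that also appears further down in this file.
def _demo_lazy_load_command():
    version_cmd = lazy_load_command('airflow.cli.commands.version_command.version')
    # Nothing under airflow.cli.commands has been imported yet; import_string() only
    # runs when version_cmd(...) is eventually invoked.
    return version_cmd.__name__  # -> 'version'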
class DefaultHelpParser(argparse.ArgumentParser):
"""CustomParser to display help message"""
def _check_value(self, action, value):
"""Override _check_value and check conditionally added command"""
executor = conf.get('core', 'EXECUTOR')
if value == 'celery' and executor != ExecutorLoader.CELERY_EXECUTOR:
message = f'celery subcommand works only with CeleryExecutor, your current executor: {executor}'
raise ArgumentError(action, message)
if value == 'kubernetes':
try:
from kubernetes.client import models
if not models:
message = "kubernetes subcommand requires that ' \
'you run pip install 'apache-airflow[cncf.kubernetes]'"
raise ArgumentError(action, message)
except Exception: # pylint: disable=W0703
message = 'kubernetes subcommand requires that you pip install the kubernetes python client'
raise ArgumentError(action, message)
if action.choices is not None and value not in action.choices:
check_legacy_command(action, value)
super()._check_value(action, value)
def error(self, message):
"""Override error and use print_instead of print_usage"""
self.print_help()
self.exit(2, f'\n{self.prog} command error: {message}, see help above.\n')
# Used in Arg to enable `None' as a distinct value from "not passed"
_UNSET = object()
class Arg:
"""Class to keep information about command line argument"""
# pylint: disable=redefined-builtin,unused-argument
def __init__(self, flags=_UNSET, help=_UNSET, action=_UNSET, default=_UNSET, nargs=_UNSET, type=_UNSET,
choices=_UNSET, required=_UNSET, metavar=_UNSET):
self.flags = flags
self.kwargs = {}
for k, v in locals().items():
if v is _UNSET:
continue
if k in ("self", "flags"):
continue
self.kwargs[k] = v
# pylint: enable=redefined-builtin,unused-argument
def add_to_parser(self, parser: argparse.ArgumentParser):
"""Add this argument to an ArgumentParser"""
parser.add_argument(*self.flags, **self.kwargs)
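# Hedged illustration (editor addition): Arg records only the kwargs that were explicitly
# passed (anything left at _UNSET is dropped) and replays them onto an argparse parser via
# add_to_parser(). ARG_DAG_ID and ARG_VERBOSE used here are defined further below.
def _demo_arg_wiring():
    demo_parser = argparse.ArgumentParser(prog="demo")
    ARG_DAG_ID.add_to_parser(demo_parser)    # positional: dag_id
    ARG_VERBOSE.add_to_parser(demo_parser)   # optional flag: -v/--verbose (store_true)
    return demo_parser.parse_args(["example_dag", "--verbose"])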
def positive_int(value):
"""Define a positive int type for an argument."""
try:
value = int(value)
if value > 0:
return value
except ValueError:
pass
raise argparse.ArgumentTypeError(f"invalid positive int value: '{value}'")
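# Hedged sketch (editor addition): positive_int is intended as an argparse `type=`
# callable (see ARG_NUM_EXECUTIONS below); it accepts "3" but rejects "0", "-1" or "abc"
# with an ArgumentTypeError.
def _demo_positive_int():
    assert positive_int("3") == 3
    try:
        positive_int("0")
    except argparse.ArgumentTypeError as err:
        return str(err)  # "invalid positive int value: '0'"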
# Shared
ARG_DAG_ID = Arg(
("dag_id",),
help="The id of the dag")
ARG_TASK_ID = Arg(
("task_id",),
help="The id of the task")
ARG_EXECUTION_DATE = Arg(
("execution_date",),
help="The execution date of the DAG",
type=parsedate)
ARG_TASK_REGEX = Arg(
("-t", "--task-regex"),
help="The regex to filter specific task_ids to backfill (optional)")
ARG_SUBDIR = Arg(
("-S", "--subdir"),
help=(
"File location or directory from which to look for the dag. "
"Defaults to '[AIRFLOW_HOME]/dags' where [AIRFLOW_HOME] is the "
"value you set for 'AIRFLOW_HOME' config you set in 'airflow.cfg' "),
default=DAGS_FOLDER)
ARG_START_DATE = Arg(
("-s", "--start-date"),
help="Override start_date YYYY-MM-DD",
type=parsedate)
ARG_END_DATE = Arg(
("-e", "--end-date"),
help="Override end_date YYYY-MM-DD",
type=parsedate)
ARG_OUTPUT_PATH = Arg(
("-o", "--output-path",),
help="The output for generated yaml files",
type=str,
default=os.getcwd())
ARG_DRY_RUN = Arg(
("-n", "--dry-run"),
help="Perform a dry run for each task. Only renders Template Fields for each task, nothing else",
action="store_true")
ARG_PID = Arg(
("--pid",),
help="PID file location",
nargs='?')
ARG_DAEMON = Arg(
("-D", "--daemon"),
help="Daemonize instead of running in the foreground",
action="store_true")
ARG_STDERR = Arg(
("--stderr",),
help="Redirect stderr to this file")
ARG_STDOUT = Arg(
("--stdout",),
help="Redirect stdout to this file")
ARG_LOG_FILE = Arg(
("-l", "--log-file"),
help="Location of the log file")
ARG_YES = Arg(
("-y", "--yes"),
help="Do not prompt to confirm reset. Use with care!",
action="store_true",
default=False)
ARG_OUTPUT = Arg(
("--output",),
help=(
"Output table format. The specified value is passed to "
"the tabulate module (https://pypi.org/project/tabulate/). "
),
metavar="FORMAT",
choices=tabulate_formats,
default="plain")
ARG_COLOR = Arg(
('--color',),
help="Do emit colored output (default: auto)",
choices={ColorMode.ON, ColorMode.OFF, ColorMode.AUTO},
default=ColorMode.AUTO)
# list_dag_runs
ARG_NO_BACKFILL = Arg(
("--no-backfill",),
help="filter all the backfill dagruns given the dag id",
action="store_true")
ARG_STATE = Arg(
("--state",),
help="Only list the dag runs corresponding to the state")
# list_jobs
ARG_LIMIT = Arg(
("--limit",),
help="Return a limited number of records")
# next_execution
ARG_NUM_EXECUTIONS = Arg(
("-n", "--num-executions"),
default=1,
type=positive_int,
help="The number of next execution datetimes to show")
# backfill
ARG_MARK_SUCCESS = Arg(
("-m", "--mark-success"),
help="Mark jobs as succeeded without running them",
action="store_true")
ARG_VERBOSE = Arg(
("-v", "--verbose"),
help="Make logging output more verbose",
action="store_true")
ARG_LOCAL = Arg(
("-l", "--local"),
help="Run the task using the LocalExecutor",
action="store_true")
ARG_DONOT_PICKLE = Arg(
("-x", "--donot-pickle"),
help=(
"Do not attempt to pickle the DAG object to send over "
"to the workers, just tell the workers to run their version "
"of the code"),
action="store_true")
ARG_BF_IGNORE_DEPENDENCIES = Arg(
("-i", "--ignore-dependencies"),
help=(
"Skip upstream tasks, run only the tasks "
"matching the regexp. Only works in conjunction "
"with task_regex"),
action="store_true")
ARG_BF_IGNORE_FIRST_DEPENDS_ON_PAST = Arg(
("-I", "--ignore-first-depends-on-past"),
help=(
"Ignores depends_on_past dependencies for the first "
"set of tasks only (subsequent executions in the backfill "
"DO respect depends_on_past)"),
action="store_true")
ARG_POOL = Arg(("--pool",), "Resource pool to use")
ARG_DELAY_ON_LIMIT = Arg(
("--delay-on-limit",),
help=("Amount of time in seconds to wait when the limit "
"on maximum active dag runs (max_active_runs) has "
"been reached before trying to execute a dag run "
"again"),
type=float,
default=1.0)
ARG_RESET_DAG_RUN = Arg(
("--reset-dagruns",),
help=(
"if set, the backfill will delete existing "
"backfill-related DAG runs and start "
"anew with fresh, running DAG runs"),
action="store_true")
ARG_RERUN_FAILED_TASKS = Arg(
("--rerun-failed-tasks",),
help=(
"if set, the backfill will auto-rerun "
"all the failed tasks for the backfill date range "
"instead of throwing exceptions"),
action="store_true")
ARG_RUN_BACKWARDS = Arg(
("-B", "--run-backwards",),
help=(
"if set, the backfill will run tasks from the most "
"recent day first. if there are tasks that depend_on_past "
"this option will throw an exception"),
action="store_true")
# test_dag
ARG_SHOW_DAGRUN = Arg(
("--show-dagrun", ),
help=(
"After completing the backfill, shows the diagram for current DAG Run.\n"
"\n"
"The diagram is in DOT language\n"),
action='store_true')
ARG_IMGCAT_DAGRUN = Arg(
("--imgcat-dagrun", ),
help=(
"After completing the dag run, prints a diagram on the screen for the "
"current DAG Run using the imgcat tool.\n"
),
action='store_true')
ARG_SAVE_DAGRUN = Arg(
("--save-dagrun", ),
help=(
"After completing the backfill, saves the diagram for current DAG Run to the indicated file.\n"
"\n"
))
# list_tasks
ARG_TREE = Arg(
("-t", "--tree"),
help="Tree view",
action="store_true")
# clear
ARG_UPSTREAM = Arg(
("-u", "--upstream"),
help="Include upstream tasks",
action="store_true")
ARG_ONLY_FAILED = Arg(
("-f", "--only-failed"),
help="Only failed jobs",
action="store_true")
ARG_ONLY_RUNNING = Arg(
("-r", "--only-running"),
help="Only running jobs",
action="store_true")
ARG_DOWNSTREAM = Arg(
("-d", "--downstream"),
help="Include downstream tasks",
action="store_true")
ARG_EXCLUDE_SUBDAGS = Arg(
("-x", "--exclude-subdags"),
help="Exclude subdags",
action="store_true")
ARG_EXCLUDE_PARENTDAG = Arg(
("-X", "--exclude-parentdag"),
help="Exclude ParentDAGS if the task cleared is a part of a SubDAG",
action="store_true")
ARG_DAG_REGEX = Arg(
("-R", "--dag-regex"),
help="Search dag_id as regex instead of exact string",
action="store_true")
# show_dag
ARG_SAVE = Arg(
("-s", "--save"),
help="Saves the result to the indicated file.")
ARG_IMGCAT = Arg(
("--imgcat", ),
help=(
"Displays graph using the imgcat tool."),
action='store_true')
# trigger_dag
ARG_RUN_ID = Arg(
("-r", "--run-id"),
help="Helps to identify this run")
ARG_CONF = Arg(
('-c', '--conf'),
help="JSON string that gets pickled into the DagRun's conf attribute")
ARG_EXEC_DATE = Arg(
("-e", "--exec-date"),
help="The execution date of the DAG",
type=parsedate)
# pool
ARG_POOL_NAME = Arg(
("pool",),
metavar='NAME',
help="Pool name")
ARG_POOL_SLOTS = Arg(
("slots",),
type=int,
help="Pool slots")
ARG_POOL_DESCRIPTION = Arg(
("description",),
help="Pool description")
ARG_POOL_IMPORT = Arg(
("file",),
metavar="FILEPATH",
help="Import pools from JSON file")
ARG_POOL_EXPORT = Arg(
("file",),
metavar="FILEPATH",
help="Export all pools to JSON file")
# variables
ARG_VAR = Arg(
("key",),
help="Variable key")
ARG_VAR_VALUE = Arg(
("value",),
metavar='VALUE',
help="Variable value")
ARG_DEFAULT = Arg(
("-d", "--default"),
metavar="VAL",
default=None,
help="Default value returned if variable does not exist")
ARG_JSON = Arg(
("-j", "--json"),
help="Deserialize JSON variable",
action="store_true")
ARG_VAR_IMPORT = Arg(
("file",),
help="Import variables from JSON file")
ARG_VAR_EXPORT = Arg(
("file",),
help="Export all variables to JSON file")
# kerberos
ARG_PRINCIPAL = Arg(
("principal",),
help="kerberos principal",
nargs='?')
ARG_KEYTAB = Arg(
("-k", "--keytab"),
help="keytab",
nargs='?',
default=conf.get('kerberos', 'keytab'))
# run
# TODO(aoen): "force" is a poor choice of name here since it implies it overrides
# all dependencies (not just past success), e.g. the ignore_depends_on_past
# dependency. This flag should be deprecated and renamed to 'ignore_ti_state' and
# the "ignore_all_dependencies" command should be called the"force" command
# instead.
ARG_INTERACTIVE = Arg(
('-N', '--interactive'),
help='Do not capture standard output and error streams '
'(useful for interactive debugging)',
action='store_true')
ARG_FORCE = Arg(
("-f", "--force"),
help="Ignore previous task instance state, rerun regardless if task already succeeded/failed",
action="store_true")
ARG_RAW = Arg(("-r", "--raw"), argparse.SUPPRESS, "store_true")
ARG_IGNORE_ALL_DEPENDENCIES = Arg(
("-A", "--ignore-all-dependencies"),
help="Ignores all non-critical dependencies, including ignore_ti_state and ignore_task_deps",
action="store_true")
# TODO(aoen): ignore_dependencies is a poor choice of name here because it is too
# vague (e.g. a task being in the appropriate state to be run is also a dependency
# but is not ignored by this flag), the name 'ignore_task_dependencies' is
# slightly better (as it ignores all dependencies that are specific to the task),
# so deprecate the old command name and use this instead.
ARG_IGNORE_DEPENDENCIES = Arg(
("-i", "--ignore-dependencies"),
help="Ignore task-specific dependencies, e.g. upstream, depends_on_past, and retry delay dependencies",
action="store_true")
ARG_IGNORE_DEPENDS_ON_PAST = Arg(
("-I", "--ignore-depends-on-past"),
help="Ignore depends_on_past dependencies (but respect upstream dependencies)",
action="store_true")
ARG_SHIP_DAG = Arg(
("--ship-dag",),
help="Pickles (serializes) the DAG and ships it to the worker",
action="store_true")
ARG_PICKLE = Arg(
("-p", "--pickle"),
help="Serialized pickle object of the entire dag (used internally)")
ARG_JOB_ID = Arg(
("-j", "--job-id"),
help=argparse.SUPPRESS)
ARG_CFG_PATH = Arg(
("--cfg-path",),
help="Path to config file to use instead of airflow.cfg")
ARG_MIGRATION_TIMEOUT = Arg(
("-t", "--migration-wait-timeout"),
help="timeout to wait for db to migrate ",
type=int,
default=0,
)
# webserver
ARG_PORT = Arg(
("-p", "--port"),
default=conf.get('webserver', 'WEB_SERVER_PORT'),
type=int,
help="The port on which to run the server")
ARG_SSL_CERT = Arg(
("--ssl-cert",),
default=conf.get('webserver', 'WEB_SERVER_SSL_CERT'),
help="Path to the SSL certificate for the webserver")
ARG_SSL_KEY = Arg(
("--ssl-key",),
default=conf.get('webserver', 'WEB_SERVER_SSL_KEY'),
help="Path to the key to use with the SSL certificate")
ARG_WORKERS = Arg(
("-w", "--workers"),
default=conf.get('webserver', 'WORKERS'),
type=int,
help="Number of workers to run the webserver on")
ARG_WORKERCLASS = Arg(
("-k", "--workerclass"),
default=conf.get('webserver', 'WORKER_CLASS'),
choices=['sync', 'eventlet', 'gevent', 'tornado'],
help="The worker class to use for Gunicorn")
ARG_WORKER_TIMEOUT = Arg(
("-t", "--worker-timeout"),
default=conf.get('webserver', 'WEB_SERVER_WORKER_TIMEOUT'),
type=int,
help="The timeout for waiting on webserver workers")
ARG_HOSTNAME = Arg(
("-H", "--hostname"),
default=conf.get('webserver', 'WEB_SERVER_HOST'),
help="Set the hostname on which to run the web server")
ARG_DEBUG = Arg(
("-d", "--debug"),
help="Use the server that ships with Flask in debug mode",
action="store_true")
ARG_ACCESS_LOGFILE = Arg(
("-A", "--access-logfile"),
default=conf.get('webserver', 'ACCESS_LOGFILE'),
help="The logfile to store the webserver access log. Use '-' to print to "
"stderr")
ARG_ERROR_LOGFILE = Arg(
("-E", "--error-logfile"),
default=conf.get('webserver', 'ERROR_LOGFILE'),
help="The logfile to store the webserver error log. Use '-' to print to "
"stderr")
# scheduler
ARG_DAG_ID_OPT = Arg(
("-d", "--dag-id"),
help="The id of the dag to run"
)
ARG_NUM_RUNS = Arg(
("-n", "--num-runs"),
default=conf.getint('scheduler', 'num_runs'),
type=int,
help="Set the number of runs to execute before exiting")
# worker
ARG_DO_PICKLE = Arg(
("-p", "--do-pickle"),
default=False,
help=(
"Attempt to pickle the DAG object to send over "
"to the workers, instead of letting workers run their version "
"of the code"),
action="store_true")
ARG_QUEUES = Arg(
("-q", "--queues"),
help="Comma delimited list of queues to serve",
default=conf.get('celery', 'DEFAULT_QUEUE'))
ARG_CONCURRENCY = Arg(
("-c", "--concurrency"),
type=int,
help="The number of worker processes",
default=conf.get('celery', 'worker_concurrency'))
ARG_CELERY_HOSTNAME = Arg(
("-H", "--celery-hostname"),
help=("Set the hostname of celery worker "
"if you have multiple workers on a single machine"))
ARG_UMASK = Arg(
("-u", "--umask"),
help="Set the umask of celery worker in daemon mode",
default=conf.get('celery', 'worker_umask'))
# flower
ARG_BROKER_API = Arg(("-a", "--broker-api"), help="Broker API")
ARG_FLOWER_HOSTNAME = Arg(
("-H", "--hostname"),
default=conf.get('celery', 'FLOWER_HOST'),
help="Set the hostname on which to run the server")
ARG_FLOWER_PORT = Arg(
("-p", "--port"),
default=conf.get('celery', 'FLOWER_PORT'),
type=int,
help="The port on which to run the server")
ARG_FLOWER_CONF = Arg(
("-c", "--flower-conf"),
help="Configuration file for flower")
ARG_FLOWER_URL_PREFIX = Arg(
("-u", "--url-prefix"),
default=conf.get('celery', 'FLOWER_URL_PREFIX'),
help="URL prefix for Flower")
ARG_FLOWER_BASIC_AUTH = Arg(
("-A", "--basic-auth"),
default=conf.get('celery', 'FLOWER_BASIC_AUTH'),
help=("Securing Flower with Basic Authentication. "
"Accepts user:password pairs separated by a comma. "
"Example: flower_basic_auth = user1:password1,user2:password2"))
ARG_TASK_PARAMS = Arg(
("-t", "--task-params"),
help="Sends a JSON params dict to the task")
ARG_POST_MORTEM = Arg(
("-m", "--post-mortem"),
action="store_true",
help="Open debugger on uncaught exception")
ARG_ENV_VARS = Arg(
("--env-vars", ),
help="Set env var in both parsing time and runtime for each of entry supplied in a JSON dict",
type=json.loads)
# connections
ARG_CONN_ID = Arg(
('conn_id',),
help='Connection id, required to get/add/delete a connection',
type=str)
ARG_CONN_ID_FILTER = Arg(
('--conn-id',),
help='If passed, only items with the specified connection ID will be displayed',
type=str)
ARG_CONN_URI = Arg(
('--conn-uri',),
help='Connection URI, required to add a connection without conn_type',
type=str)
ARG_CONN_TYPE = Arg(
('--conn-type',),
help='Connection type, required to add a connection without conn_uri',
type=str)
ARG_CONN_HOST = Arg(
('--conn-host',),
help='Connection host, optional when adding a connection',
type=str)
ARG_CONN_LOGIN = Arg(
('--conn-login',),
help='Connection login, optional when adding a connection',
type=str)
ARG_CONN_PASSWORD = Arg(
('--conn-password',),
help='Connection password, optional when adding a connection',
type=str)
ARG_CONN_SCHEMA = Arg(
('--conn-schema',),
help='Connection schema, optional when adding a connection',
type=str)
ARG_CONN_PORT = Arg(
('--conn-port',),
help='Connection port, optional when adding a connection',
type=str)
ARG_CONN_EXTRA = Arg(
('--conn-extra',),
help='Connection `Extra` field, optional when adding a connection',
type=str)
ARG_CONN_EXPORT = Arg(
('file',),
help='Output file path for exporting the connections',
type=argparse.FileType('w', encoding='UTF-8'))
ARG_CONN_EXPORT_FORMAT = Arg(
('--format',),
help='Format of the connections data in file',
type=str,
choices=['json', 'yaml', 'env'])
# users
ARG_USERNAME = Arg(
('-u', '--username'),
help='Username of the user',
required=True,
type=str)
ARG_USERNAME_OPTIONAL = Arg(
('-u', '--username'),
help='Username of the user',
type=str)
ARG_FIRSTNAME = Arg(
('-f', '--firstname'),
help='First name of the user',
required=True,
type=str)
ARG_LASTNAME = Arg(
('-l', '--lastname'),
help='Last name of the user',
required=True,
type=str)
ARG_ROLE = Arg(
('-r', '--role'),
help='Role of the user. Existing roles include Admin, '
'User, Op, Viewer, and Public',
required=True,
type=str,)
ARG_EMAIL = Arg(
('-e', '--email'),
help='Email of the user',
required=True,
type=str)
ARG_EMAIL_OPTIONAL = Arg(
('-e', '--email'),
help='Email of the user',
type=str)
ARG_PASSWORD = Arg(
('-p', '--password'),
help='Password of the user, required to create a user '
'without --use-random-password',
type=str)
ARG_USE_RANDOM_PASSWORD = Arg(
('--use-random-password',),
help='Do not prompt for password. Use random string instead.'
' Required to create a user without --password ',
default=False,
action='store_true')
ARG_USER_IMPORT = Arg(
("import",),
metavar="FILEPATH",
help="Import users from JSON file. Example format::\n" +
textwrap.indent(textwrap.dedent('''
[
{
"email": "[email protected]",
"firstname": "Jon",
"lastname": "Doe",
"roles": ["Public"],
"username": "jondoe"
}
]'''), " " * 4))
ARG_USER_EXPORT = Arg(
("export",),
metavar="FILEPATH",
help="Export all users to JSON file")
# roles
ARG_CREATE_ROLE = Arg(
('-c', '--create'),
help='Create a new role',
action='store_true')
ARG_LIST_ROLES = Arg(
('-l', '--list'),
help='List roles',
action='store_true')
ARG_ROLES = Arg(
('role',),
help='The name of a role',
nargs='*')
ARG_AUTOSCALE = Arg(
('-a', '--autoscale'),
help="Minimum and Maximum number of worker to autoscale")
ARG_SKIP_SERVE_LOGS = Arg(
("-s", "--skip-serve-logs"),
default=False,
help="Don't start the serve logs process along with the workers",
action="store_true")
# info
ARG_ANONYMIZE = Arg(
('--anonymize',),
help=(
'Minimize any personal identifiable information. '
'Use it when sharing output with others.'
),
action='store_true'
)
ARG_FILE_IO = Arg(
('--file-io',),
help=(
'Send output to file.io service and returns link.'
),
action='store_true'
)
# config
ARG_SECTION = Arg(
("section",),
help="The section name",
)
ARG_OPTION = Arg(
("option",),
help="The option name",
)
ALTERNATIVE_CONN_SPECS_ARGS = [
ARG_CONN_TYPE, ARG_CONN_HOST, ARG_CONN_LOGIN, ARG_CONN_PASSWORD, ARG_CONN_SCHEMA, ARG_CONN_PORT
]
class ActionCommand(NamedTuple):
"""Single CLI command"""
name: str
help: str
func: Callable
args: Iterable[Arg]
description: Optional[str] = None
epilog: Optional[str] = None
class GroupCommand(NamedTuple):
"""ClI command with subcommands"""
name: str
help: str
subcommands: Iterable
description: Optional[str] = None
epilog: Optional[str] = None
CLICommand = Union[ActionCommand, GroupCommand]
DAGS_COMMANDS = (
ActionCommand(
name='list',
help="List all the DAGs",
func=lazy_load_command('airflow.cli.commands.dag_command.dag_list_dags'),
args=(ARG_SUBDIR, ARG_OUTPUT),
),
ActionCommand(
name='report',
help='Show DagBag loading report',
func=lazy_load_command('airflow.cli.commands.dag_command.dag_report'),
args=(ARG_SUBDIR, ARG_OUTPUT),
),
ActionCommand(
name='list-runs',
help="List DAG runs given a DAG id",
description=(
"List DAG runs given a DAG id. If state option is given, it will only search for all the "
"dagruns with the given state. If no_backfill option is given, it will filter out all "
"backfill dagruns for given dag id. If start_date is given, it will filter out all the "
"dagruns that were executed before this date. If end_date is given, it will filter out "
"all the dagruns that were executed after this date. "
),
func=lazy_load_command('airflow.cli.commands.dag_command.dag_list_dag_runs'),
args=(ARG_DAG_ID_OPT, ARG_NO_BACKFILL, ARG_STATE, ARG_OUTPUT, ARG_START_DATE, ARG_END_DATE),
),
ActionCommand(
name='list-jobs',
help="List the jobs",
func=lazy_load_command('airflow.cli.commands.dag_command.dag_list_jobs'),
args=(ARG_DAG_ID_OPT, ARG_STATE, ARG_LIMIT, ARG_OUTPUT,),
),
ActionCommand(
name='state',
help="Get the status of a dag run",
func=lazy_load_command('airflow.cli.commands.dag_command.dag_state'),
args=(ARG_DAG_ID, ARG_EXECUTION_DATE, ARG_SUBDIR),
),
ActionCommand(
name='next-execution',
help="Get the next execution datetimes of a DAG",
description=(
"Get the next execution datetimes of a DAG. It returns one execution unless the "
"num-executions option is given"
),
func=lazy_load_command('airflow.cli.commands.dag_command.dag_next_execution'),
args=(ARG_DAG_ID, ARG_SUBDIR, ARG_NUM_EXECUTIONS),
),
ActionCommand(
name='pause',
help='Pause a DAG',
func=lazy_load_command('airflow.cli.commands.dag_command.dag_pause'),
args=(ARG_DAG_ID, ARG_SUBDIR),
),
ActionCommand(
name='unpause',
help='Resume a paused DAG',
func=lazy_load_command('airflow.cli.commands.dag_command.dag_unpause'),
args=(ARG_DAG_ID, ARG_SUBDIR),
),
ActionCommand(
name='trigger',
help='Trigger a DAG run',
func=lazy_load_command('airflow.cli.commands.dag_command.dag_trigger'),
args=(ARG_DAG_ID, ARG_SUBDIR, ARG_RUN_ID, ARG_CONF, ARG_EXEC_DATE),
),
ActionCommand(
name='delete',
help="Delete all DB records related to the specified DAG",
func=lazy_load_command('airflow.cli.commands.dag_command.dag_delete'),
args=(ARG_DAG_ID, ARG_YES),
),
ActionCommand(
name='show',
help="Displays DAG's tasks with their dependencies",
description=("The --imgcat option only works in iTerm.\n"
"\n"
"For more information, see: https://www.iterm2.com/documentation-images.html\n"
"\n"
"The --save option saves the result to the indicated file.\n"
"\n"
"The file format is determined by the file extension. "
"For more information about supported "
"format, see: https://www.graphviz.org/doc/info/output.html\n"
"\n"
"If you want to create a PNG file then you should execute the following command:\n"
"airflow dags show <DAG_ID> --save output.png\n"
"\n"
"If you want to create a DOT file then you should execute the following command:\n"
"airflow dags show <DAG_ID> --save output.dot\n"),
func=lazy_load_command('airflow.cli.commands.dag_command.dag_show'),
args=(ARG_DAG_ID, ARG_SUBDIR, ARG_SAVE, ARG_IMGCAT,),
),
ActionCommand(
name='backfill',
help="Run subsections of a DAG for a specified date range",
description=(
"Run subsections of a DAG for a specified date range. If reset_dag_run option is used, "
"backfill will first prompt users whether airflow should clear all the previous dag_run and "
"task_instances within the backfill date range. If rerun_failed_tasks is used, backfill "
"will auto re-run the previous failed task instances within the backfill date range"
),
func=lazy_load_command('airflow.cli.commands.dag_command.dag_backfill'),
args=(
ARG_DAG_ID, ARG_TASK_REGEX, ARG_START_DATE, ARG_END_DATE, ARG_MARK_SUCCESS, ARG_LOCAL,
ARG_DONOT_PICKLE, ARG_YES, ARG_BF_IGNORE_DEPENDENCIES, ARG_BF_IGNORE_FIRST_DEPENDS_ON_PAST,
ARG_SUBDIR, ARG_POOL, ARG_DELAY_ON_LIMIT, ARG_DRY_RUN, ARG_VERBOSE, ARG_CONF,
ARG_RESET_DAG_RUN, ARG_RERUN_FAILED_TASKS, ARG_RUN_BACKWARDS
),
),
ActionCommand(
name='test',
help="Execute one single DagRun",
description=("Execute one single DagRun for a given DAG and execution date, "
"using the DebugExecutor.\n"
"\n"
"The --imgcat-dagrun option only works in iTerm.\n"
"\n"
"For more information, see: https://www.iterm2.com/documentation-images.html\n"
"\n"
"If --save-dagrun is used, then, after completing the backfill, saves the diagram "
"for current DAG Run to the indicated file.\n"
"The file format is determined by the file extension. "
"For more information about supported format, "
"see: https://www.graphviz.org/doc/info/output.html\n"
"\n"
"If you want to create a PNG file then you should execute the following command:\n"
"airflow dags test <DAG_ID> <EXECUTION_DATE> --save-dagrun output.png\n"
"\n"
"If you want to create a DOT file then you should execute the following command:\n"
"airflow dags test <DAG_ID> <EXECUTION_DATE> --save-dagrun output.dot\n"),
func=lazy_load_command('airflow.cli.commands.dag_command.dag_test'),
args=(
ARG_DAG_ID, ARG_EXECUTION_DATE, ARG_SUBDIR, ARG_SHOW_DAGRUN, ARG_IMGCAT_DAGRUN, ARG_SAVE_DAGRUN
),
),
)
TASKS_COMMANDS = (
ActionCommand(
name='list',
help="List the tasks within a DAG",
func=lazy_load_command('airflow.cli.commands.task_command.task_list'),
args=(ARG_DAG_ID, ARG_TREE, ARG_SUBDIR),
),
ActionCommand(
name='clear',
help="Clear a set of task instance, as if they never ran",
func=lazy_load_command('airflow.cli.commands.task_command.task_clear'),
args=(
ARG_DAG_ID, ARG_TASK_REGEX, ARG_START_DATE, ARG_END_DATE, ARG_SUBDIR, ARG_UPSTREAM,
ARG_DOWNSTREAM, ARG_YES, ARG_ONLY_FAILED, ARG_ONLY_RUNNING, ARG_EXCLUDE_SUBDAGS,
ARG_EXCLUDE_PARENTDAG, ARG_DAG_REGEX
),
),
ActionCommand(
name='state',
help="Get the status of a task instance",
func=lazy_load_command('airflow.cli.commands.task_command.task_state'),
args=(ARG_DAG_ID, ARG_TASK_ID, ARG_EXECUTION_DATE, ARG_SUBDIR),
),
ActionCommand(
name='failed-deps',
help="Returns the unmet dependencies for a task instance",
description=(
"Returns the unmet dependencies for a task instance from the perspective of the scheduler. "
"In other words, why a task instance doesn't get scheduled and then queued by the scheduler, "
"and then run by an executor."
),
func=lazy_load_command('airflow.cli.commands.task_command.task_failed_deps'),
args=(ARG_DAG_ID, ARG_TASK_ID, ARG_EXECUTION_DATE, ARG_SUBDIR),
),
ActionCommand(
name='render',
help="Render a task instance's template(s)",
func=lazy_load_command('airflow.cli.commands.task_command.task_render'),
args=(ARG_DAG_ID, ARG_TASK_ID, ARG_EXECUTION_DATE, ARG_SUBDIR),
),
ActionCommand(
name='run',
help="Run a single task instance",
func=lazy_load_command('airflow.cli.commands.task_command.task_run'),
args=(
ARG_DAG_ID, ARG_TASK_ID, ARG_EXECUTION_DATE, ARG_SUBDIR, ARG_MARK_SUCCESS, ARG_FORCE,
ARG_POOL, ARG_CFG_PATH, ARG_LOCAL, ARG_RAW, ARG_IGNORE_ALL_DEPENDENCIES,
ARG_IGNORE_DEPENDENCIES, ARG_IGNORE_DEPENDS_ON_PAST, ARG_SHIP_DAG, ARG_PICKLE, ARG_JOB_ID,
ARG_INTERACTIVE,
),
),
ActionCommand(
name='test',
help="Test a task instance",
description=(
"Test a task instance. This will run a task without checking for dependencies or recording "
"its state in the database"
),
func=lazy_load_command('airflow.cli.commands.task_command.task_test'),
args=(
ARG_DAG_ID, ARG_TASK_ID, ARG_EXECUTION_DATE, ARG_SUBDIR, ARG_DRY_RUN,
ARG_TASK_PARAMS, ARG_POST_MORTEM, ARG_ENV_VARS
),
),
ActionCommand(
name='states-for-dag-run',
help="Get the status of all task instances in a dag run",
func=lazy_load_command('airflow.cli.commands.task_command.task_states_for_dag_run'),
args=(ARG_DAG_ID, ARG_EXECUTION_DATE, ARG_OUTPUT),
),
)
POOLS_COMMANDS = (
ActionCommand(
name='list',
help='List pools',
func=lazy_load_command('airflow.cli.commands.pool_command.pool_list'),
args=(ARG_OUTPUT,),
),
ActionCommand(
name='get',
help='Get pool size',
func=lazy_load_command('airflow.cli.commands.pool_command.pool_get'),
args=(ARG_POOL_NAME, ARG_OUTPUT,),
),
ActionCommand(
name='set',
help='Configure pool',
func=lazy_load_command('airflow.cli.commands.pool_command.pool_set'),
args=(ARG_POOL_NAME, ARG_POOL_SLOTS, ARG_POOL_DESCRIPTION, ARG_OUTPUT,),
),
ActionCommand(
name='delete',
help='Delete pool',
func=lazy_load_command('airflow.cli.commands.pool_command.pool_delete'),
args=(ARG_POOL_NAME, ARG_OUTPUT,),
),
ActionCommand(
name='import',
help='Import pools',
func=lazy_load_command('airflow.cli.commands.pool_command.pool_import'),
args=(ARG_POOL_IMPORT, ARG_OUTPUT,),
),
ActionCommand(
name='export',
help='Export all pools',
func=lazy_load_command('airflow.cli.commands.pool_command.pool_export'),
args=(ARG_POOL_EXPORT, ARG_OUTPUT,),
),
)
VARIABLES_COMMANDS = (
ActionCommand(
name='list',
help='List variables',
func=lazy_load_command('airflow.cli.commands.variable_command.variables_list'),
args=(),
),
ActionCommand(
name='get',
help='Get variable',
func=lazy_load_command('airflow.cli.commands.variable_command.variables_get'),
args=(ARG_VAR, ARG_JSON, ARG_DEFAULT),
),
ActionCommand(
name='set',
help='Set variable',
func=lazy_load_command('airflow.cli.commands.variable_command.variables_set'),
args=(ARG_VAR, ARG_VAR_VALUE, ARG_JSON),
),
ActionCommand(
name='delete',
help='Delete variable',
func=lazy_load_command('airflow.cli.commands.variable_command.variables_delete'),
args=(ARG_VAR,),
),
ActionCommand(
name='import',
help='Import variables',
func=lazy_load_command('airflow.cli.commands.variable_command.variables_import'),
args=(ARG_VAR_IMPORT,),
),
ActionCommand(
name='export',
help='Export all variables',
func=lazy_load_command('airflow.cli.commands.variable_command.variables_export'),
args=(ARG_VAR_EXPORT,),
),
)
DB_COMMANDS = (
ActionCommand(
name='init',
help="Initialize the metadata database",
func=lazy_load_command('airflow.cli.commands.db_command.initdb'),
args=(),
),
ActionCommand(
name="check-migrations",
help="Check if migration have finished",
description=(
"Check if migration have finished (or continually check until timeout)"
),
func=lazy_load_command('airflow.cli.commands.db_command.check_migrations'),
args=(ARG_MIGRATION_TIMEOUT,),
),
ActionCommand(
name='reset',
help="Burn down and rebuild the metadata database",
func=lazy_load_command('airflow.cli.commands.db_command.resetdb'),
args=(ARG_YES,),
),
ActionCommand(
name='upgrade',
help="Upgrade the metadata database to latest version",
func=lazy_load_command('airflow.cli.commands.db_command.upgradedb'),
args=(),
),
ActionCommand(
name='shell',
help="Runs a shell to access the database",
func=lazy_load_command('airflow.cli.commands.db_command.shell'),
args=(),
),
ActionCommand(
name='check',
help="Check if the database can be reached",
func=lazy_load_command('airflow.cli.commands.db_command.check'),
args=(),
),
)
CONNECTIONS_COMMANDS = (
ActionCommand(
name='get',
help='Get a connection',
func=lazy_load_command('airflow.cli.commands.connection_command.connections_get'),
args=(ARG_CONN_ID, ARG_COLOR),
),
ActionCommand(
name='list',
help='List connections',
func=lazy_load_command('airflow.cli.commands.connection_command.connections_list'),
args=(ARG_OUTPUT, ARG_CONN_ID_FILTER),
),
ActionCommand(
name='add',
help='Add a connection',
func=lazy_load_command('airflow.cli.commands.connection_command.connections_add'),
args=(ARG_CONN_ID, ARG_CONN_URI, ARG_CONN_EXTRA) + tuple(ALTERNATIVE_CONN_SPECS_ARGS),
),
ActionCommand(
name='delete',
help='Delete a connection',
func=lazy_load_command('airflow.cli.commands.connection_command.connections_delete'),
args=(ARG_CONN_ID,),
),
ActionCommand(
name='export',
help='Export all connections',
description=("All connections can be exported in STDOUT using the following command:\n"
"airflow connections export -\n"
"The file format can be determined by the provided file extension. eg, The following "
"command will export the connections in JSON format:\n"
"airflow connections export /tmp/connections.json\n"
"The --format parameter can be used to mention the connections format. eg, "
"the default format is JSON in STDOUT mode, which can be overridden using: \n"
"airflow connections export - --format yaml\n"
"The --format parameter can also be used for the files, for example:\n"
"airflow connections export /tmp/connections --format json\n"),
func=lazy_load_command('airflow.cli.commands.connection_command.connections_export'),
args=(ARG_CONN_EXPORT, ARG_CONN_EXPORT_FORMAT,),
),
)
USERS_COMMANDS = (
ActionCommand(
name='list',
help='List users',
func=lazy_load_command('airflow.cli.commands.user_command.users_list'),
args=(ARG_OUTPUT,),
),
ActionCommand(
name='create',
help='Create a user',
func=lazy_load_command('airflow.cli.commands.user_command.users_create'),
args=(
ARG_ROLE, ARG_USERNAME, ARG_EMAIL, ARG_FIRSTNAME, ARG_LASTNAME, ARG_PASSWORD,
ARG_USE_RANDOM_PASSWORD
),
epilog=(
'examples:\n'
            'To create a user with the "Admin" role and the username "admin", run:\n'
'\n'
' $ airflow users create \\\n'
' --username admin \\\n'
' --firstname FIRST_NAME \\\n'
' --lastname LAST_NAME \\\n'
' --role Admin \\\n'
' --email [email protected]'
)
),
ActionCommand(
name='delete',
help='Delete a user',
func=lazy_load_command('airflow.cli.commands.user_command.users_delete'),
args=(ARG_USERNAME,),
),
ActionCommand(
name='add-role',
help='Add role to a user',
func=lazy_load_command('airflow.cli.commands.user_command.add_role'),
args=(ARG_USERNAME_OPTIONAL, ARG_EMAIL_OPTIONAL, ARG_ROLE),
),
ActionCommand(
name='remove-role',
help='Remove role from a user',
func=lazy_load_command('airflow.cli.commands.user_command.remove_role'),
args=(ARG_USERNAME_OPTIONAL, ARG_EMAIL_OPTIONAL, ARG_ROLE),
),
ActionCommand(
name='import',
help='Import users',
func=lazy_load_command('airflow.cli.commands.user_command.users_import'),
args=(ARG_USER_IMPORT,),
),
ActionCommand(
name='export',
help='Export all users',
func=lazy_load_command('airflow.cli.commands.user_command.users_export'),
args=(ARG_USER_EXPORT,),
),
)
ROLES_COMMANDS = (
ActionCommand(
name='list',
help='List roles',
func=lazy_load_command('airflow.cli.commands.role_command.roles_list'),
args=(ARG_OUTPUT,),
),
ActionCommand(
name='create',
help='Create role',
func=lazy_load_command('airflow.cli.commands.role_command.roles_create'),
args=(ARG_ROLES,),
),
)
CELERY_COMMANDS = (
ActionCommand(
name='worker',
help="Start a Celery worker node",
func=lazy_load_command('airflow.cli.commands.celery_command.worker'),
args=(
ARG_DO_PICKLE, ARG_QUEUES, ARG_CONCURRENCY, ARG_CELERY_HOSTNAME, ARG_PID, ARG_DAEMON,
ARG_UMASK, ARG_STDOUT, ARG_STDERR, ARG_LOG_FILE, ARG_AUTOSCALE, ARG_SKIP_SERVE_LOGS
),
),
ActionCommand(
name='flower',
help="Start a Celery Flower",
func=lazy_load_command('airflow.cli.commands.celery_command.flower'),
args=(
ARG_FLOWER_HOSTNAME, ARG_FLOWER_PORT, ARG_FLOWER_CONF, ARG_FLOWER_URL_PREFIX,
ARG_FLOWER_BASIC_AUTH, ARG_BROKER_API, ARG_PID, ARG_DAEMON, ARG_STDOUT, ARG_STDERR,
ARG_LOG_FILE
),
),
ActionCommand(
name='stop',
help="Stop the Celery worker gracefully",
func=lazy_load_command('airflow.cli.commands.celery_command.stop_worker'),
args=(),
)
)
CONFIG_COMMANDS = (
ActionCommand(
name='get-value',
help='Print the value of the configuration',
func=lazy_load_command('airflow.cli.commands.config_command.get_value'),
args=(ARG_SECTION, ARG_OPTION, ),
),
ActionCommand(
name='list',
help='List options for the configuration',
func=lazy_load_command('airflow.cli.commands.config_command.show_config'),
args=(ARG_COLOR, ),
),
)
KUBERNETES_COMMANDS = (
ActionCommand(
name='generate-dag-yaml',
help="Generate YAML files for all tasks in DAG. Useful for debugging tasks without "
"launching into a cluster",
func=lazy_load_command('airflow.cli.commands.dag_command.generate_pod_yaml'),
args=(ARG_DAG_ID, ARG_EXECUTION_DATE, ARG_SUBDIR, ARG_OUTPUT_PATH),
),
)
airflow_commands: List[CLICommand] = [
GroupCommand(
name='dags',
help='Manage DAGs',
subcommands=DAGS_COMMANDS,
),
GroupCommand(
name="kubernetes",
help='Tools to help run the KubernetesExecutor',
subcommands=KUBERNETES_COMMANDS
),
GroupCommand(
name='tasks',
help='Manage tasks',
subcommands=TASKS_COMMANDS,
),
GroupCommand(
name='pools',
help="Manage pools",
subcommands=POOLS_COMMANDS,
),
GroupCommand(
name='variables',
help="Manage variables",
subcommands=VARIABLES_COMMANDS,
),
GroupCommand(
name='db',
help="Database operations",
subcommands=DB_COMMANDS,
),
ActionCommand(
name='kerberos',
help="Start a kerberos ticket renewer",
func=lazy_load_command('airflow.cli.commands.kerberos_command.kerberos'),
args=(ARG_PRINCIPAL, ARG_KEYTAB, ARG_PID, ARG_DAEMON, ARG_STDOUT, ARG_STDERR, ARG_LOG_FILE),
),
ActionCommand(
name='webserver',
help="Start a Airflow webserver instance",
func=lazy_load_command('airflow.cli.commands.webserver_command.webserver'),
args=(
ARG_PORT, ARG_WORKERS, ARG_WORKERCLASS, ARG_WORKER_TIMEOUT, ARG_HOSTNAME, ARG_PID,
ARG_DAEMON, ARG_STDOUT, ARG_STDERR, ARG_ACCESS_LOGFILE, ARG_ERROR_LOGFILE, ARG_LOG_FILE,
ARG_SSL_CERT, ARG_SSL_KEY, ARG_DEBUG
),
),
ActionCommand(
name='scheduler',
help="Start a scheduler instance",
func=lazy_load_command('airflow.cli.commands.scheduler_command.scheduler'),
args=(
ARG_DAG_ID_OPT, ARG_SUBDIR, ARG_NUM_RUNS, ARG_DO_PICKLE, ARG_PID, ARG_DAEMON, ARG_STDOUT,
ARG_STDERR, ARG_LOG_FILE
),
),
ActionCommand(
name='version',
help="Show the version",
func=lazy_load_command('airflow.cli.commands.version_command.version'),
args=(),
),
ActionCommand(
name='cheat-sheet',
help="Display cheat sheet",
func=lazy_load_command('airflow.cli.commands.cheat_sheet_command.cheat_sheet'),
args=(),
),
GroupCommand(
name='connections',
help="Manage connections",
subcommands=CONNECTIONS_COMMANDS,
),
GroupCommand(
name='users',
help="Manage users",
subcommands=USERS_COMMANDS,
),
GroupCommand(
name='roles',
help='Manage roles',
subcommands=ROLES_COMMANDS,
),
ActionCommand(
name='sync-perm',
help="Update permissions for existing roles and DAGs",
func=lazy_load_command('airflow.cli.commands.sync_perm_command.sync_perm'),
args=(),
),
ActionCommand(
name='rotate-fernet-key',
func=lazy_load_command('airflow.cli.commands.rotate_fernet_key_command.rotate_fernet_key'),
help='Rotate encrypted connection credentials and variables',
description=(
'Rotate all encrypted connection credentials and variables; see '
'https://airflow.readthedocs.io/en/stable/howto/secure-connections.html'
'#rotating-encryption-keys'
),
args=(),
),
GroupCommand(
name="config",
help='View configuration',
subcommands=CONFIG_COMMANDS
),
ActionCommand(
name='info',
help='Show information about current Airflow and environment',
func=lazy_load_command('airflow.cli.commands.info_command.show_info'),
args=(ARG_ANONYMIZE, ARG_FILE_IO, ),
),
ActionCommand(
name='plugins',
help='Dump information about loaded plugins',
func=lazy_load_command('airflow.cli.commands.plugins_command.dump_plugins'),
args=(),
),
GroupCommand(
name="celery",
help='Celery components',
description=(
'Start Celery components. Works only when using CeleryExecutor. For more information, see '
'https://airflow.readthedocs.io/en/stable/executor/celery.html'
),
subcommands=CELERY_COMMANDS
)
]
ALL_COMMANDS_DICT: Dict[str, CLICommand] = {sp.name: sp for sp in airflow_commands}
DAG_CLI_COMMANDS: Set[str] = {
'list_tasks', 'backfill', 'test', 'run', 'pause', 'unpause', 'list_dag_runs'
}
class AirflowHelpFormatter(argparse.HelpFormatter):
"""
Custom help formatter to display help message.
It displays simple commands and groups of commands in separate sections.
"""
def _format_action(self, action: Action):
if isinstance(action, argparse._SubParsersAction): # pylint: disable=protected-access
parts = []
action_header = self._format_action_invocation(action)
action_header = '%*s%s\n' % (self._current_indent, '', action_header)
parts.append(action_header)
self._indent()
subactions = action._get_subactions() # pylint: disable=protected-access
action_subcommands, group_subcommands = partition(
lambda d: isinstance(ALL_COMMANDS_DICT[d.dest], GroupCommand), subactions
)
parts.append("\n")
parts.append('%*s%s:\n' % (self._current_indent, '', "Groups"))
self._indent()
for subaction in group_subcommands:
parts.append(self._format_action(subaction))
self._dedent()
parts.append("\n")
parts.append('%*s%s:\n' % (self._current_indent, '', "Commands"))
self._indent()
for subaction in action_subcommands:
parts.append(self._format_action(subaction))
self._dedent()
self._dedent()
# return a single string
return self._join_parts(parts)
return super()._format_action(action)
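# Editor's illustration (hedged; not part of the original module): with this formatter,
# `airflow --help` lists subcommands in two sections, roughly:
#
#   Groups:
#     celery        Celery components
#     config        View configuration
#     ...
#   Commands:
#     cheat-sheet   Display cheat sheet
#     info          Show information about current Airflow and environment
#     ...
#
# The split is driven by whether ALL_COMMANDS_DICT[d.dest] is a GroupCommand.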
def get_parser(dag_parser: bool = False) -> argparse.ArgumentParser:
"""Creates and returns command line argument parser"""
parser = DefaultHelpParser(prog="airflow", formatter_class=AirflowHelpFormatter)
subparsers = parser.add_subparsers(dest='subcommand', metavar="GROUP_OR_COMMAND")
subparsers.required = True
subparser_list = DAG_CLI_COMMANDS if dag_parser else ALL_COMMANDS_DICT.keys()
sub_name: str
for sub_name in sorted(subparser_list):
sub: CLICommand = ALL_COMMANDS_DICT[sub_name]
_add_command(subparsers, sub)
return parser
def _sort_args(args: Iterable[Arg]) -> Iterable[Arg]:
"""
Sort subcommand optional args, keep positional args
"""
def get_long_option(arg: Arg):
"""
Get long option from Arg.flags
"""
return arg.flags[0] if len(arg.flags) == 1 else arg.flags[1]
positional, optional = partition(lambda x: x.flags[0].startswith("-"), args)
yield from positional
yield from sorted(optional, key=lambda x: get_long_option(x).lower())
def _add_command(
subparsers: argparse._SubParsersAction, # pylint: disable=protected-access
sub: CLICommand
) -> None:
sub_proc = subparsers.add_parser(
sub.name, help=sub.help, description=sub.description or sub.help, epilog=sub.epilog
)
sub_proc.formatter_class = RawTextHelpFormatter
if isinstance(sub, GroupCommand):
_add_group_command(sub, sub_proc)
elif isinstance(sub, ActionCommand):
_add_action_command(sub, sub_proc)
else:
raise AirflowException("Invalid command definition.")
def _add_action_command(sub: ActionCommand, sub_proc: argparse.ArgumentParser) -> None:
for arg in _sort_args(sub.args):
arg.add_to_parser(sub_proc)
sub_proc.set_defaults(func=sub.func)
def _add_group_command(sub: GroupCommand, sub_proc: argparse.ArgumentParser) -> None:
subcommands = sub.subcommands
sub_subparsers = sub_proc.add_subparsers(dest="subcommand", metavar="COMMAND")
sub_subparsers.required = True
for command in sorted(subcommands, key=lambda x: x.name):
_add_command(sub_subparsers, command)
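# A minimal usage sketch (an editor's illustration appended at module level, not part of the
# original file): build the full parser and dispatch one simple subcommand. The dispatch via
# `args.func(args)` mirrors how _add_action_command wires `func` through set_defaults; treat
# it as an assumption if your entrypoint differs.
if __name__ == "__main__":
    demo_parser = get_parser()
    demo_args = demo_parser.parse_args(["version"])
    demo_args.func(demo_args)  # prints the installed Airflow version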
| python | 52,413 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import os
import unittest
import tempfile
from mock.mock import patch, MagicMock, call
from ambari_agent.AmbariConfig import AmbariConfig
from ambari_agent import shell
from shell import shellRunner
from sys import platform as _platform
import subprocess, time
class TestShell(unittest.TestCase):
@patch("os.setuid")
def test_changeUid(self, os_setUIDMock):
shell.threadLocal.uid = 9999
shell.changeUid()
self.assertTrue(os_setUIDMock.called)
@patch("pwd.getpwnam")
def test_shellRunner_run(self, getpwnamMock):
sh = shellRunner()
result = sh.run(['echo'])
self.assertEquals(result['exitCode'], 0)
self.assertEquals(result['error'], '')
getpwnamMock.return_value = [os.getuid(), os.getuid(), os.getuid()]
result = sh.run(['echo'], 'non_exist_user_name')
self.assertEquals(result['exitCode'], 0)
self.assertEquals(result['error'], '')
def test_kill_process_with_children(self):
if _platform == "linux" or _platform == "linux2": # Test is Linux-specific
gracefull_kill_delay_old = shell.gracefull_kill_delay
shell.gracefull_kill_delay = 0.1
sleep_cmd = "sleep 314159265"
test_cmd = """ (({0}) & ({0} & {0})) """.format(sleep_cmd)
# Starting process tree (multiple process groups)
test_process = subprocess.Popen(test_cmd, stderr=subprocess.PIPE, stdout=subprocess.PIPE, shell=True)
time.sleep(0.3) # Delay to allow subprocess to start
# Check if processes are running
ps_cmd = """ps aux """
ps_process = subprocess.Popen(ps_cmd, stderr=subprocess.PIPE, stdout=subprocess.PIPE, shell=True)
(out, err) = ps_process.communicate()
self.assertTrue(sleep_cmd in out)
# Kill test process
shell.kill_process_with_children(test_process.pid)
test_process.communicate()
# Now test process should not be running
ps_process = subprocess.Popen(ps_cmd, stderr=subprocess.PIPE, stdout=subprocess.PIPE, shell=True)
(out, err) = ps_process.communicate()
self.assertFalse(sleep_cmd in out)
shell.gracefull_kill_delay = gracefull_kill_delay_old
else:
# Do not run under other systems
pass
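# Hedged addition (not in the original file): allow the module to be run directly with the
# standard unittest runner; assumes no custom test loader is required for these cases.
if __name__ == '__main__':
    unittest.main()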
| python | 2,980 |
"""
****************************************************************************************************
:copyright (c) 2019-2020 URBANopt, Alliance for Sustainable Energy, LLC, and other contributors.
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted
provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this list of conditions
and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this list of conditions
and the following disclaimer in the documentation and/or other materials provided with the
distribution.
Neither the name of the copyright holder nor the names of its contributors may be used to endorse
or promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
****************************************************************************************************
"""
import logging
import os
from geojson_modelica_translator.geojson.urbanopt_geojson import (
UrbanOptGeoJson
)
from geojson_modelica_translator.modelica.input_parser import PackageParser
from geojson_modelica_translator.scaffold import Scaffold
_log = logging.getLogger(__name__)
class GeoJsonModelicaTranslator(object):
"""
Main class for using the GeoJSON to Modelica Translator.
"""
def __init__(self):
self.buildings = []
# directory name member variables. These are set in the scaffold_directory method
self.scaffold = None
self.system_parameters = None
@classmethod
def from_geojson(cls, filename):
"""
Initialize the translator from a GeoJSON file
:param filename: string, GeoJSON file
:return: object, GeoJsonModelicaTranslator
"""
if os.path.exists(filename):
json = UrbanOptGeoJson(filename)
klass = GeoJsonModelicaTranslator()
klass.buildings = json.buildings
return klass
else:
raise Exception(f"GeoJSON file does not exist: {filename}")
def set_system_parameters(self, sys_params):
"""
Read in the system design parameter data
:param SystemParameters: SystemParameters object
"""
self.system_parameters = sys_params
def scaffold_directory(self, root_dir, project_name, overwrite=False):
"""
Scaffold out the initial directory and set various helper directories
:param root_dir: string, absolute path where the project will be scaffolded.
:param project_name: string, name of the project that is being created
:return: string, path to the scaffold directory
"""
self.scaffold = Scaffold(root_dir, project_name)
self.scaffold.create()
return self.scaffold.project_path
def to_modelica(self, project_name, save_dir, model_connector_str="TeaserConnector"):
"""
Convert the data in the GeoJSON to modelica based-objects
:param save_dir: str, directory where the exported project will be stored. The name of the project will be
{save_dir}/{project_name}
:param model_connector_str: str, which model_connector to use
"""
self.scaffold_directory(save_dir, project_name)
# TODO: Create a map to load the required model_connectors
import geojson_modelica_translator.model_connectors.teaser
import geojson_modelica_translator.model_connectors.spawn
mc_klass = getattr(geojson_modelica_translator.model_connectors.teaser, model_connector_str)
model_connector = mc_klass(self.system_parameters)
_log.info("Exporting to Modelica")
for building in self.buildings:
# print("Jing2: ", building.feature.properties["type"])
_log.info(f"Adding building to model connector: {mc_klass.__class__}")
model_connector.add_building(building)
_log.info(f"Translating building to model {building}")
model_connector.to_modelica(self.scaffold, keep_original_models=False)
# add in Substations
# TODO: YL, where are the substations/ETSs?
# add in Districts
# add in Plants
# now add in the top level package.
pp = PackageParser.new_from_template(self.scaffold.project_path, project_name, ["Loads"])
pp.save()
# TODO: BuildingModelClass
# TODO: mapper class
# TODO: lookup tables / data sets
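# A minimal, hedged usage sketch appended at module level for illustration (not part of the
# original file). The GeoJSON/parameter file names are hypothetical and the SystemParameters
# import path is an assumption; only from_geojson, set_system_parameters and to_modelica are
# taken from the class defined above.
if __name__ == "__main__":
    from geojson_modelica_translator.system_parameters.system_parameters import SystemParameters  # assumed path
    translator = GeoJsonModelicaTranslator.from_geojson("example_district.geojson")  # hypothetical input
    translator.set_system_parameters(SystemParameters("system_params.json"))         # hypothetical input
    translator.to_modelica("district_project", "./modelica_output")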
| python | 5,320 |
# Copyright 2017 Sascha Schweitzer
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# General packages (packages for pandas data formats and graph plotting will be loaded later if required)
import time
import collections
from datetime import datetime
import re
import sys
import csv
import copy
import pytz
# Helper packages included in this package
from persons.support_functions.string_tools import * # functions: normalize(string), remove_particles(string)
from persons.support_functions.graph_functions import * # class: Graph, init with Graph(matrix), functions: transitive_reduction(self) - works only for transitive closure, get_single_strands(self)
class Persons(object):
def __init__(self):
#############################################################
### Parameters #################################
#####################################
# Public
# Transformations of the input
self.remove_particles_suffixes = True
self.normalize_names = True
# Relation of the first name elements
self.only_first_fnm = False
self.middle_name_rule = False # Only match names if middle initial identical (cf. Jones, 2009)
self.match_subsets = True               # Names carrying less information that are consistent with a richer name are matched
self.match_interlaced = False           # Names that are not identical but carry the same amount of information are matched
self.ignore_order_of_forenames = False  # Ignore order of first names / middle names
# Sub-parameter for subset/interlaced matching: the initials of the matching names, as far as they exist, must be at the same positions (usually the leading forenames are the ones written out).
# David & J. David are not allowed, because the first initials are different.
# Alber Louis J. & Alber James are not allowed, because the second initials are different.
self.absolute_position_matters = True
#####################################
# Private
# Relation of the last name elements (experimental, do not use yet)
self._detect_marriages = False
self._accept_devorce = True
# remove empty clusters
self._empty_clusters_remove = True
# Further criteria that may distinguish persons (experimental, do not use yet)
self._split_by_time_gap = False
self._maximum_time_gap = 50
# Technical parameters
self._max_graph_size = 50
self._table_with_unique_names = "known persons table"
# Coding of the adjacency matrix (no parameter, do not change)
self._it_subset = 1
self._me_subset = -1
# Timezone and time format, used in the saved data
self._tz = pytz.timezone('Europe/Berlin')
self._fmt = '%Y-%m-%d %H:%M:%S %Z%z'
# two family names with a dash or with a space in between
self._marriage_name_pattern = re.compile(r"^[a-z]{2,}[\s][a-z]{2,}$")
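# Editor's illustration (hedged): after normalization, this pattern matches exactly two
# lower-case surnames separated by a single whitespace character, e.g. "smith miller";
# single names ("smith") and names with more than two parts ("de la cruz") do not match.
# A dash in the raw name is presumably already converted to a space by normalize().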
##########################################################
### Internal functions ################################
def _debug_condition(snm, fnm):
''' Returns if a value should be printed for debugging
'''
global debug, snm_debug, fnm_debug
return debug and snm==snm_debug and fnm.find("M")>-1
def _make_flat_tree(self, input_data, names, source):
''' Make a dictionary tree with the levels: . -> [last name] -> [initial of the first forename] -> ["records"] / ["matrix"].
The matrix contains a list of vectors. Each vector compares the corresponding entry of ["records"] to the other records.
'''
# Mapping for converting the perspective of the name comparison
flip = { self._it_subset : self._me_subset,
self._me_subset : self._it_subset,
"crossed" : "crossed",
"different" : "different"
}
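# Editor's note: `flip` re-expresses a stored comparison from the new record's point of view
# as the existing node's point of view (subset relations swap sides; "crossed" and "different"
# are symmetric). "equal"/"identical" never need flipping, because equal records share a node.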
# Iterate over records from the database
for row in input_data:
#############################################################
# Prepare data
# Only if it's not a newly added virtual record
if "virtual_row_nr" not in row:
# Add tag for the record source
row["source"] = source["source_type"]
# Change name of the year column to 'year'
if source["columns"]["year_column"] is not None and row[source["columns"]["year_column"]]!="":
row["year"] = int( row.pop(source["columns"]["year_column"]) )
# Change name of the first name column to 'fnm'
row["fnm"] = row.pop(source["columns"]["fnm_column"])
# Change name of the last name column to 'snm'
row["snm"] = row.pop(source["columns"]["snm_column"])
# Change name of the ID column to 'id'
row["id"] = row.pop(source["columns"]["id_column"])
if source["name_format"]=="fnm, middle and snm":
if "str" in str(type(row[source["columns"]["mnm_column"]])):
# Change name of the middle name column to 'mnm'
row["mnm"] = row.pop(source["columns"]["mnm_column"])
# Strip of blanks at end and beginning
row["mnm"] = row["mnm"].strip()
# Combine first and middle name
row["fnm"] += " " + row["mnm"]
else:
row["mnm"] = ""
# Normalize first name
if row["fnm"] is not None:
if self.only_first_fnm:
row["fnm_normalized"] = normalize(row["fnm"]).split(" ")[0]
else:
row["fnm_normalized"] = normalize(row["fnm"])
else:
continue
# Normalize last name
if row["snm"] is not None:
row["snm_normalized"] = normalize(row["snm"])
else:
continue
# Process entry only if it's not empty
if not(
row["fnm"]=="" or\
row["snm"]==""
):
#int(row["id"]) in [20509,8105,20507,4505,7402,31386]):
if self.remove_particles_suffixes:
row["snm_normalized"] = remove_particles(row["snm_normalized"])
#create virtual records for possible born names (family names)
if self._detect_marriages:
if self._marriage_name_pattern.match(row["snm_normalized"]) is not None:
# Mark that the row has been detected as having a possible marriage name
row["split_for_detecting_marriage"] = 1
# if there are two normal family names after being split
if len(row["snm_normalized"].split()) == 2:
#create a new false (virtual) record for every possible born name
virtual_rows_to_be_appended = []
for x in range(len(row["snm_normalized"].split())):
copyrow = copy.deepcopy(row)
# Copy all info from this record into the virtual record
virtual_rows_to_be_appended.append(copyrow)
#give the virtual row an virtual-row-ID for identification
virtual_rows_to_be_appended[-1]["virtual_row_nr"] = x
#give the virtual record the possible born family name as it's family name
virtual_rows_to_be_appended[-1]["snm_normalized"] = (row["snm_normalized"].split())[x]
#add a column to store the family name in the original record
virtual_rows_to_be_appended[-1]["original_snm"] = row["snm_normalized"]
#delete "split_for_detecting_marriage" because this record is not original
del virtual_rows_to_be_appended[-1]["split_for_detecting_marriage"]
#add the virtual records into the input data
for virtual_record in virtual_rows_to_be_appended:
input_data.append(virtual_record)
############################################################
# Build tree
# -1 indicates that the record doesn't belong to a cluster yet
row["cluster"]=-1
#Level of the last name
if row["snm_normalized"] not in names.keys():
names[row["snm_normalized"]] = {} # New last name
# The matrix indices are node indices -> each node contains all equally named records (two-sided mapping given by records_by_node and node_by_record)
names[row["snm_normalized"]] = {"records": [row], "matrix": [["identical"]], "records_by_node": [[0]], "node_by_record": [0]}
# If existing first letter, add record to structure and matrix
else:
# Assume we are dealing with a new node
new_node = True
# Create easy reference to the node name / record name mapping
records_by_node = names[row["snm_normalized"]]["records_by_node"]
node_by_record = names[row["snm_normalized"]]["node_by_record"]
# Record number (= length of the existing list)
record_number = len(names[row["snm_normalized"]]["records"])
# Add the new record to the tree
names[row["snm_normalized"]]["records"] . append(row)
# Start new node (will be removed again, if record turns out to be equal to existing node)
records_by_node . append([record_number])
# Start new row in matrix (will be removed again, if record turns out to be equal to existing node)
names[row["snm_normalized"]]["matrix"] . append([])
########################################################
## Comparison of all record-existing node combinations (comparison matrix ["matrix"])
# Iterate over all existing nodes
for existing_node_index in range( len( names[row["snm_normalized"]]["matrix"] )-1 ):
# Compare new record to an existing record (from the perspecitve of the new entry)
comparison_result=self._compare(row["fnm_normalized"], names[row["snm_normalized"]]["records"][ records_by_node[existing_node_index][0] ]["fnm_normalized"])
#print("comparing:", row["fnm_normalized"], names[row["snm_normalized"]]["records"][ records_by_node[existing_node_index][0] ]["fnm_normalized"],comparison_result)
# If row is equal to existing
if comparison_result=="equal":
# Whoa, wait, this is not a new node
new_node = False
# Map to an existing node
records_by_node[existing_node_index].append(record_number)
#original: node_by_record.append(node_by_record[existing_node_index])
node_by_record.append(existing_node_index)
# Remove the matrix row added for the wrongly assumed new node
names[row["snm_normalized"]]["matrix"].pop(-1)
# Remove the new node added for the wrongly assumed new node
records_by_node . pop(-1) # WL: the record number of the one, whose forename has never appeared
# This is all old hat ("alter Hut"); let's not waste our time here with more comparisons
#print("equal",row,names[row["snm_normalized"]]["records"][records_by_node[existing_node_index][0] ])
break
else:
# Append to the new record's matrix vector (horizontal part of the matrix)
names[row["snm_normalized"]]["matrix"][-1].append(comparison_result)
if new_node:
# Map current record to a new node
node_by_record . append( len(records_by_node)-1 )
# Add comparison to the new node itself (it's not only equal, but identical to itself)
names[row["snm_normalized"]]["matrix"][-1].append("identical")
# Fill the vertical parts of the existing nodes vectors with the additional entry (kind of a waste, but we like squares - go Spongebob, go!)
for existing_node_index in range( len( names[row["snm_normalized"]]["matrix"] )-1 ):
names[row["snm_normalized"]]["matrix"][existing_node_index].append( flip[ names[row["snm_normalized"]]["matrix"][-1][existing_node_index] ] )
# remove snm_key, which only contains virtual records
if self._detect_marriages:
for snm_key in sorted(names):
try:
if all (names[snm_key]["records"][y]["virtual_row_nr"] >= 0 for y in range(len(names[snm_key]["records"]))):
names.pop(snm_key)
except:
pass
def _find_interrelated(self, names, snm_key, to_process, relevant_relations, matching_code=set()): # WL: set() is unordered and contains no duplicates
''' Find all nodes that are interrelated (to the first node to be processed and each other)
'''
interrelated = set( [to_process[0]] )
interrelated_new = set( [to_process[0]] )
to_process . remove(to_process[0])
while len(interrelated_new)>0:
temp = set()
# Iterate over all new items
for i_node in interrelated_new:
# Iterate over all not assigned items
for i_other_node in to_process:
# Check if the item to be compared matches an item of the interrelated cluster
if i_other_node not in temp and names[snm_key]["matrix"][i_node][i_other_node] in relevant_relations:
temp.add(i_other_node)
if names[snm_key]["matrix"][i_node][i_other_node]=="crossed":
matching_code.add("interlaced")
elif names[snm_key]["matrix"][i_node][i_other_node] in [self._me_subset, self._it_subset]:
matching_code.add("vertical")
# Remove items assigned to the interrelated cluster from the list of items to be processed
for item in temp:
to_process.remove(item)
# Store items assigned as the new related items
interrelated_new = temp
interrelated . update(interrelated_new)
return interrelated
def _cluster(self, names, cluster_list, cluster_number_list):
''' Cluster records from the given tree
'''
######################################################
# Identify related names. Sort into same cluster if compatible. Mark as ambiguous if incompatible.
# Level of the last name
for snm_key in sorted(names):
# Nodes to be processed
to_be_processed = list( range( len(names[snm_key]["records_by_node"]) ) )
# Create easy reference to the node name / record name mapping
records_by_node = names[snm_key]["records_by_node"]
node_by_record = names[snm_key]["node_by_record"]
while len(to_be_processed)>0:
# None of the interrelated items is "different" / mutually exclusive from the other (per interrelated group)
interrelated_consistent = True
# Initialize tag reporting which relationships occured in the matching
matching_code = set(["equal"])
#####################################################################
# Match all related items (match_subsets_and_interlaced==True)
if self.match_interlaced and self.match_subsets and interrelated_consistent:
# original: if match_subsets_and_interlaced and interrelated_consistent:
# Break condition for the case a pure subset is removed (move back to while loop in that case)
pure_subset_removed = False
# Reset the matching code
matching_code = set(["equal"])
# Find all nodes that are interrelated (to the first node to be processed and each other)
interrelated = self._find_interrelated(names, snm_key, to_be_processed, ["identical", self._me_subset, self._it_subset, "crossed"], matching_code)
#########################################
# Check consistency of the set of interrelated items
# Find pure subsets with conflicting supersets
for item in interrelated:
# Only for pure subsets
if self._it_subset not in names[snm_key]["matrix"][item] and "crossed" not in names[snm_key]["matrix"][item] and not pure_subset_removed:
# Compare all their supersets
for first in interrelated:
if names[snm_key]["matrix"][item][first]==self._me_subset and not pure_subset_removed:
for second in interrelated:
if names[snm_key]["matrix"][item][second]==self._me_subset and not pure_subset_removed:
# If the supersets of the pure subset are conflicting
if names[snm_key]["matrix"][first][second]=="different":
pure_subset_removed = True
item_to_remove = item
# _find_interrelated might not have checked all possible pairs for "crossed" relationships, therefore add this info to matching_code
elif names[snm_key]["matrix"][first][second]=="crossed":
matching_code.add("interlaced")
# Remove the pure subset
if pure_subset_removed:
# Assign the pure subset to a cluster
for i_record in records_by_node[item_to_remove]:
# Reset the matching code
matching_code = set(["equal"])
# Assign cluster to record
names[snm_key]["records"][i_record]["cluster"]=self._cluster_number
names[snm_key]["records"][i_record]["matching"]=matching_code
# record the cluster nr and id for rework for marriage name later
if self._detect_marriages:
if "split_for_detecting_marriage" in names[snm_key]["records"][i_record]:
if names[snm_key]["records"][i_record]["id"] not in cluster_number_list:
cluster_number_list[names[snm_key]["records"][i_record]["id"]] = {}
cluster_number_list[names[snm_key]["records"][i_record]["id"]][0] = self._cluster_number
elif "virtual_row_nr" in names[snm_key]["records"][i_record]:
if names[snm_key]["records"][i_record]["id"] not in cluster_number_list:
cluster_number_list[names[snm_key]["records"][i_record]["id"]] = {}
if names[snm_key]["records"][i_record]["virtual_row_nr"] == 0:
cluster_number_list[names[snm_key]["records"][i_record]["id"]][1] = self._cluster_number
else:
cluster_number_list[names[snm_key]["records"][i_record]["id"]][2] = self._cluster_number
# Check if cluster exists in list of clusters (and add if non-existent)
if self._cluster_number not in cluster_list.keys():
cluster_list[self._cluster_number]=[]
# Append the record to the cluster list
cluster_list[self._cluster_number].append(names[snm_key]["records"][i_record])
# Continue with next cluster number
self._cluster_number +=1
# Remove from the set of interrelated items
interrelated.remove(item_to_remove)
# Change matrix to make the item different
for element in range( len(names[snm_key]["matrix"][item_to_remove])):
if element!=item_to_remove:
names[snm_key]["matrix"][item_to_remove][element]="different"
names[snm_key]["matrix"][element][item_to_remove]="different"
# Add the other interrelated items to the items to be processed
to_be_processed = to_be_processed + list(interrelated)
# If a pure subset has been removed, go back to the while loop
else:
# For all pairs
for first in interrelated:
for second in interrelated:
# Check their consistency
if names[snm_key]["matrix"][first][second]=="different":
interrelated_consistent = False
# Set of interrelated needs to be processed again (in the code for single-strand matching below)
to_be_processed_level_2 = list( interrelated.copy() )
# If interrelated_consistent all entries get the same id
if interrelated_consistent:
# Assign a new cluster number
for i_node in interrelated:
# Iterate over all records in the node (equal names)
for i_record in records_by_node[i_node]:
# Assign cluster to record
names[snm_key]["records"][i_record]["cluster"]=self._cluster_number
names[snm_key]["records"][i_record]["matching"]=matching_code
# record the cluster nr and id for rework for marriage name later
if self._detect_marriages:
if "split_for_detecting_marriage" in names[snm_key]["records"][i_record]:
if names[snm_key]["records"][i_record]["id"] not in cluster_number_list:
cluster_number_list[names[snm_key]["records"][i_record]["id"]] = {}
cluster_number_list[names[snm_key]["records"][i_record]["id"]][0] = self._cluster_number
elif "virtual_row_nr" in names[snm_key]["records"][i_record]:
if names[snm_key]["records"][i_record]["id"] not in cluster_number_list:
cluster_number_list[names[snm_key]["records"][i_record]["id"]] = {}
if names[snm_key]["records"][i_record]["virtual_row_nr"] == 0:
cluster_number_list[names[snm_key]["records"][i_record]["id"]][1] = self._cluster_number
else:
cluster_number_list[names[snm_key]["records"][i_record]["id"]][2] = self._cluster_number
# Check if cluster exists in list of clusters (and add if non-existent)
if self._cluster_number not in cluster_list.keys():
cluster_list[self._cluster_number]=[]
# Append the record to the cluster list
cluster_list[self._cluster_number].append(names[snm_key]["records"][i_record])
# Continue with next cluster number
self._cluster_number+=1
#####################################################################
# Match subsets (match_subsets_and_interlaced==False and match_subsets=True)
if self.match_subsets and ( self.match_interlaced==False or not(interrelated_consistent) ):
# Process only interrelated items from the previous interlaced part (if with interlaced) or process all items
if not(self.match_interlaced):
to_be_processed_level_2 = to_be_processed
while len(to_be_processed_level_2)>0:
# Break condition for the case a pure subset is removed (move back to while loop in that case)
pure_subset_removed = False
# Find all nodes that are interrelated (to the first node to be processed and each other)
interrelated = self._find_interrelated(names, snm_key, to_be_processed_level_2, ["identical", self._me_subset, self._it_subset], set() )
# Find pure subsets with conflicting supersets
for item in interrelated:
# Only for pure subsets
if self._it_subset not in names[snm_key]["matrix"][item] and "crossed" not in names[snm_key]["matrix"][item] and not pure_subset_removed:
# Compare all their supersets
for first in interrelated:
if names[snm_key]["matrix"][item][first]==self._me_subset and not pure_subset_removed:
for second in interrelated:
if names[snm_key]["matrix"][item][second]==self._me_subset and not pure_subset_removed:
# If the supersets of the pure subset are conflicting
if names[snm_key]["matrix"][first][second]=="different":
pure_subset_removed = True
item_to_remove = item
# Remove the pure subset
if pure_subset_removed:
# Assign the pure subset to a cluster
for i_record in records_by_node[item_to_remove]:
# Assign cluster to record
names[snm_key]["records"][i_record]["cluster"]=self._cluster_number
names[snm_key]["records"][i_record]["matching"]=matching_code
# record the cluster nr and id for rework for marriage name later
if self._detect_marriages:
if "split_for_detecting_marriage" in names[snm_key]["records"][i_record]:
if names[snm_key]["records"][i_record]["id"] not in cluster_number_list:
cluster_number_list[names[snm_key]["records"][i_record]["id"]] = {}
cluster_number_list[names[snm_key]["records"][i_record]["id"]][0] = self._cluster_number
elif "virtual_row_nr" in names[snm_key]["records"][i_record]:
if names[snm_key]["records"][i_record]["id"] not in cluster_number_list:
cluster_number_list[names[snm_key]["records"][i_record]["id"]] = {}
if names[snm_key]["records"][i_record]["virtual_row_nr"] == 0:
cluster_number_list[names[snm_key]["records"][i_record]["id"]][1] = self._cluster_number
else:
cluster_number_list[names[snm_key]["records"][i_record]["id"]][2] = self._cluster_number
# Check if cluster exists in list of clusters (and add if non-existent)
if self._cluster_number not in cluster_list.keys():
cluster_list[self._cluster_number]=[]
# Append the record to the cluster list
cluster_list[self._cluster_number].append(names[snm_key]["records"][i_record])
# Continue with next cluster number
self._cluster_number +=1
# Remove from the set of interrelated items
interrelated.remove(item_to_remove)
# Change matrix to make the item different
for element in range( len(names[snm_key]["matrix"][item_to_remove])):
if element!=item_to_remove:
names[snm_key]["matrix"][item_to_remove][element]="different"
names[snm_key]["matrix"][element][item_to_remove]="different"
# Add the other interrelated items to the items to be processed
to_be_processed_level_2 = to_be_processed_level_2 + list(interrelated)
# If a pure subset has been removed, go back to the while loop
else:
########################################
# Search for chains of subsets (without forks)
if len(interrelated)>1 and len(interrelated)<=self._max_graph_size:
# Graph (create from adjacency matrix)
G = Graph(names[snm_key]["matrix"], list(interrelated))
# Transitive reduction
G . transitive_reduction()
# Single stranded parts of the graph
single_strands = G.get_single_strands()
# Graphs that are too big are expensive, so treat each node as its own strand
elif len(interrelated)>self._max_graph_size:
single_strands = [ [x] for x in interrelated]
# If there is only one node, no graph needed (case is redundant with the previous one)
else:
single_strands = [list(interrelated)]
#######################################
# Assign cluster numbers to the cleaned clusters
for strand in single_strands:
# If only one element, no vertical relationship
if len(strand)==1:
matching_code = set(["equal"])
else:
matching_code = set(["vertical"])
# Assign a new cluster number
for i_node in strand:
# Iterate over all records in the node (equal names)
for i_record in records_by_node[i_node]:
# Assign cluster to record
names[snm_key]["records"][i_record]["cluster"]=self._cluster_number
names[snm_key]["records"][i_record]["matching"]=matching_code
# record the cluster nr and id for rework for marriage name later
if self._detect_marriages:
if "split_for_detecting_marriage" in names[snm_key]["records"][i_record]:
if names[snm_key]["records"][i_record]["id"] not in cluster_number_list:
cluster_number_list[names[snm_key]["records"][i_record]["id"]] = {}
cluster_number_list[names[snm_key]["records"][i_record]["id"]][0] = self._cluster_number
elif "virtual_row_nr" in names[snm_key]["records"][i_record]:
if names[snm_key]["records"][i_record]["id"] not in cluster_number_list:
cluster_number_list[names[snm_key]["records"][i_record]["id"]] = {}
if names[snm_key]["records"][i_record]["virtual_row_nr"] == 0:
cluster_number_list[names[snm_key]["records"][i_record]["id"]][1] = self._cluster_number
else:
cluster_number_list[names[snm_key]["records"][i_record]["id"]][2] = self._cluster_number
# Check if cluster exists in list of clusters (and add if non-existent)
if self._cluster_number not in cluster_list.keys():
cluster_list[self._cluster_number]=[]
# Append the record to the cluster list
cluster_list[self._cluster_number].append(names[snm_key]["records"][i_record])
# Continue with next cluster number
self._cluster_number+=1
# If only subsets are matched, sync back items to be processed
if not(self.match_interlaced):
to_be_processed = to_be_processed_level_2
# If neither interlaced nor subsets shall be matched, match only the equal/identical entries
if not(self.match_subsets):
# Process all items until none is left
for i_node in to_be_processed:
for i_record in records_by_node[i_node]:
names[snm_key]["records"][i_record]["cluster"]=self._cluster_number
names[snm_key]["records"][i_record]["matching"]=matching_code
# record the cluster nr and id for rework for marriage name later
if self._detect_marriages:
if "split_for_detecting_marriage" in names[snm_key]["records"][i_record]:
if names[snm_key]["records"][i_record]["id"] not in cluster_number_list:
cluster_number_list[names[snm_key]["records"][i_record]["id"]] = {}
cluster_number_list[names[snm_key]["records"][i_record]["id"]][0] = self._cluster_number
elif "virtual_row_nr" in names[snm_key]["records"][i_record]:
if names[snm_key]["records"][i_record]["id"] not in cluster_number_list:
cluster_number_list[names[snm_key]["records"][i_record]["id"]] = {}
if names[snm_key]["records"][i_record]["virtual_row_nr"] == 0:
cluster_number_list[names[snm_key]["records"][i_record]["id"]][1] = self._cluster_number
else:
cluster_number_list[names[snm_key]["records"][i_record]["id"]][2] = self._cluster_number
# Check if cluster exists in list of clusters (and add if non-existent)
if self._cluster_number not in cluster_list.keys():
cluster_list[self._cluster_number]=[]
# Append the record to the cluster list
cluster_list[self._cluster_number].append(names[snm_key]["records"][i_record])
# Continue with next cluster number
self._cluster_number+=1
# End the while loop
break
######################################################
## Split or invalidate clusters with multiple distinct persons
copy_keys_cluster_list=list(cluster_list.keys()) # copy to avoid changing the iterator during iteration; a deep copy is not required
for i_cluster in copy_keys_cluster_list:
# Collect unique entries
known_unique = [record for record in cluster_list[i_cluster] if record["source"]==self._table_with_unique_names]
# If multiple known unique persons are in the cluster
if len(known_unique)>1:
# Create new clusters for unique entries
indices_new_clusters = []
for record in known_unique:
record["matching"]=set(["multiple known persons separated"])
record["cluster"] = self._cluster_number
cluster_list[self._cluster_number]= [record]
indices_new_clusters.append(self._cluster_number)
# Increase cluster number
self._cluster_number +=1
# Remove known unique from previous cluster
cluster_list[i_cluster].remove(record)
# Move those records to the new clusters that are equal
for index_new_cluster in indices_new_clusters:
unique_fnm = cluster_list[index_new_cluster][0]["fnm_normalized"]
for record in cluster_list[i_cluster]:
non_unique_fnm = record["fnm_normalized"]
# Check for equality
if self._compare(unique_fnm, non_unique_fnm)=="equal":
record["matching"]=set(["multiple known persons separated"])
record["cluster"] = index_new_cluster
cluster_list[index_new_cluster].append(record)
cluster_list[i_cluster].remove(record)
else:
record["matching"]=set(["moved from multiple known persons"])
######################################################
## Marriage detection
if self._detect_marriages:
# clean up the clusters, which contain only virtual records
for cluster in sorted(cluster_list):
try:
if all (cluster_list[cluster][y]["virtual_row_nr"] >= 0 for y in range(len(cluster_list[cluster]))):
for record in cluster_list[cluster]:
#print(cluster_number_list[record["id"]])
del cluster_number_list[record["id"]][record["virtual_row_nr"]+1]
#print("after deleting", cluster_number_list[record["id"]])
del cluster_list[cluster][:]
except:
pass
# find out the original records, whose virtual records were both cleaned up
for cluster in sorted(cluster_number_list):
if len(cluster_number_list[cluster]) < 2:
for record in range(len(cluster_list[cluster_number_list[cluster][0]])):
cluster_list[cluster_number_list[cluster][0]][record]["split_for_detecting_marriage"] = "possible born surname not found"
del cluster_number_list[cluster]
def _compare(self, me, it):
''' Comparison of first names from the perspective of the first parameter
'''
if me==it:
return "equal"
elif not( self.middle_name_rule or self.match_subsets or self.match_interlaced ):
return "different"
me=me.split(" ")
it=it.split(" ")
# If me and it do neither share a full name nor an initial, they are different
# Simple version (equality has been tested above)
if len(me)==1 and len(it)==1 and me[0][0:1]!=it[0][0:1]:
return "different"
# General version of completely different
if len( set(me + [x[0:1] for x in me]).intersection(set(it +[x[0:1] for x in it])) )==0:
return "different"
# Common case: the first forenames are equal and the second is missing or an initial
if len(me)<3 and len(it)<3 and me[0]==it[0] and not self.middle_name_rule:
# If one has only one first name, it's a subset
if len(me)==1:
return self._me_subset
elif len(it)==1:
return self._it_subset
# If one name has an initial as second first name that matches the other second first name, it's a subset
elif len(me[1])==1 and me[1]==it[1][0:1]:
return self._me_subset
elif len(it[1])==1 and it[1]==me[1][0:1]:
return self._it_subset
# If first name is equal and all middle names have the same initial (Jones' rule)
if self.middle_name_rule:
if len(me)>1 and len(it)>1 and len(me)==len(it):
# If first names are equal
if me[0]==it[0]:
# Iterate over all middle names
for index in range( 1,len(me) ):
# If one of the initials differ, names are different
if me[index][0:1]!=it[index][0:1]:
return "different"
# If first names are different
else:
return "different"
# If not the same number of first names
else:
return "different"
# If none of the middle name initials is different
return "equal"
# If first names can be in different order and subsets play a role
elif self.ignore_order_of_forenames:
# Check how me_parts relate to the other
part_comparison_me = []
for index_first in range(len(me)):
first = me[index_first]
part_comparison_me .append("unknown")
copy_it = it[:]
# Check if first of me is somewhere in 'it'
index_second = 0
while index_second < len(copy_it):
second = copy_it[index_second]
if first==second:
part_comparison_me[index_first] = "equal"
copy_it.pop(index_second)
break
elif first==second[0:1]:
part_comparison_me[index_first] = "me_initial"
copy_it.pop(index_second)
break
elif first[0:1]==second:
part_comparison_me[index_first] = "it_initial"
copy_it.pop(index_second)
break
index_second +=1
# Check how it_parts relate to me
part_comparison_it = []
for index_first in range(len(it)):
first = it[index_first]
part_comparison_it .append("unknown")
copy_me = me[:]
# Check if first of it is somewhere in 'me'
index_second = 0
while index_second < len(copy_me):
second = copy_me[index_second]
if first==second:
part_comparison_it[index_first] = "equal"
copy_me.pop(index_second)
break
elif first==second[0:1]:
part_comparison_it[index_first] = "it_initial"
copy_me.pop(index_second)
break
elif first[0:1]==second:
part_comparison_it[index_first] = "me_initial"
copy_me.pop(index_second)
break
index_second +=1
# Convert to set (for using set functions)
part_comparison_me = set(part_comparison_me)
part_comparison_it = set(part_comparison_it)
part_comparison_all = part_comparison_it.union(part_comparison_me)
# it shorter (I have more names) - it should be subset, unless elements are not in me (different) or I'm subset as well (crossed)
if len(me) > len(it):
if "unknown" in part_comparison_it:
return "different"
elif "me_initial" in part_comparison_all:
return "crossed"
else:
return self._it_subset
# me shorter (I have fewer names)
elif len(me) < len(it):
if "unknown" in part_comparison_me:
return "different"
elif "it_initial" in part_comparison_all:
return "crossed"
else:
return self._me_subset
else:
if len( part_comparison_all.difference(set(["equal"])) )==0:
return "equal"
elif "unknown" in part_comparison_all:
return "different"
elif "me_initial" in part_comparison_all and "it_initial" in part_comparison_all:
return "crossed"
elif "me_initial" in part_comparison_all:
return self._me_subset
elif "it_initial" in part_comparison_all:
return self._it_subset
# If order of the first names needs to be respected and subsets play a role
else:
comparing_continue = True
# if positions of initials should be checked
if self.absolute_position_matters:
for initial_position in range(min(len(me),len(it))):
if me[initial_position][0] != it[initial_position][0]:
return "different"
comparing_continue = False
break
# if positions are the same or if position doesn't play a role
if comparing_continue:
index_last_found = 0
part_comparison_me = []
for index_first in range( len(me) ):
first = me[index_first]
part_comparison_me .append("unknown")
# Check if first of me is somewhere in 'it'
if index_last_found < len(it):
for index_second in range( index_last_found, len(it) ):
second = it[index_second]
if first==second:
part_comparison_me[index_first] = "equal"
index_last_found = index_second+1
break
elif first==second[0:1]:
part_comparison_me[index_first] = "me_initial"
index_last_found = index_second+1
break
elif first[0:1]==second:
part_comparison_me[index_first] = "it_initial"
index_last_found = index_second+1
break
# it shorter (I have more names)
if len(me) > len(it):
# If 'it' has some parts that don't match 'me' (even though it is smaller)
if (len(part_comparison_me) - part_comparison_me.count("unknown")) < len(it):
return "different"
elif "me_initial" in part_comparison_me:
return "crossed"
else:
return self._it_subset
# me shorter (I have fewer names)
elif len(me) < len(it):
if "unknown" in part_comparison_me:
return "different"
elif "it_initial" in part_comparison_me:
return "crossed"
else:
return self._me_subset
else:
if "unknown" in part_comparison_me:
return "different"
elif "me_initial" in part_comparison_me and "it_initial" in part_comparison_me:
return "crossed"
elif "me_initial" in part_comparison_me:
return self._me_subset
elif "it_initial" in part_comparison_me:
return self._it_subset
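# Editor's illustration (hedged, assuming the default settings from __init__):
#   _compare("john a", "john albert") -> self._me_subset   ("a" is an initial of "albert")
#   _compare("j", "john")             -> self._me_subset
#   _compare("john a", "j albert")    -> "crossed"          (each side abbreviates a different part)
#   _compare("john", "james")         -> "different"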
def _time_gap(self, cluster_list, maximum_time_gap, cluster_number_list, action="report"):
''' Split clusters whose records are separated by more than maximum_time_gap years ("split"), or report the maximum gap observed ("report")
'''
# Original keys of the cluster list for the iteration
original_keys = list(cluster_list.keys())
for i_cluster in original_keys:
# Sort records in the cluster by their year stamp
cluster_list[i_cluster] = sorted(cluster_list[i_cluster], key=lambda k: k['year'])
if action=="split":
# Memorize when new cluster started
new_cluster_started = False
# Collect items to be removed
to_be_removed = []
for i_record in range( len(cluster_list[i_cluster]) ):
# If new cluster has been started, sort all further items into it
if new_cluster_started:
cluster_list[i_cluster][i_record]["cluster"] = self._cluster_number
# record the cluster nr and id for rework for marriage name later
if self._detect_marriages and cluster_list[i_cluster][i_record]["id"] in cluster_number_list:
if "split_for_detecting_marriage" in cluster_list[i_cluster][i_record]:
cluster_number_list[cluster_list[i_cluster][i_record]["id"]][0] = self._cluster_number
elif "virtual_row_nr" in cluster_list[i_cluster][i_record]:
if cluster_list[i_cluster][i_record]["virtual_row_nr"] == 0:
cluster_number_list[cluster_list[i_cluster][i_record]["id"]][1] = self._cluster_number
else:
cluster_number_list[cluster_list[i_cluster][i_record]["id"]][2] = self._cluster_number
cluster_list[self._cluster_number] . append(cluster_list[i_cluster][i_record])
to_be_removed . append(cluster_list[i_cluster][i_record])
# If time gap between two records exceeds limit
if i_record < len(cluster_list[i_cluster])-1 and int(cluster_list[i_cluster][i_record+1]['year']) - int(cluster_list[i_cluster][i_record]['year']) > maximum_time_gap:
# Start new cluster
new_cluster_started = True
self._cluster_number +=1
# Add new cluster
cluster_list[self._cluster_number] = []
if new_cluster_started:
for record in cluster_list[i_cluster]:
record["matching"].add("split at time gap")
for i_record in to_be_removed:
cluster_list[i_cluster].remove(i_record)
if action=="report":
# Maximum time gap observed
max_gap_observed = 0
for i_record in range( len(cluster_list[i_cluster])-1 ):
if int(cluster_list[i_cluster][i_record+1]['year']) - int(cluster_list[i_cluster][i_record]['year']) > max_gap_observed:
max_gap_observed = int(cluster_list[i_cluster][i_record+1]['year']) - int(cluster_list[i_cluster][i_record]['year'])
for i_record in range( len(cluster_list[i_cluster]) ):
cluster_list[i_cluster][i_record]['maximum_time_gap'] = max_gap_observed
if self._detect_marriages:
# clean up the clusters, which contain only virtual records
for cluster in sorted(cluster_list):
try:
if all (cluster_list[cluster][y]["virtual_row_nr"] >= 0 for y in range(len(cluster_list[cluster]))):
for record in cluster_list[cluster]:
#print(cluster_number_list[record["id"]])
del cluster_number_list[record["id"]][record["virtual_row_nr"]+1]
#print("after deleting", cluster_number_list[record["id"]])
del cluster_list[cluster][:]
except:
pass
# find out the original records, whose virtual records were both cleaned up
for cluster in sorted(cluster_number_list):
if len(cluster_number_list[cluster]) < 2:
for record in range(len(cluster_list[cluster_number_list[cluster][0]])):
cluster_list[cluster_number_list[cluster][0]][record]["split_for_detecting_marriage"] = "possible born surname found"
cluster_list[cluster_number_list[cluster][0]][record]["matching"].add("split at time gap")
del cluster_number_list[cluster]
def _rework_for_marriages(self, cluster_list, cluster_number_list):
# All the virtual records that reach this function have fulfilled the criteria above. If the two virtual records derived from one original record are allocated to different clusters, the original record could refer to different persons, so it is marked as ambiguous.
# The matching codes are only combined if records are matched. For an ambiguous born surname, the matching codes of the virtual record remain with the other records in the same cluster, but not with its original record, because of the other ambiguous born surname.
#try:
for i_id in sorted(cluster_number_list):
new_cluster_nr_add = False
new_cluster_nr = []
###########################################################################################
# Find the conflicting situations, where both virtual records still exist
if len(cluster_number_list[i_id]) > 2:
for threeclusters in range(len(cluster_number_list[i_id])):
for record in cluster_list[cluster_number_list[i_id][threeclusters]]:
record["split_for_detecting_marriage"] = "ambiguous born surnames found"
# clear this dictionary, but not deleted (because of for loop)
cluster_number_list[i_id].clear()
############################################################################################
# if there is no conflicting virtual record anymore, only one virtual record still exists
elif len(cluster_number_list[i_id]) == 2:
#if not yet in the same cluster (because of other records, the real and the virtual record could already be brought into the same cluster)
i_cluster = cluster_number_list[i_id][0]
virtual_cluster = cluster_number_list[i_id][sorted(cluster_number_list[i_id])[-1]]
if i_cluster != virtual_cluster:
# create a comparing list for all records (only containing names and years) from the clusters, to which the original record could belong
comparing_list = []
# add names, years of the records in the cluster, where the virtual record is, into the comparing list
length_virtual_cluster = len(cluster_list[virtual_cluster])
for record1 in range(length_virtual_cluster):
comparing_list.append({})
comparing_list[-1]["year"] = cluster_list[virtual_cluster][record1]["year"]
# original name is changed back for comparing
if "virtual_row_nr" in cluster_list[virtual_cluster][record1]:
comparing_list[-1]["snm_normalized"] = cluster_list[virtual_cluster][record1]["original_snm"]
else:
comparing_list[-1]["snm_normalized"] = cluster_list[virtual_cluster][record1]["snm_normalized"]
# add all the records in the cluster, where the original record is, into the comparing list
for record0 in range(len(cluster_list[i_cluster])):
comparing_list.append({})
comparing_list[-1]["year"] = cluster_list[i_cluster][record0]["year"]
# if one record is a virtual record, the surname should be replaced by its original name for comparing
if "virtual_row_nr" in cluster_list[i_cluster][record0]:
comparing_list[-1]["snm_normalized"] = cluster_list[i_cluster][record0]["original_snm"]
else:
comparing_list[-1]["snm_normalized"] = cluster_list[i_cluster][record0]["snm_normalized"]
#all records in the comparing list are ordered by year of the publication
comparing_list = sorted(comparing_list, key=lambda k: k['year'])
                    # count how many times the surname changes across the chronologically ordered records
change_time = 0
for x in range(len(comparing_list)):
if x < len(comparing_list)-1 and comparing_list[x+1]["snm_normalized"] != comparing_list[x]["snm_normalized"]:
change_time += 1
''' shouldn't be happening:
# for the situation like "Jane Smith-Miller","Jane Smith-Miller","Jane Smith-Miller", where only combinations exist and those are the same
if change_time == 0:
# there are only virtual records in the cluster, where the virtual record is (maybe split by time gap)
for record2 in range(len(cluster_list[i_cluster])):
cluster_list[i_cluster][record2]["split_for_detecting_marriage"] = "possible born surname not found"
'''
# conflicting REAL name combinations, such as "Jane Smith","Jane Smith-Miller","Jane Smith-Walker"
conflicting_name_combination_found = False
for first_real_name_delete in range(len(comparing_list)):
if len(comparing_list[first_real_name_delete]["snm_normalized"].split()) > 1:
for second_real_name_delete in range(len(comparing_list)):
if len(comparing_list[second_real_name_delete]["snm_normalized"].split()) > 1 and comparing_list[first_real_name_delete]["snm_normalized"] != comparing_list[second_real_name_delete]["snm_normalized"]:
conflicting_name_combination_found = True
                                    # mark the records in the cluster containing the virtual record as ambiguous
for record8 in range(len(cluster_list[virtual_cluster])):
cluster_list[virtual_cluster][record8]["split_for_detecting_marriage"] = "conflicting marriage name combinations"
                                    # mark the records in the cluster containing the original record as ambiguous
for record3 in range(len(cluster_list[i_cluster])):
cluster_list[i_cluster][record3]["split_for_detecting_marriage"] = "conflicting marriage name combinations"
cluster_number_list[cluster_list[i_cluster][record3]["id"]].clear()
                    # if there are no conflicting name combinations
if not conflicting_name_combination_found:
                        # regarding divorce:
                        # Situation like Jane Smith-Miller & Jane Smith & Jane Smith-Miller
                        # Situation like Jane Smith & Jane Smith-Miller & Jane Smith & Jane Smith-Miller
                        # or no divorce:
                        # Situation like (Jane Smith-Miller & Jane Smith)
                        # Situation like (Jane Smith & Jane Smith-Miller & Jane Smith)
                        # are allowed
                        # if a disallowed situation happens, the records in the comparing list belong to different persons:
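                        # Worked example (illustrative only): for the chronologically ordered surnames
                        # ["smith", "smith miller", "smith"] change_time is 2; starting from a single
                        # surname this is still accepted when accept_devorce is True (threshold: > 2),
                        # but rejected when accept_devorce is False (threshold: > 1).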
if (accept_devorce and len(comparing_list[0]["snm_normalized"].split()) > 1 and change_time > 1) or \
(accept_devorce and len(comparing_list[0]["snm_normalized"].split()) == 1 and change_time > 2) or \
(not accept_devorce and len(comparing_list[0]["snm_normalized"].split()) > 1 and change_time > 0) or \
(not accept_devorce and len(comparing_list[0]["snm_normalized"].split()) == 1 and change_time > 1) :
for record9 in range(len(cluster_list[virtual_cluster])):
cluster_list[virtual_cluster][record9]["split_for_detecting_marriage"] = "non-linear surname changes"
for record4 in range(len(cluster_list[i_cluster])):
cluster_list[i_cluster][record4]["split_for_detecting_marriage"] = "non-linear surname changes"
if cluster_list[i_cluster][record4]["id"] in cluster_number_list and len(cluster_number_list[cluster_list[i_cluster][record4]["id"]]) > 0:
cluster_number_list[cluster_list[i_cluster][record4]["id"]].clear()
# if the records in the comparing list belong to the same person:
else:
new_cluster_nr_add = True
new_cluster_nr.append(i_cluster)
new_matching_code = cluster_list[virtual_cluster][0]["matching"].union(cluster_list[i_cluster][0]["matching"])
                            # move the records from the cluster containing the virtual record into the cluster containing the original record
while len(cluster_list[virtual_cluster]) > 0:
# record the cluster nr and id for rework for marriage name later
if "split_for_detecting_marriage" in cluster_list[virtual_cluster][-1]:
cluster_number_list[cluster_list[virtual_cluster][-1]["id"]][0] = new_cluster_nr[-1]
elif "virtual_row_nr" in cluster_list[virtual_cluster][-1]:
if cluster_list[virtual_cluster][-1]["virtual_row_nr"] == 0:
cluster_number_list[cluster_list[virtual_cluster][-1]["id"]][1] = new_cluster_nr[-1]
else:
cluster_number_list[cluster_list[virtual_cluster][-1]["id"]][2] = new_cluster_nr[-1]
cluster_list[i_cluster].append(cluster_list[virtual_cluster][-1])
cluster_list[virtual_cluster].pop(-1)
for record7 in range(len(cluster_list[i_cluster])):
cluster_list[i_cluster][record7]["split_for_detecting_marriage"] = "matched for the possible surname change"
cluster_list[i_cluster][record7]["matching"] = new_matching_code
if cluster_list[i_cluster][record7]["id"] in cluster_number_list and len(cluster_number_list[cluster_list[i_cluster][record7]["id"]]) > 0:
cluster_number_list[cluster_list[i_cluster][record7]["id"]].clear()
#for all_new_record_to_fix in range(len(cluster_list[virtual_cluster])):
#cluster_list[virtual_cluster][all_new_record_to_fix]["matching"] = str(original_matching_code_part1.union(virtual_matching_code_part1)) + original_matching_code_part2 + virtual_matching_code_part2
else:
pass
#except:
#pass
#clean up the virtual records in the cluster
for fix_cluster in sorted(cluster_list):
fix_record = 0
while fix_record < len(cluster_list[fix_cluster]):
try:
if "virtual_row_nr" in cluster_list[fix_cluster][fix_record]:
cluster_list[fix_cluster].pop(fix_record)
# add split for marriage
elif "split_for_detecting_marriage" in cluster_list[fix_cluster][fix_record]:
#cluster_list[fix_cluster][fix_record]["matching"] += cluster_list[fix_cluster][fix_record]["split_for_detecting_marriage"]
#del cluster_list[fix_cluster][fix_record]["split_for_detecting_marriage"]
fix_record += 1
else:
cluster_list[fix_cluster][fix_record]["split_for_detecting_marriage"] = ""
fix_record += 1
except:
fix_record += 1
pass
        # Clusters consisting only of virtual records cannot be removed here, because cluster_list is a dict.
        # If all records in a cluster have been removed it is simply an empty cluster, which does not affect anything;
        # empty clusters are dropped and the remaining keys renumbered in _remove_empty_cluster below.
def _remove_empty_cluster(self, cluster_list):
        ''' Some clusters may be empty because their records were moved into other clusters.
        '''
        # iterate over a snapshot of the keys so that entries can be deleted while looping
        for cluster in list(sorted(cluster_list)):
            if len(cluster_list[cluster]) < 1:
                #print(cluster,"is empty")
                del cluster_list[cluster]
# renumber clusters
cluster_list_temp = {}
cluster_renumber = 0
for cluster in sorted(cluster_list):
cluster_list_temp[cluster_renumber] = cluster_list[cluster]
for record in range(len(cluster_list_temp[cluster_renumber])):
cluster_list_temp[cluster_renumber][record]["cluster"] = cluster_renumber
if not self._detect_marriages:
cluster_list_temp[cluster_renumber][record]["split_for_detecting_marriage"] = None
cluster_renumber += 1
cluster_list.clear()
#print("cleared:",cluster_list)
for renamed_cluster in sorted(cluster_list_temp):
cluster_list[renamed_cluster] = cluster_list_temp[renamed_cluster]
#for x in cluster_list:
#print("reloaded:",x,cluster_list[x])
#for x in cluster_list_temp:
# print(x, cluster_list_temp[x])
def _save_to_file(self, cluster_list, output_format, file_name, name_table_format):
''' Save cluster_list to csv or to authors table in a database
'''
##########################################
# Transform to list of dicts table structure
# Initialize output
output_data = self._make_flat_result(cluster_list, name_table_format)
# Recognize format, if none given
if output_format is None:
if "xls" in file_name:
output_format = "xls"
else:
output_format = "csv"
########################################
# Save the structure to output format
# If output to csv file
if output_format=="csv":
# Check if file extension is ".csv" and attach this string otherwise
if file_name[-1:]=="/" or file_name[-1:]=="\\" or file_name=="":
file_name="persons.csv"
elif file_name[-4:]!=".csv":
file_name += ".csv"
# Get the file
            with open(file_name, 'w') as csv_file:
                csvWriter = csv.writer(csv_file, lineterminator='\n')
                # First write column headers
                csvWriter.writerow( list(output_data[0].keys()) )
                # Write data
                for i in range( len(output_data) ):
                    csvWriter.writerow([output_data[i][k] for k in output_data[i] ])
        # If output to xlsx file
if output_format=="xls":
try:
import pandas as pd
except:
print("Requires 'pandas' to export as xlsx.")
return
            # Check if file extension is ".xlsx" and attach this string otherwise
            if file_name[-1:]=="/" or file_name[-1:]=="\\" or file_name=="":
                file_name="persons.xlsx"
            elif file_name[-5:]!=".xlsx":
                file_name += ".xlsx"
# Get the file
xlsWriter = pd.ExcelWriter(file_name)
# Convert output to pandas DataFrame
df = self._convert_records_to_pandas(output_data)
# First write column headers
df.to_excel(xlsWriter, "persons")
xlsWriter.save()
def _make_flat_result(self, cluster_list, name_table_format):
        ''' Flatten cluster_list to a list of records
'''
##########################################
# Transform to list of dicts table structure
# Initialize output
output_data = []
processed_time = datetime.now(self._tz)
processed_time_string = processed_time.strftime(self._fmt)
# Iterate over all clusters
for i_cluster in cluster_list:
for record in cluster_list[i_cluster]:
# Compile table of the output data
matching_code = "equal"
if "vertical" in record["matching"]:
matching_code = "vertical"
if "interlaced" in record["matching"]:
matching_code = "interlaced"
output_data .append( collections.OrderedDict([
("person_id" , record["cluster"] ),
# ("fnm_normalized" , record["fnm_normalized"] ),
# ("snm_normalized" , record["snm_normalized"] ),
("source" , record["source"] ),
(name_table_format["columns"]["id_column"] , record["id"] ),
(name_table_format["columns"]["fnm_column"] , record["fnm"] ),
(name_table_format["columns"]["snm_column"] , record["snm"] ),
("matching" , matching_code ),
("saving_time" , processed_time_string )
]) )
if "year" in record.keys():
output_data[-1][name_table_format["columns"]["year_column"]] = record["year"]
if "mnm" in record.keys():
output_data[-1][name_table_format["columns"]["mnm_column"]] = record["mnm"]
if "maximum_time_gap" in record.keys():
output_data[-1]["maximum_time_gap"] = record["maximum_time_gap"]
if self._detect_marriages:
output_data[-1]["detecting_marriage"] = record["split_for_detecting_marriage"]
return output_data
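    # Illustrative sketch of a single flattened row produced above (the id/forename/surname/year
    # column names come from name_table_format and are assumptions here):
    #   OrderedDict([("person_id", 3), ("source", "default table"), ("name_id", 17),
    #                ("forename", "Jane"), ("surname", "Smith"), ("matching", "vertical"),
    #                ("saving_time", "2020-01-01 12:00:00"), ("year", 1997)])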
def _convert_records_to_pandas(self, in_data):
import pandas as pd
return pd.DataFrame( collections.OrderedDict( [ ( key , [ elem[key] for elem in in_data ] ) for key in in_data[0].keys() ] ) )
def _find_node_by_name(self, names, snm, fnm):
''' Find the node number for a given name in the tree 'names'
'''
try:
for index_record in range( len(names[snm]["records"]) ):
if names[snm]["records"][index_record]["fnm_normalized"]==fnm:
return names[snm]["node_by_record"][index_record]
except:
pass
return -1
def _identify_cols(self, table, source_type):
forename_identifiers = ["fore", "first", "fnm", "given", "christian", "baptism", "baptismal"] # http://www.thesaurus.com/browse/first-name
surname_identifiers = ["sur", "last", "snm", "family", "cognomen", "byname", "matronymic", "patronymic", "metronymic"] # http://www.thesaurus.com/browse/surname
middlename_identifiers = ["middle", "initial", "second"]
year_identifiers = ["year", "yr"]
id_identifiers = ["id"]
colnames = table[0].keys()
# If no middle name
name_format = "fnm and snm"
        year_column = None
        mnm_column = None
        id_column = None
        # initialize the name columns so the checks below do not fail when a column is missing
        fnm_column = None
        snm_column = None
for colname in colnames:
colname_norm = normalize(colname).replace(" ", "")
if any([(x in colname_norm) for x in forename_identifiers]):
fnm_column = colname
elif any([(x in colname_norm) for x in surname_identifiers]):
snm_column = colname
elif any([(x in colname_norm) for x in middlename_identifiers]):
mnm_column = colname
name_format = "fnm, middle and snm"
elif any([(x in colname_norm) for x in year_identifiers]):
year_column = colname
elif any([(x in colname_norm) for x in id_identifiers]):
id_column = colname
if fnm_column is None:
print("Error: Forename column missing! Please provide a column titled 'forename'.")
if snm_column is None:
print("Error: Surname column missing! Please provide a column titled 'surname'.")
return {
"name_format" : name_format,
"source_type" : source_type,
"columns" : {
"id_column" : id_column,
"snm_column" : snm_column,
"fnm_column" : fnm_column,
"mnm_column" : mnm_column,
"year_column" : year_column
}
}
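    # Illustrative example (hypothetical column headers): for rows such as
    #   {"First Name": "Jane", "Last Name": "Smith", "Publication Year": 1997, "Author ID": 17}
    # the loop above detects fnm_column="First Name", snm_column="Last Name",
    # year_column="Publication Year" and id_column="Author ID", with name_format "fnm and snm".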
def _add_id_col(self, table):
count = 0
for record in table:
record["name_id"] = count
count += 1
def _add_empty_col(self, table, col_name):
for record in table:
record[col_name] = ""
def _convert_table_to_records(self, in_data, format):
if format=="pandas":
return in_data.to_dict("records")
if format=="csv":
import csv
# Get the file
csvReader = csv.DictReader(open(in_data, 'r'))
return [ row for row in csvReader ]
if format=="xls":
try:
import pandas as pd
except:
print("Requires 'pandas' to export as xlsx.")
return
# Get the file
df = pd.read_excel(in_data)
return df.to_dict("records")
##########################################################
### Public functions ################################
def plot_persons(self, snm, fnm, selection="interrelated"):
'''
Draw a graph depicting the relationships between the names.
'''
if self._flat_tree is None:
print("Before plotting a graph, first process a table of names through the 'persons_from_names' function.")
return
names = self._flat_tree
snm = normalize(snm)
fnm = normalize(fnm)
start_node = self._find_node_by_name(names, snm, fnm)
if start_node!=-1:
# Reference to the records_by_node mapping
records_by_node = names[snm]["records_by_node"]
# Process all nodes
to_be_processed = list( range( len(records_by_node) ) )
# Move given node to the beginning
to_be_processed . remove(start_node)
to_be_processed = [start_node] + to_be_processed
# Find relevant set of nodes
if selection=="interrelated":
nodes = self._find_interrelated(names, snm, to_be_processed, ["identical", self._me_subset, self._it_subset, "crossed"])
elif selection=="vertical":
nodes = self._find_interrelated(names, snm, to_be_processed, ["identical", self._me_subset, self._it_subset])
elif selection=="all":
nodes = to_be_processed
# Node names
node_names = set()
# Graph (create from adjacency matrix)
G = Graph(names[snm]["matrix"], list(nodes))
# Transitive reduction
G . transitive_reduction()
# Get top nodes sorted by first name
G . set_top_nodes()
top_nodes = [names[snm]["records"][ records_by_node[x][0] ]["fnm_normalized"] for x in G.top_nodes]
top_nodes = sorted(top_nodes)
top_nodes.reverse()
G.top_nodes_sorted = [self._find_node_by_name(names, snm, x) for x in top_nodes]
# Get positions for a tree like layout
positions = G.get_node_positions()
matrix = G.matrix
edges = []
vertical_edges = []
crossed_edges = []
labels = []
# Node positions with the right coding (names, instead of number)
node_positions = {}
# Iterate over all edges
for first in nodes:
for second in nodes:
# Extract first name of the first and second node
fnm_first = names[snm]["records"][ records_by_node[first][0] ]["fnm"]
fnm_second = names[snm]["records"][ records_by_node[second][0] ]["fnm"]
# If the nodes are in a subset relationship
if matrix[first][second] in [self._me_subset, self._it_subset] and (fnm_second, fnm_first) not in vertical_edges:
edges.append( (fnm_first, fnm_second) )
vertical_edges.append( (fnm_first, fnm_second) )
labels.append( "subset" )
node_positions[fnm_first] = positions[first]
node_positions[fnm_second] = positions[second]
node_names.add(fnm_first)
node_names.add(fnm_second)
# If the nodes are in a non-transitive relationship
elif matrix[first][second]=="crossed" and (fnm_second, fnm_first) not in crossed_edges:
edges.append( (fnm_first, fnm_second) )
crossed_edges.append( (fnm_first, fnm_second) )
labels.append( "interlaced" )
node_positions[fnm_first] = positions[first]
node_positions[fnm_second] = positions[second]
node_names.add(fnm_first)
node_names.add(fnm_second)
# Draw the graph
draw_graph(nodes=list(node_names), edges=edges, vertical_edges=vertical_edges, crossed_edges=crossed_edges, labels=labels, node_positions=node_positions)
else:
print("Name not found.")
def persons_from_names(self, name_table, known_persons=None, output_file=None, output_file_format=None, status_messages=True):
"""
Identify persons in a table of names.
Check the file "examples.py" for usage examples.
Parameters:
- name_table: table of names (and potentially years)
-> Supported input formats presently include:
-- "records" (default, list of dicts or OrderedDicts, e.g. returned by SQL query)
-- "pandas" (pandas DataFrame)
-> accepts file path
-> the following file formats are supported
-- "csv" (csv file, requires additional parameter "path_name_table")
-- "xls" (Excel file, requires additional parameter "path_name_table")
- known_persons: table of names of known unique persons who have been identified previously
-> accepts file path if input_format="csv" or input_format="xls"
        - output_file: path for saving the result to a file
- output_file_format: supported output file formats presently include
-- "csv" (default)
-- "xls"
"""
# Save start time:
zeit=int(time.time())
####
## Prepare input table
####
# Recognize input format
if "pandas" in str(type(name_table)):
input_format = "pandas"
elif "list" in str(type(name_table)):
input_format = "records"
elif "str" in str(type(name_table)):
if ".csv" in name_table:
input_format = "csv"
elif "xls" in name_table:
input_format = "xls"
# Convert table to internal data format
if input_format != "records":
name_table = self._convert_table_to_records(name_table, input_format)
# Identify forename col
name_table_format = self._identify_cols(name_table, "default table")
# Add id column if missing
if name_table_format["columns"]["id_column"] is None:
self._add_id_col(name_table)
name_table_format["columns"]["id_column"]="name_id"
# Same as above for known persons table
if known_persons is not None:
# Convert table to internal data format
if input_format != "records":
known_persons = self._convert_table_to_records(known_persons, input_format)
# Identify forename col
known_persons_format = self._identify_cols(known_persons, self._table_with_unique_names)
if known_persons_format["columns"]["id_column"] is None:
self._add_id_col(known_persons)
known_persons_format["columns"]["id_column"]="name_id"
if known_persons_format["columns"]["year_column"] is None:
self._add_empty_col(known_persons, "year")
known_persons_format["columns"]["year_column"]="year"
####
## Sort input data into a tree structure according to surname
####
# Internal data structure by surname
self._flat_tree=collections.OrderedDict()
if status_messages:
print("Tree creation in progress...")
self._make_flat_tree(name_table, self._flat_tree, name_table_format)
if known_persons is not None:
# Identify forename col
self._make_flat_tree(known_persons, self._flat_tree, known_persons_format)
####
## Person identification from forename
####
if status_messages:
print("Clustering in progress...")
cluster_list={}
self._cluster_number = 0
# to record in which clusters the original records and their virtual ones are assigned
cluster_number_list = {}
self._cluster(self._flat_tree, cluster_list, cluster_number_list)
if self._split_by_time_gap and name_table_format["columns"]["year_column"] is not None:
if status_messages:
print(format("Splitting entries with more than {} years between chronologically succeeding entries...", self._maximum_time_gap))
self._time_gap(cluster_list, self._maximum_time_gap, cluster_number_list, action="split")
if self._detect_marriages:
if status_messages:
print("Detecting marriages and combining entries with marriage-related surname change...")
self._rework_for_marriages(cluster_list, cluster_number_list)
if self._empty_clusters_remove:
if status_messages:
print("Tidying up...")
self._remove_empty_cluster(cluster_list)
####
## Processing results
####
# Save authors to file
if output_file is not None:
if status_messages:
print("Saving the results")
self._save_to_file(cluster_list, output_file_format, output_file, name_table_format)
# if status_messages:
# print( "Name matching completed in {} seconds. Identified {} persons.".format( str( int(time.time()) - zeit ) , str(len(cluster_list)) ) )
if input_format=="pandas":
return self._convert_records_to_pandas(self._make_flat_result(cluster_list, name_table_format))
elif input_format=="records" and "dict" in str(type(name_table[0])):
return [ dict(record) for record in self._make_flat_result(cluster_list, name_table_format) ]
else:
return self._make_flat_result(cluster_list, name_table_format)
| python | 70,920 |
"""
myDatasetEvaluator.py
Extensions for evaluations.
Copyright (C) 2016 John Moore
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from collections import OrderedDict
import logging
from picklable_itertools.extras import equizip
import theano
from theano import tensor
from blocks.utils import dict_subset
from blocks.monitoring.aggregation import (_DataIndependent, Mean,
TakeLast, MonitoredQuantity)
from blocks.graph import ComputationGraph
from blocks.utils import reraise_as
import numpy as np
import time
logger = logging.getLogger(__name__)
class myDatasetEvaluator(object):
def __init__(self, fRR, sharedVars, sharedDataTrain, sharedDataValid):
self.fRR = fRR
self.sharedVars=sharedVars
self.localShared = np.array(0.0, dtype=theano.config.floatX)
self.sharedDataTrain = sharedDataTrain
self.sharedDataValid = sharedDataValid
#switch pointers between x and y
def switchData(self, x, y):
for key in x:
temp = x[key].get_value(borrow=True)
x[key].set_value(y[key].get_value(borrow=True), borrow=True)
y[key].set_value(temp, borrow=True)
    def evaluate(self, data_stream):
        startTime = time.time()
        # swap the validation data into the shared training variables
        self.switchData(self.sharedDataTrain, self.sharedDataValid)
        for batch in data_stream.get_epoch_iterator(as_dict=True):
            self.fRR(batch['int_stream_From'], batch['int_stream_To'])
        # swap the original training data back
        self.switchData(self.sharedDataTrain, self.sharedDataValid)
        endTime = time.time()
        print("time: "+str(endTime-startTime))
        # mean reciprocal rank: accumulated reciprocal-rank sum divided by the accumulated count
        MRR = self.sharedVars['sharedMRRSUM'].get_value() / self.sharedVars['sharedTOTSUM'].get_value()
        # reset the accumulators for the next evaluation
        self.sharedVars['sharedMRRSUM'].set_value(self.localShared)
        self.sharedVars['sharedTOTSUM'].set_value(self.localShared)
        # return 1 - MRR, presumably so the tracked value decreases as MRR improves
        return {'MRR': 1-MRR}
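    # Note: MRR is the mean reciprocal rank; e.g. if the true targets of three queries are
    # ranked 1st, 2nd and 4th, MRR = (1/1 + 1/2 + 1/4) / 3 ≈ 0.583. The shared variables
    # sharedMRRSUM and sharedTOTSUM are assumed (from their names) to accumulate the
    # reciprocal-rank sum and the query count inside fRR.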
def evaluateTest(self, data_stream, sharedDataTest):
startTime = time.time()
self.switchData(self.sharedDataTrain, sharedDataTest)
for batch in data_stream.get_epoch_iterator(as_dict=True):
self.fRR(batch['int_stream_From'], batch['int_stream_To'])
self.switchData(self.sharedDataTrain, sharedDataTest)
endTime = time.time()
print("time: "+str(endTime-startTime))
MRR = self.sharedVars['sharedMRRSUM'].get_value()/ self.sharedVars['sharedTOTSUM'].get_value()
self.sharedVars['sharedMRRSUM'].set_value(self.localShared)
self.sharedVars['sharedTOTSUM'].set_value(self.localShared)
return MRR
| python | 3,206 |
# -*- coding: utf-8 -*-
import datetime
from sqlalchemy import Column, DateTime, ForeignKey, Text, Integer, String, Float
from app import db
from app.models.user_model import UserModel
class TimelineModel(db.Model):
__tablename__ = 'timelines'
id = Column(Integer, primary_key=True)
user_id = Column(Integer, ForeignKey('users.id', onupdate="CASCADE", ondelete="CASCADE"), nullable=False)
title = Column(String(255), nullable=False, default='')
content = Column(Text, nullable=False, default='')
latitude = Column(Float, nullable=False, default=-1)
longitude = Column(Float, nullable=False, default=-1)
created_at = Column(DateTime, default=datetime.datetime.now)
def __init__(self, user_id=None, title=None, content=None, latitude=None, longitude=None):
self.user_id = user_id
self.title = title
self.content = content
self.latitude = latitude
self.longitude = longitude
    @staticmethod
    def get_timeline(order='desc', page=0, limit=10):
posts = []
posts_query = TimelineModel.query \
.order_by(TimelineModel.id.asc() if order == 'asc' else TimelineModel.id.desc()) \
.limit(limit) \
.offset(page * limit)
for post in posts_query:
posts.append(post)
return posts
    @staticmethod
    def get_post(post_id=0):
post_query = TimelineModel.query \
.filter(TimelineModel.id == post_id).first()
return post_query
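# Minimal usage sketch (illustrative; assumes a Flask application context and an initialized `db`):
#   post = TimelineModel(user_id=1, title="Hello", content="First post", latitude=52.5, longitude=13.4)
#   db.session.add(post)
#   db.session.commit()
#   latest = TimelineModel.get_timeline(order='desc', page=0, limit=10)
#   single = TimelineModel.get_post(post_id=post.id)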
| python | 1,422 |
import numpy as np
from matplotlib import pyplot as plt
import pandas as pd
from sklearn.tree import DecisionTreeClassifier
dataset = pd.read_csv("C:/Users/Sarthak/Downloads/train.csv")
#print(data)
clf = DecisionTreeClassifier()
#Training Datasets
xtrain = dataset.iloc[0:21000,1:].values
train_label = dataset.iloc[0:21000,0].values
clf.fit(xtrain, train_label)
#Testing Data
xtest = dataset.iloc[21000:,1:].values
actual_label = dataset.iloc[21000:,0].values
#sample data
d = xtest[8] #can use any index below 21000 (the size of the held-out test split)
d.shape = (28,28)
plt.imshow(255-d,cmap = "gray") #we have 255-d because I want white background with black colour
plt.show()
print(clf.predict([xtest[8]]))
#accuracy
p = clf.predict(xtest) #xtest is already a 2-D array of samples, so it can be passed directly
count = 0
for i in range(0,21000):
    #count a sample as correct when the prediction matches the true label
    if p[i] == actual_label[i]:
        count += 1
print("ACCURACY", (count/21000)*100) | python | 973 |
# This is the Python adaptation and derivative work of Myia (https://github.com/mila-iqia/myia/).
#
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Providing interface methods."""
import types
import sys
from collections import OrderedDict
from functools import wraps
from mindspore import context
from mindspore import log as logger
from .tensor import Tensor as MsTensor
from .._c_expression import generate_key, Executor_, Tensor, MetaTensor, PynativeExecutor_
from .._c_expression import verify_inputs_signature, init_exec_dataset, _set_dataset_mode_config, init_pipeline
from ..parallel._ps_context import _is_role_pserver
from ..parallel._utils import _get_device_num, _get_global_rank, _need_to_full, _check_full_batch, _to_full_tensor, \
_get_parameter_broadcast, _get_pipeline_stages
# store ms_function class compiled pipeline cache
ms_compile_cache = {}
BROADCAST_PHASE = "_broadcast_"
def _convert_function_arguments(fn, *args):
"""
Process the fn default parameters.
Args:
fn (Function): The function to be parsed.
args (tuple): The parameters of the function.
"""
arguments_dict = OrderedDict()
parse_method = None
if isinstance(fn, (types.FunctionType, types.MethodType)):
parse_method = fn.__name__
index = 0
for value in args:
arguments_dict[f'arg{index}'] = value
index = index + 1
logger.debug("fn(%r) full parameters dict is: %r", fn, arguments_dict)
converted = True
else:
logger.warning("Find error: fn isn't function or method")
converted = False
return converted, arguments_dict, parse_method
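# Illustrative example: for `def fn(x, y)` called as _convert_function_arguments(fn, a, b),
# the return value is (True, OrderedDict([('arg0', a), ('arg1', b)]), 'fn').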
def _wrap_func(fn):
"""
Wrapper function, convert return data to tensor or tuple of tensor.
Args:
fn (Function): The function need be wrapped.
Returns:
Function, a new function with return suitable format data.
"""
@wraps(fn)
def wrapper(*arg, **kwargs):
results = fn(*arg, **kwargs)
def _convert_data(data):
if isinstance(data, Tensor) and not isinstance(data, MsTensor):
return MsTensor(data)
if isinstance(data, tuple):
return tuple(_convert_data(x) for x in data)
if isinstance(data, list):
return list(_convert_data(x) for x in data)
return data
return _convert_data(results)
return wrapper
def _exec_init_graph(obj, init_phase):
"""Execute the parameter initializer graph."""
inst_executor = Executor_.get_instance()
param_dict = OrderedDict()
for name, param in obj.parameters_dict().items():
if not param.is_init:
param_dict[name] = param
param.is_init = True
param.data.init_flag = True
if param_dict:
inst_executor.run_init_graph(param_dict, init_phase)
class _MindSporeFunction:
"""
Represents a function compiled by mind expression.
_MindSporeFunction will compile the original function for every combination
of argument types and shapes it is given (as well as their values, optionally).
Args:
fn (Function): The root function to compile.
input_signature (Function): User defines signature to verify input.
obj (Object): If function is a method, obj is the owner of function,
else, obj is none.
"""
def __init__(self, fn, input_signature=None, obj=None):
self.fn = fn
self.save_graphs = context.get_context("save_graphs")
self.save_graphs_path = context.get_context("save_graphs_path")
self.input_signature = input_signature
self.obj = None
self.identify_obj = None
if hasattr(obj, fn.__name__):
self.obj = obj
elif obj is not None:
self.identify_obj = obj
self._executor = Executor_.get_instance()
def build_data_init_graph(self, graph_name):
"""Build GE data graph and init graph for the given graph name."""
if self.obj is None:
logger.warning("Make sure parameter should not be used in function")
para_dict = OrderedDict()
self._executor.build_data_graph(para_dict, graph_name)
return
self._executor.build_data_graph(self.obj.parameters_dict(), graph_name, self.obj.parameters_broadcast_dict())
init_phase = "init_subgraph" + graph_name[graph_name.find("."):]
_exec_init_graph(self.obj, init_phase)
def compile(self, arguments_dict, method_name):
"""Returns pipeline for the given args."""
args_list = tuple(arguments_dict.values())
arg_names = tuple(arguments_dict.keys())
# remove first self parameter when fn is a method
if self.obj is not None:
args_list = args_list[1:]
arg_names = arg_names[1:]
# verify the signature for both function and method
if self.input_signature is not None:
signatures = []
for sig_spec in self.input_signature:
if not isinstance(sig_spec, MetaTensor):
raise TypeError("Input_signature is not MetaTensor")
signatures.append(sig_spec)
is_valid_input = verify_inputs_signature(signatures, args_list)
if not is_valid_input:
raise ValueError("Inputs is incompatible with input signature!")
dic = dict(zip(arg_names, args_list))
generate_name = self.fn.__module__ + "." + self.fn.__name__
self.fn.__parse_method__ = method_name
# replace key with obj info and object ext info when fn is a method
if self.obj is not None:
self.obj.__parse_method__ = method_name
generate_name = self.obj.__module__ + "."
if self.obj.__class__.__name__ != "ClipByNorm":
generate_name = generate_name + str(self.obj.create_time)
if self.identify_obj is not None:
generate_name = generate_name + str(id(self.identify_obj))
key = generate_key(generate_name, dic)
phase = str(key[1]) + generate_name
if key not in ms_compile_cache.keys():
is_compile = False
if self.obj is None:
is_compile = self._executor.compile(self.fn, args_list, phase, True)
else:
is_compile = self._executor.compile(self.obj, args_list, phase, True)
if not is_compile:
raise RuntimeError("Executor compile failed.")
if context.get_context("enable_ge"):
self.build_data_init_graph(phase)
# since function can be redefined, we only cache class method pipeline
if self.obj is not None or self.identify_obj is not None:
ms_compile_cache[key] = phase
return phase
return ms_compile_cache[key]
@_wrap_func
def __call__(self, *args):
init_pipeline()
converted, arguments_dict, parse_method = _convert_function_arguments(self.fn, *args)
if not converted:
            raise RuntimeError('Processing the function parameters failed')
args_list = tuple(arguments_dict.values())
if self.obj is not None:
args_list = args_list[1:]
phase = self.compile(arguments_dict, parse_method)
if context.get_context("precompile_only"):
return None
new_inputs = []
for i in args_list:
if isinstance(i, Tensor):
new_inputs.append(i)
return self._executor(tuple(new_inputs), phase)
def ms_function(fn=None, obj=None, input_signature=None):
"""
Create a callable MindSpore graph from a python function.
This allows the MindSpore runtime to apply optimizations based on graph.
Args:
fn (Function): The Python function that will be run as a graph. Default: None.
obj (Object): The Python Object that provides the information for identifying the compiled function.Default:
None.
input_signature (MetaTensor): The MetaTensor which describes the input arguments. The MetaTensor specifies
the shape and dtype of the Tensor and they will be supplied to this function. If input_signature
is specified, each input to `fn` must be a `Tensor`. And the input parameters of `fn` cannot accept
`**kwargs`. The shape and dtype of actual inputs should keep the same as input_signature. Otherwise,
TypeError will be raised. Default: None.
Returns:
Function, if `fn` is not None, returns a callable function that will execute the compiled function; If `fn` is
None, returns a decorator and when this decorator invokes with a single `fn` argument, the callable function is
equal to the case when `fn` is not None.
Examples:
>>> from mindspore.ops import functional as F
...
>>> x = Tensor(np.ones([1, 1, 3, 3]).astype(np.float32))
>>> y = Tensor(np.ones([1, 1, 3, 3]).astype(np.float32))
...
>>> # create a callable MindSpore graph by calling ms_function
>>> def tensor_add(x, y):
... z = x + y
... return z
...
>>> tensor_add_graph = ms_function(fn=tensor_add)
>>> out = tensor_add_graph(x, y)
...
>>> # create a callable MindSpore graph through decorator @ms_function
>>> @ms_function
... def tensor_add_with_dec(x, y):
... z = x + y
... return z
...
>>> out = tensor_add_with_dec(x, y)
...
>>> # create a callable MindSpore graph through decorator @ms_function with input_signature parameter
>>> @ms_function(input_signature=(MetaTensor(mindspore.float32, (1, 1, 3, 3)),
... MetaTensor(mindspore.float32, (1, 1, 3, 3))))
... def tensor_add_with_sig(x, y):
... z = x + y
... return z
...
>>> out = tensor_add_with_sig(x, y)
"""
def wrap_mindspore(func):
@wraps(func)
def staging_specialize(*args):
process_obj = obj
if args and not isinstance(args[0], MsTensor) and hasattr(args[0], func.__name__):
process_obj = args[0]
return _MindSporeFunction(func, input_signature, process_obj)(*args)
return staging_specialize
if fn is not None:
return wrap_mindspore(fn)
return wrap_mindspore
def _generate_pip_args(obj, *args, method="construct"):
"""Generate arguments for pipeline."""
if hasattr(obj, method):
fn = getattr(obj, method)
else:
        raise AttributeError('The process method does not exist')
converted, arguments_dict, parse_method = _convert_function_arguments(fn, *args)
if not converted:
        raise RuntimeError('Processing the method parameters failed')
args_list = tuple(arguments_dict.values())
args_names = tuple(arguments_dict.keys())
obj.__parse_method__ = parse_method
return args_names, args_list
def _get_auto_split_param_names(parameter_layout_dict):
auto_split_params = {}
for key, value in parameter_layout_dict.items():
for dim in value[1]:
if dim != -1:
auto_split_params[key] = value
break
    # materialize as a list so the result can be reused for truth tests and membership checks
    auto_split_param_names = [param_name for param_name in auto_split_params]
return auto_split_param_names
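# Illustrative sketch (assumed layout format, inferred from the loop above): for an entry such as
#   parameter_layout_dict = {"fc.weight": ([4, 2], [0, -1])}
# value[1] contains a split dimension (0 != -1), so "fc.weight" would be reported as auto-split,
# whereas a layout of [-1, -1] would not.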
def _build_broadcast_graph(broadcast_params_dict, broadcast_phase):
"""Build broadcast graph."""
from mindspore.nn.wrap.cell_wrapper import _BroadCastCell
if not broadcast_params_dict:
broadcast_params_dict = {}
broadcast_params = []
for param in broadcast_params_dict.values():
broadcast_params.append(Tensor(param.asnumpy()))
_broadcast_net = _BroadCastCell(broadcast_params)
_broadcast_net.phase = broadcast_phase
broadcasted_params = _broadcast_net()
for param_name, param in zip(broadcast_params_dict.keys(), broadcasted_params):
broadcast_params_dict[param_name].set_data(param)
def _parameter_broadcast(obj, auto_parallel_mode):
"""Parameter broadcast."""
auto_split_param_names = []
if auto_parallel_mode:
auto_split_param_names = _get_auto_split_param_names(obj.parameter_layout_dict)
broadcast_params_dict = obj.parameters_broadcast_dict()
if auto_split_param_names and broadcast_params_dict:
broadcast_params_dict = OrderedDict()
for param_name, param in obj.parameters_broadcast_dict().items():
if param_name not in auto_split_param_names:
broadcast_params_dict[param_name] = param
broadcast_phase = "_broadcast_subgraph"
_build_broadcast_graph(broadcast_params_dict, broadcast_phase)
class _PynativeExecutor:
"""
An pynative executor used to compile/manage/run graph.
Returns:
Graph, return the result of pipeline running.
"""
def __init__(self):
self._executor = PynativeExecutor_.get_instance()
def new_graph(self, obj, *args, **kwargs):
self._executor.new_graph(obj, *args, *(kwargs.values()))
def end_graph(self, obj, output, *args, **kwargs):
self._executor.end_graph(obj, output, *args, *(kwargs.values()))
def check_graph(self, obj, *args, **kwargs):
return self._executor.check_graph(obj, *args, *(kwargs.values()))
def check_run(self, obj, *args, **kwargs):
return self._executor.check_run(obj, *args, *(kwargs.values()))
def grad(self, grad, obj, weights, *args, **kwargs):
self._executor.grad_net(grad, obj, weights, *args, *(kwargs.values()))
def clear(self, cell_id=""):
self._executor.clear(cell_id)
def sync(self):
self._executor.sync()
def set_grad_flag(self, flag):
self._executor.set_grad_flag(flag)
def enter_construct(self, cell):
self._executor.enter_construct(cell)
def leave_construct(self, cell):
self._executor.leave_construct(cell)
def parameter_broadcast(self, obj, phase, auto_parallel_mode):
if BROADCAST_PHASE not in phase and _get_parameter_broadcast():
_parameter_broadcast(obj, auto_parallel_mode)
def __call__(self, obj, *args, **kwargs):
args = args + tuple(kwargs.values())
return self._executor(obj, args, "")
class _Executor:
"""
An executor used to compile/manage/run graph.
Including data_graph, train_graph, eval_graph and predict graph.
Returns:
Graph, return the result of pipeline running.
"""
def __init__(self):
# create needed graph by lazy mode
self.is_init = False
self._executor = Executor_.get_instance()
self.compile_cache = {}
self._executor.set_py_exe_path(sys.executable)
def init_dataset(self, queue_name, dataset_size, batch_size, dataset_types, dataset_shapes,
input_indexs, phase='dataset'):
"""
Initialization interface for calling data subgraph.
Args:
queue_name (str): The name of tdt queue on the device.
dataset_size (int): The size of dataset.
batch_size (int): The size of batch.
dataset_types (list): The output types of element in dataset.
dataset_shapes (list): The output shapes of element in dataset.
input_indexs (list): The index of data with net.
phase (str): The name of phase, e.g., train_dataset/eval_dataset. Default: 'dataset'.
Returns:
bool, specifies whether the data subgraph was initialized successfully.
"""
if not init_exec_dataset(queue_name=queue_name,
size=dataset_size,
batch_size=batch_size,
types=dataset_types,
shapes=dataset_shapes,
input_indexs=input_indexs,
phase=phase):
raise RuntimeError("Failure to init and dataset subgraph!")
return True
def _build_data_graph(self, obj, phase):
self._executor.build_data_graph(obj.parameters_dict(), phase, obj.parameters_broadcast_dict())
def _set_dataset_mode(self, args_list):
"""set dataset mode."""
# decide whether to sink based on whether the inputs is virtual or args_list is ()
if (args_list and isinstance(args_list[0], Tensor) and args_list[0].virtual_flag) or \
(args_list is not None and args_list == ()):
_set_dataset_mode_config('sink')
else:
_set_dataset_mode_config('normal')
def compile(self, obj, *args, phase='predict', do_convert=True, auto_parallel_mode=False):
"""
Compiles graph.
Args:
obj (Function/Cell): The function or cell instance need compile.
args (tuple): Function or cell input arguments.
phase (str): The name of compile phase. Default: 'predict'.
do_convert (bool): When set to True, convert ME graph to GE graph after compiling graph.
auto_parallel_mode: When set to True, use auto parallel mode to compile graph.
Return:
Str, the full phase of the cell.
Bool, if the graph has been compiled before, return False, else return True.
"""
args_names, args_list = _generate_pip_args(obj, *args)
dic = dict(zip(args_names, args_list))
key = generate_key(phase, dic)
obj.phase_prefix = str(key[1])
if 'export' in phase:
phase = phase + '.' + obj.phase_prefix + '.' + str(obj.create_time)
else:
phase = obj.phase_prefix + phase + '.' + str(obj.create_time)
if phase in self.compile_cache.keys():
logger.debug("%r graph has existed.", phase)
return phase, False
obj.check_names()
_check_full_batch()
self._set_dataset_mode(args_list)
is_sink_mode = args and isinstance(args[0], Tensor) and args[0].virtual_flag
if auto_parallel_mode and _need_to_full() and not is_sink_mode and obj.auto_parallel_compile_and_run():
args_full = _to_full_tensor(args, _get_device_num(), _get_global_rank())
_, args_list = _generate_pip_args(obj, *args_full)
enable_debug_runtime = context.get_context("enable_debug_runtime")
enable_ge = context.get_context("enable_ge")
use_vm = not enable_ge or (enable_debug_runtime and context.get_context("mode") == context.PYNATIVE_MODE)
result = self._executor.compile(obj, args_list, phase, use_vm)
self.compile_cache[phase] = phase
if not result:
raise RuntimeError("Executor compile failed.")
graph = self._executor.get_func_graph(phase)
if graph is None:
logger.error("%r graph compile failed.", phase)
if not do_convert:
return phase, True
if auto_parallel_mode:
obj.parameter_layout_dict = self._executor.get_parameter_layout(phase)
if _get_pipeline_stages() > 1:
obj.parallel_parameter_name_list = self._executor.get_parallel_parameter_name_list(phase)
obj.remove_redundant_parameters()
replace = obj.init_parameters_data(auto_parallel_mode=auto_parallel_mode)
if not enable_debug_runtime or enable_ge:
if auto_parallel_mode:
obj.load_parameter_slice(None)
self._updata_param_node_default_input(phase, replace)
# set parallel inputs in sink mode
if auto_parallel_mode and is_sink_mode:
obj.set_parallel_input_with_inputs(*args)
# the following GE init process is not needed when use vm or ms backend
if enable_ge:
self._build_data_graph(obj, phase)
if "export" not in phase:
init_phase = "init_subgraph" + "." + str(obj.create_time)
_exec_init_graph(obj, init_phase)
elif not enable_ge and "export" in phase:
self._build_data_graph(obj, phase)
elif BROADCAST_PHASE not in phase and _get_parameter_broadcast():
_parameter_broadcast(obj, auto_parallel_mode)
return phase, True
def _updata_param_node_default_input(self, phase, replace):
new_param = {x.name: replace[x] for x in replace if id(x) != id(replace[x])}
return self._executor.updata_param_node_default_input(phase, new_param)
def _get_shard_strategy(self, obj):
real_phase = obj.phase_prefix + obj.phase + '.' + str(obj.create_time)
return self._executor.get_strategy(real_phase)
def _get_num_parallel_ops(self, obj):
real_phase = obj.phase_prefix + obj.phase + '.' + str(obj.create_time)
return self._executor.get_num_parallel_ops(real_phase)
def _get_allreduce_fusion(self, obj):
real_phase = obj.phase_prefix + obj.phase + '.' + str(obj.create_time)
return self._executor.get_allreduce_fusion(real_phase)
def has_compiled(self, phase='predict'):
"""
Specify whether have been compiled.
Args:
phase (str): The phase name. Default: 'predict'.
Returns:
bool, specifies whether the specific graph has been compiled.
"""
return self._executor.has_compiled(phase)
def __call__(self, obj, *args, phase='predict'):
if context.get_context("precompile_only") or _is_role_pserver():
return None
return self.run(obj, *args, phase=phase)
@_wrap_func
def _exec_pip(self, obj, *args, phase=''):
"""Execute the generated pipeline."""
fn = obj.construct
converted, arguments_dict, parse_method = _convert_function_arguments(fn, *args)
if not converted:
            raise RuntimeError('Processing the method parameters failed')
args_list = tuple(arguments_dict.values())
obj.__parse_method__ = parse_method
return self._executor(args_list, phase)
def run(self, obj, *args, phase='predict'):
"""
Run the specific graph.
Args:
phase (str): The phase name. Default: 'predict'.
Returns:
Tensor/Tuple, return execute result.
"""
if phase == 'save':
return self._executor((), phase + '.' + str(obj.create_time))
phase_real = obj.phase_prefix + phase + '.' + str(obj.create_time)
if self.has_compiled(phase_real):
return self._exec_pip(obj, *args, phase=phase_real)
raise KeyError('{} graph is not exist.'.format(phase_real))
def del_net_res(self, net_id):
self._executor.del_net_res(net_id)
def _get_func_graph_proto(self, obj, exec_id, ir_type="onnx_ir", use_prefix=False):
"""Get graph proto from pipeline."""
if use_prefix:
exec_id = obj.phase_prefix + exec_id
if self._executor.has_compiled(exec_id) is False:
return None
return self._executor.get_func_graph_proto(exec_id, ir_type)
def export(self, file_name, graph_id):
"""
Export graph.
Args:
file_name (str): File name of model to export
graph_id (str): id of graph to be exported
"""
from .._c_expression import export_graph
export_graph(file_name, 'AIR', graph_id)
def fetch_info_for_quant_export(self, exec_id):
"""Get graph proto from pipeline."""
if self._executor.has_compiled(exec_id) is False:
return None
return self._executor.fetch_info_for_quant_export(exec_id)
_executor = _Executor()
_pynative_exec = _PynativeExecutor()
__all__ = ['ms_function']
| python | 24,410 |
# Copyright 2017-2019 Manheim / Cox Automotive
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from unittest.mock import patch, call, DEFAULT, Mock, PropertyMock
import pytest
from functools import partial
from c7n.config import Config
from c7n_mailer.cli import CONFIG_SCHEMA as MAILER_SCHEMA
from manheim_c7n_tools.vendor.mugc import AWS
import manheim_c7n_tools.runner as runner
from manheim_c7n_tools.runner import BaseStep
from manheim_c7n_tools.utils import bold
from manheim_c7n_tools.config import ManheimConfig
from c7n_mailer.deploy import get_archive
from c7n.mu import PythonPackageArchive
pbm = 'manheim_c7n_tools.runner'
ALL_REGIONS = [
"ap-south-1",
"eu-west-3",
"eu-west-2",
"eu-west-1",
"ap-northeast-2",
"ap-northeast-1",
"sa-east-1",
"ca-central-1",
"ap-southeast-1",
"ap-southeast-2",
"eu-central-1",
"us-east-1",
"us-east-2",
"us-west-1",
"us-west-2"
]
class FakeConfig:
def __init__(self, regions):
self.regions = regions
class StepTester(object):
def setup(self):
# in order to supplant __getattr__ calls
self.m_conf = Mock(spec=ManheimConfig)
self.m_conf.account_id = '01234567890'
class TestPolicygenStep(StepTester):
def test_run(self):
with patch('%s.PolicyGen' % pbm, autospec=True) as mock_pg:
runner.PolicygenStep(None, self.m_conf).run()
assert mock_pg.mock_calls == [
call(self.m_conf),
call().run()
]
def test_dryrun(self):
with patch('%s.PolicyGen' % pbm, autospec=True) as mock_pg:
runner.PolicygenStep(None, self.m_conf).dryrun()
assert mock_pg.mock_calls == [
call(self.m_conf),
call().run()
]
def test_run_in_region(self):
conf = FakeConfig(ALL_REGIONS)
for rname in ALL_REGIONS:
if rname == ALL_REGIONS[0]:
assert runner.PolicygenStep.run_in_region(rname, conf) is True
else:
assert runner.PolicygenStep.run_in_region(rname, conf) is False
class TestValidateStep(StepTester):
def test_run(self):
mock_conf = Mock(spec_set=ManheimConfig)
with patch('%s.validate' % pbm, autospec=True) as mock_validate:
with patch('%s.Config.empty' % pbm) as mock_empty:
mock_empty.return_value = mock_conf
runner.ValidateStep('rName', self.m_conf).run()
assert mock_validate.mock_calls == [call(mock_conf)]
assert mock_empty.mock_calls == [
call(configs=['custodian_rName.yml'], region='rName',
check_deprecations="yes")
]
def test_dryrun(self):
mock_conf = Mock(spec_set=ManheimConfig)
with patch('%s.validate' % pbm, autospec=True) as mock_validate:
with patch('%s.Config.empty' % pbm) as mock_empty:
mock_empty.return_value = mock_conf
runner.ValidateStep('rName', self.m_conf).dryrun()
assert mock_validate.mock_calls == [call(mock_conf)]
assert mock_empty.mock_calls == [
call(configs=['custodian_rName.yml'], region='rName',
check_deprecations="yes")
]
def test_run_in_region(self):
for rname in ALL_REGIONS:
assert runner.ValidateStep.run_in_region(rname, None) is True
class TestMugcStep(StepTester):
def test_run(self):
type(self.m_conf).function_prefix = PropertyMock(
return_value='foobar-'
)
mock_pol1 = Mock(provider_name='foo')
mock_pol2 = Mock(provider_name='aws')
mock_pol3 = Mock(provider_name='azure')
mock_conf = Mock(spec_set=ManheimConfig)
mock_aws = Mock(spec_set=AWS)
mock_aws.initialize_policies.return_value = {'aws': 'policies'}
mock_pc = Mock()
with patch('%s.Config.empty' % pbm) as mock_empty:
mock_empty.return_value = mock_conf
with patch.multiple(
pbm,
AWS=DEFAULT,
load_policies=DEFAULT,
resources_gc_prefix=DEFAULT,
PolicyCollection=DEFAULT
) as mocks:
mocks['AWS'].return_value = mock_aws
mocks['PolicyCollection'].return_value = mock_pc
mocks['load_policies'].return_value = [
mock_pol1, mock_pol2, mock_pol3
]
runner.MugcStep('rName', self.m_conf).run()
assert mock_empty.mock_calls == [
call(
config_files=['custodian_rName.yml'],
regions=['rName'],
prefix='foobar-',
policy_regex='^foobar\\-.*',
assume=None,
policy_filter=None,
log_group=None,
external_id=None,
cache_period=0,
cache=None,
present=False
)
]
assert mocks['load_policies'].mock_calls == [
call(mock_conf, mock_conf)
]
assert mocks['PolicyCollection'].mock_calls == [
call([mock_pol2], mock_conf)
]
assert mocks['AWS'].mock_calls == [
call(),
call().initialize_policies(mock_pc, mock_conf)
]
assert mocks['resources_gc_prefix'].mock_calls == [
call(mock_conf, mock_conf, {'aws': 'policies'})
]
def test_dryrun(self):
type(self.m_conf).function_prefix = PropertyMock(
return_value='foobar-'
)
mock_pol1 = Mock(provider_name='foo')
mock_pol2 = Mock(provider_name='aws')
mock_pol3 = Mock(provider_name='azure')
mock_conf = Mock(spec_set=ManheimConfig)
mock_aws = Mock(spec_set=AWS)
mock_aws.initialize_policies.return_value = {'aws': 'policies'}
mock_pc = Mock()
with patch('%s.Config.empty' % pbm) as mock_empty:
mock_empty.return_value = mock_conf
with patch.multiple(
pbm,
AWS=DEFAULT,
load_policies=DEFAULT,
resources_gc_prefix=DEFAULT,
PolicyCollection=DEFAULT
) as mocks:
mocks['AWS'].return_value = mock_aws
mocks['PolicyCollection'].return_value = mock_pc
mocks['load_policies'].return_value = [
mock_pol1, mock_pol2, mock_pol3
]
runner.MugcStep('rName', self.m_conf).dryrun()
assert mock_empty.mock_calls == [
call(
config_files=['custodian_rName.yml'],
regions=['rName'],
prefix='foobar-',
policy_regex='^foobar\\-.*',
assume=None,
policy_filter=None,
log_group=None,
external_id=None,
cache_period=0,
cache=None,
present=False,
dryrun=True
)
]
assert mocks['load_policies'].mock_calls == [
call(mock_conf, mock_conf)
]
assert mocks['PolicyCollection'].mock_calls == [
call([mock_pol2], mock_conf)
]
assert mocks['AWS'].mock_calls == [
call(),
call().initialize_policies(mock_pc, mock_conf)
]
assert mocks['resources_gc_prefix'].mock_calls == [
call(mock_conf, mock_conf, {'aws': 'policies'})
]
def test_run_in_region(self):
for rname in ALL_REGIONS:
assert runner.MugcStep.run_in_region(rname, None) is True
class TestCustodianStep(StepTester):
def test_run(self):
type(self.m_conf).output_s3_bucket_name = PropertyMock(
return_value='cloud-custodian-ACCT-REGION'
)
type(self.m_conf).custodian_log_group = PropertyMock(
return_value='/cloud-custodian/ACCT/REGION'
)
mock_conf = Mock(spec_set=Config)
with patch('%s.run' % pbm) as mock_run:
with patch('%s.Config.empty' % pbm) as mock_empty:
mock_empty.return_value = mock_conf
runner.CustodianStep('rName', self.m_conf).run()
assert mock_run.mock_calls == [call(mock_conf)]
assert mock_empty.mock_calls == [
call(
configs=['custodian_rName.yml'],
region='rName',
regions=['rName'],
log_group='/cloud-custodian/ACCT/REGION',
verbose=1,
metrics_enabled=True,
subparser='run',
cache='/tmp/.cache/cloud-custodian.cache',
command='c7n.commands.run',
output_dir='cloud-custodian-ACCT-REGION/logs',
vars=None,
dryrun=False
)
]
def test_dryrun(self):
type(self.m_conf).output_s3_bucket_name = PropertyMock(
return_value='cloud-custodian-ACCT-REGION'
)
type(self.m_conf).custodian_log_group = PropertyMock(
return_value='/cloud-custodian/ACCT/REGION'
)
mock_conf = Mock(spec_set=Config)
with patch('%s.run' % pbm) as mock_run:
with patch('%s.Config.empty' % pbm) as mock_empty:
mock_empty.return_value = mock_conf
runner.CustodianStep('rName', self.m_conf).dryrun()
assert mock_run.mock_calls == [call(mock_conf)]
assert mock_empty.mock_calls == [
call(
configs=['custodian_rName.yml'],
region='rName',
regions=['rName'],
verbose=1,
metrics_enabled=False,
subparser='run',
cache='/tmp/.cache/cloud-custodian.cache',
command='c7n.commands.run',
output_dir='dryrun/rName',
vars=None,
dryrun=True
)
]
def test_run_in_region(self):
for rname in ALL_REGIONS:
assert runner.CustodianStep.run_in_region(rname, None) is True
class TestMailerStep(StepTester):
@patch(f'{pbm}.__file__', 'path/to/runner.py')
def test_mailer_config_docker(self):
m_conf = Mock(spec_set=ManheimConfig)
type(m_conf).mailer_config = PropertyMock(
return_value={'mailer': 'config'}
)
def se_mailer_setup_defaults(d):
d['defaults'] = 'set'
def se_isdir(d):
if d == '/manheim_c7n_tools/manheim_c7n_tools/mailer-templates':
return True
return False
def se_abspath(p):
return f'/abspath/{p}'
with patch(
'%s.jsonschema.validate' % pbm, autospec=True
) as mock_validate:
with patch(
'%s.mailer_setup_defaults' % pbm, autospec=True
) as mock_msd:
mock_msd.side_effect = se_mailer_setup_defaults
with patch(
'%s.os.path.isdir' % pbm
) as mock_isdir:
mock_isdir.side_effect = se_isdir
with patch(
'%s.os.path.abspath' % pbm
) as mock_abspath:
mock_abspath.side_effect = se_abspath
res = runner.MailerStep(
'rName', m_conf
).mailer_config
expected = {
'mailer': 'config',
'defaults': 'set',
'templates_folders': [
'/manheim_c7n_tools/manheim_c7n_tools/mailer-templates'
]
}
assert res == expected
assert mock_validate.mock_calls == [
call(expected, MAILER_SCHEMA)
]
assert mock_msd.mock_calls == [
call(expected)
]
@patch(f'{pbm}.__file__', 'path/to/runner.py')
def test_mailer_config_nondocker(self):
m_conf = Mock(spec_set=ManheimConfig)
type(m_conf).mailer_config = PropertyMock(
return_value={'mailer': 'config'}
)
def se_mailer_setup_defaults(d):
d['defaults'] = 'set'
d['templates_folders'] = []
def se_isdir(d):
if d == '/abspath/path/to/mailer-templates':
return True
return False
def se_abspath(p):
return f'/abspath/{p}'
with patch(
'%s.jsonschema.validate' % pbm, autospec=True
) as mock_validate:
with patch(
'%s.mailer_setup_defaults' % pbm, autospec=True
) as mock_msd:
mock_msd.side_effect = se_mailer_setup_defaults
with patch(
'%s.os.path.isdir' % pbm
) as mock_isdir:
mock_isdir.side_effect = se_isdir
with patch(
'%s.os.path.abspath' % pbm
) as mock_abspath:
mock_abspath.side_effect = se_abspath
res = runner.MailerStep(
'rName', m_conf
).mailer_config
expected = {
'mailer': 'config',
'defaults': 'set',
'templates_folders': ['/abspath/path/to/mailer-templates']
}
assert res == expected
assert mock_validate.mock_calls == [
call(expected, MAILER_SCHEMA)
]
assert mock_msd.mock_calls == [
call(expected)
]
@patch(f'{pbm}.__file__', 'path/to/runner.py')
def test_mailer_config_existing_folders(self):
m_conf = Mock(spec_set=ManheimConfig)
type(m_conf).mailer_config = PropertyMock(
return_value={'mailer': 'config'}
)
def se_mailer_setup_defaults(d):
d['defaults'] = 'set'
d['templates_folders'] = ['/my/folder']
def se_isdir(d):
if d == '/abspath/path/to/mailer-templates':
return True
return False
def se_abspath(p):
return f'/abspath/{p}'
with patch(
'%s.jsonschema.validate' % pbm, autospec=True
) as mock_validate:
with patch(
'%s.mailer_setup_defaults' % pbm, autospec=True
) as mock_msd:
mock_msd.side_effect = se_mailer_setup_defaults
with patch(
'%s.os.path.isdir' % pbm
) as mock_isdir:
mock_isdir.side_effect = se_isdir
with patch(
'%s.os.path.abspath' % pbm
) as mock_abspath:
mock_abspath.side_effect = se_abspath
res = runner.MailerStep(
'rName', m_conf
).mailer_config
expected = {
'mailer': 'config',
'defaults': 'set',
'templates_folders': [
'/abspath/path/to/mailer-templates',
'/my/folder'
]
}
assert res == expected
assert mock_validate.mock_calls == [
call(expected, MAILER_SCHEMA)
]
assert mock_msd.mock_calls == [
call(expected)
]
@patch(f'{pbm}.__file__', 'path/to/runner.py')
def test_mailer_config_only_existing(self):
m_conf = Mock(spec_set=ManheimConfig)
type(m_conf).mailer_config = PropertyMock(
return_value={'mailer': 'config'}
)
def se_mailer_setup_defaults(d):
d['defaults'] = 'set'
d['templates_folders'] = ['/my/folder']
def se_isdir(d):
return False
def se_abspath(p):
return f'/abspath/{p}'
with patch(
'%s.jsonschema.validate' % pbm, autospec=True
) as mock_validate:
with patch(
'%s.mailer_setup_defaults' % pbm, autospec=True
) as mock_msd:
mock_msd.side_effect = se_mailer_setup_defaults
with patch(
'%s.os.path.isdir' % pbm
) as mock_isdir:
mock_isdir.side_effect = se_isdir
with patch(
'%s.os.path.abspath' % pbm
) as mock_abspath:
mock_abspath.side_effect = se_abspath
res = runner.MailerStep(
'rName', m_conf
).mailer_config
expected = {
'mailer': 'config',
'defaults': 'set',
'templates_folders': [
'/my/folder'
]
}
assert res == expected
assert mock_validate.mock_calls == [
call(expected, MAILER_SCHEMA)
]
assert mock_msd.mock_calls == [
call(expected)
]
@patch(f'{pbm}.__file__', 'path/to/runner.py')
def test_mailer_config_pwd(self):
m_conf = Mock(spec_set=ManheimConfig)
type(m_conf).mailer_config = PropertyMock(
return_value={'mailer': 'config'}
)
def se_mailer_setup_defaults(d):
d['defaults'] = 'set'
def se_isdir(d):
if d == '/abspath/./mailer-templates':
return True
return False
def se_abspath(p):
return f'/abspath/{p}'
with patch(
'%s.jsonschema.validate' % pbm, autospec=True
) as mock_validate:
with patch(
'%s.mailer_setup_defaults' % pbm, autospec=True
) as mock_msd:
mock_msd.side_effect = se_mailer_setup_defaults
with patch(
'%s.os.path.isdir' % pbm
) as mock_isdir:
mock_isdir.side_effect = se_isdir
with patch(
'%s.os.path.abspath' % pbm
) as mock_abspath:
mock_abspath.side_effect = se_abspath
res = runner.MailerStep(
'rName', m_conf
).mailer_config
expected = {
'mailer': 'config',
'defaults': 'set',
'templates_folders': [
'/abspath/./mailer-templates'
]
}
assert res == expected
assert mock_validate.mock_calls == [
call(expected, MAILER_SCHEMA)
]
assert mock_msd.mock_calls == [
call(expected)
]
@patch(f'{pbm}.__file__', 'path/to/runner.py')
def test_mailer_config_all_exist(self):
m_conf = Mock(spec_set=ManheimConfig)
type(m_conf).mailer_config = PropertyMock(
return_value={'mailer': 'config'}
)
def se_mailer_setup_defaults(d):
d['defaults'] = 'set'
d['templates_folders'] = ['/my/folder']
def se_isdir(d):
return True
def se_abspath(p):
return f'/abspath/{p}'
with patch(
'%s.jsonschema.validate' % pbm, autospec=True
) as mock_validate:
with patch(
'%s.mailer_setup_defaults' % pbm, autospec=True
) as mock_msd:
mock_msd.side_effect = se_mailer_setup_defaults
with patch(
'%s.os.path.isdir' % pbm
) as mock_isdir:
mock_isdir.side_effect = se_isdir
with patch(
'%s.os.path.abspath' % pbm
) as mock_abspath:
mock_abspath.side_effect = se_abspath
res = runner.MailerStep(
'rName', m_conf
).mailer_config
expected = {
'mailer': 'config',
'defaults': 'set',
'templates_folders': [
'/abspath/path/to/mailer-templates',
'/manheim_c7n_tools/manheim_c7n_tools/mailer-templates',
'/abspath/./mailer-templates',
'/my/folder'
]
}
assert res == expected
assert mock_validate.mock_calls == [
call(expected, MAILER_SCHEMA)
]
assert mock_msd.mock_calls == [
call(expected)
]
def test_run(self):
m_partial = Mock(spec_set=partial)
m_conf = Mock(spec_set=ManheimConfig)
with patch(
'%s.MailerStep.mailer_config' % pbm, new_callable=PropertyMock
) as mock_config:
with patch(
'%s.mailer_deploy.provision' % pbm, autospec=True
) as mock_prov:
with patch(
'%s.session_factory' % pbm, autospec=True
) as mock_sf:
with patch(
'%s.functools.partial' % pbm, autospec=True
) as mock_partial:
mock_config.return_value = m_conf
mock_partial.return_value = m_partial
runner.MailerStep('rName', self.m_conf).run()
assert mock_partial.mock_calls == [call(mock_sf, m_conf)]
assert mock_prov.mock_calls == [call(m_conf, m_partial)]
assert mock_config.mock_calls == [call()]
def test_dryrun(self):
m_partial = Mock(spec_set=partial)
m_conf = Mock(spec_set=ManheimConfig)
with patch(
'%s.MailerStep.mailer_config' % pbm, new_callable=PropertyMock
) as mock_config:
with patch(
'%s.mailer_deploy.provision' % pbm, autospec=True
) as mock_prov:
with patch(
'%s.session_factory' % pbm, autospec=True
):
with patch(
'%s.functools.partial' % pbm, autospec=True
) as mock_partial:
mock_config.return_value = m_conf
mock_partial.return_value = m_partial
runner.MailerStep('rName', self.m_conf).dryrun()
assert mock_partial.mock_calls == []
assert mock_prov.mock_calls == []
assert mock_config.mock_calls == [call()]
def test_run_in_region(self):
m_conf = Mock(spec_set=ManheimConfig)
type(m_conf).mailer_regions = PropertyMock(
return_value=['us-east-1']
)
for rname in ALL_REGIONS:
if rname == 'us-east-1':
assert runner.MailerStep.run_in_region(rname, m_conf) is True
else:
assert runner.MailerStep.run_in_region(rname, m_conf) is False
def test_mailer_archive(self):
"""
This is a test of ``c7n_mailer.deploy.get_archive()``. We've had a few
dependency issues in the past that aren't detected until actually
deploying mailer and generating the archive (zip) for the Lambda, so
this attempts to detect those issues.
"""
arch = get_archive({'templates_folders': []})
assert isinstance(arch, PythonPackageArchive)
assert arch.size > 0
assert len(arch.get_filenames()) > 0
class TestDryRunDiffStep(StepTester):
def test_run(self):
with patch('%s.DryRunDiffer' % pbm, autospec=True) as mock_drd:
runner.DryRunDiffStep('rName', self.m_conf).run()
assert mock_drd.mock_calls == []
def test_dryrun(self):
with patch('%s.DryRunDiffer' % pbm, autospec=True) as mock_drd:
runner.DryRunDiffStep('rName', self.m_conf).dryrun()
assert mock_drd.mock_calls == [
call(self.m_conf),
call().run(diff_against='origin/master')
]
def test_run_in_region(self):
type(self.m_conf).regions = PropertyMock(
return_value=ALL_REGIONS
)
for rname in ALL_REGIONS:
if rname == 'us-west-2':
assert runner.DryRunDiffStep.run_in_region(
rname, self.m_conf
) is True
else:
assert runner.DryRunDiffStep.run_in_region(
rname, self.m_conf
) is False
class TestS3ArchiverStep(StepTester):
def test_run(self):
type(self.m_conf).output_s3_bucket_name = PropertyMock(
return_value='cloud-custodian-ACCT-REGION'
)
with patch('%s.S3Archiver' % pbm, autospec=True) as mock_s3a:
runner.S3ArchiverStep('rName', self.m_conf).run()
assert mock_s3a.mock_calls == [
call(
'rName',
'cloud-custodian-ACCT-REGION',
'custodian_rName.yml'
),
call().run()
]
def test_dryrun(self):
type(self.m_conf).output_s3_bucket_name = PropertyMock(
return_value='cloud-custodian-ACCT-REGION'
)
with patch('%s.S3Archiver' % pbm, autospec=True) as mock_s3a:
runner.S3ArchiverStep('rName', self.m_conf).dryrun()
assert mock_s3a.mock_calls == [
call(
'rName',
'cloud-custodian-ACCT-REGION',
'custodian_rName.yml',
dryrun=True
),
call().run()
]
def test_run_in_region(self):
for rname in ALL_REGIONS:
assert runner.S3ArchiverStep.run_in_region(rname, None) is True
class TestDocsBuildStep(StepTester):
def test_run(self):
with patch(
'%s.DocsBuildStep._run_sphinx_build' % pbm, autospec=True
) as m_rsb:
cls = runner.DocsBuildStep('rName', self.m_conf)
cls.run()
assert m_rsb.mock_calls == [call(cls)]
def test_dryrun(self):
with patch(
'%s.DocsBuildStep._run_sphinx_build' % pbm, autospec=True
) as m_rsb:
cls = runner.DocsBuildStep('rName', self.m_conf)
cls.dryrun()
assert m_rsb.mock_calls == [call(cls)]
def test_run_sphinx_build(self):
with patch('%s.os.path.exists' % pbm, autospec=True) as mock_ope:
with patch('%s.rmtree' % pbm, autospec=True) as mock_rmtree:
with patch(
'%s.sphinx_main' % pbm, autospec=True
) as mock_sphinx:
mock_ope.return_value = False
mock_sphinx.return_value = 0
runner.DocsBuildStep(
'rName', self.m_conf
)._run_sphinx_build()
assert mock_ope.mock_calls == [call('docs/_build')]
assert mock_rmtree.mock_calls == []
assert mock_sphinx.mock_calls == [
call(['-W', 'docs/source', 'docs/_build', '-b', 'dirhtml'])
]
def test_run_sphinx_build_failure(self):
with patch('%s.os.path.exists' % pbm, autospec=True) as mock_ope:
with patch('%s.rmtree' % pbm, autospec=True) as mock_rmtree:
with patch(
'%s.sphinx_main' % pbm, autospec=True
) as mock_sphinx:
mock_ope.return_value = True
mock_sphinx.return_value = 3
with pytest.raises(RuntimeError) as exc:
runner.DocsBuildStep(
'rName', self.m_conf
)._run_sphinx_build()
assert str(exc.value) == 'Sphinx exited 3'
assert mock_ope.mock_calls == [call('docs/_build')]
assert mock_rmtree.mock_calls == [call('docs/_build')]
assert mock_sphinx.mock_calls == [
call(['-W', 'docs/source', 'docs/_build', '-b', 'dirhtml'])
]
def test_run_in_region(self):
conf = FakeConfig(ALL_REGIONS)
for rname in ALL_REGIONS:
if rname == ALL_REGIONS[0]:
assert runner.DocsBuildStep.run_in_region(rname, conf) is True
else:
assert runner.DocsBuildStep.run_in_region(rname, conf) is False
class TestStepClasses(object):
def test_all_subclasses_have_unique_name(self):
subc = [x for x in runner.BaseStep.__subclasses__()]
names = []
for x in subc:
assert x.name is not None
names.append(x.name.strip())
assert len(names) == len(set(names))
assert None not in names
assert '' not in names
def test_base_step_run_in_region(self):
assert runner.BaseStep.run_in_region('foo', None) is True
class TestCustodianRunner(object):
def setup(self):
def se_cls2(rname, r_conf):
return rname in ['r1', 'r3']
def se_cls3(rname, r_conf):
return rname == 'r1'
self.cls1 = Mock(spec_set=BaseStep)
type(self.cls1).name = PropertyMock(return_value='cls1')
self.cls1.run_in_region.return_value = True
self.cls2 = Mock(spec_set=BaseStep)
type(self.cls2).name = PropertyMock(return_value='cls2')
self.cls2.run_in_region.side_effect = se_cls2
self.cls3 = Mock(spec_set=BaseStep)
type(self.cls3).name = PropertyMock(return_value='cls3')
self.cls3.run_in_region.side_effect = se_cls3
self.cls4 = Mock(spec_set=BaseStep)
type(self.cls4).name = PropertyMock(return_value='cls4')
self.cls4.run_in_region.return_value = True
self.steps = [self.cls1, self.cls2, self.cls3, self.cls4]
def test_init(self):
m_conf = Mock(spec_set=ManheimConfig)
with patch('%s.ManheimConfig.from_file' % pbm) as mock_cff:
mock_cff.return_value = m_conf
cls = runner.CustodianRunner('acctName', 'cpath')
assert cls.config == m_conf
assert cls._config_path == 'cpath'
assert mock_cff.mock_calls == [call('cpath', 'acctName')]
def test_run_all_steps(self):
m_conf = Mock(spec_set=ManheimConfig)
type(m_conf).regions = PropertyMock(
return_value=['r1', 'r2', 'r3']
)
with patch('%s.CustodianRunner.ordered_step_classes' % pbm, self.steps):
with patch.multiple(
'%s.CustodianRunner' % pbm,
autospec=True,
_steps_to_run=DEFAULT,
_run_step_in_regions=DEFAULT,
_validate_account=DEFAULT
) as mocks:
mocks['_steps_to_run'].return_value = [
self.cls1, self.cls2, self.cls3, self.cls4
]
with patch('%s.logger' % pbm, autospec=True) as mock_logger:
with patch(
'%s.ManheimConfig.from_file' % pbm
) as mock_cff:
mock_cff.return_value = m_conf
cls = runner.CustodianRunner('acctName')
cls.run('run')
assert mocks['_steps_to_run'].mock_calls == [call(cls, [], [])]
assert mocks['_run_step_in_regions'].mock_calls == [
call(cls, 'run', self.cls1, ['r1', 'r2', 'r3']),
call(cls, 'run', self.cls2, ['r1', 'r2', 'r3']),
call(cls, 'run', self.cls3, ['r1', 'r2', 'r3']),
call(cls, 'run', self.cls4, ['r1', 'r2', 'r3'])
]
assert self.cls1.mock_calls == []
assert self.cls2.mock_calls == []
assert self.cls3.mock_calls == []
assert self.cls4.mock_calls == []
assert mock_logger.mock_calls == [
call.info(bold('Beginning run - 4 steps')),
call.info(bold('Step 1 of 4 - cls1')),
call.info(bold('Step 2 of 4 - cls2')),
call.info(bold('Step 3 of 4 - cls3')),
call.info(bold('Step 4 of 4 - cls4')),
call.info(bold('SUCCESS: All 4 steps complete!'))
]
assert mock_cff.mock_calls == [
call('manheim-c7n-tools.yml', 'acctName')
]
assert mocks['_validate_account'].mock_calls == [call(cls)]
def test_run_dryrun_some_steps_some_regions(self):
m_conf = Mock(spec_set=ManheimConfig)
type(m_conf).regions = PropertyMock(
return_value=['r1', 'r2', 'r3']
)
with patch('%s.CustodianRunner.ordered_step_classes' % pbm, self.steps):
with patch.multiple(
'%s.CustodianRunner' % pbm,
autospec=True,
_steps_to_run=DEFAULT,
_run_step_in_regions=DEFAULT,
_validate_account=DEFAULT
) as mocks:
mocks['_steps_to_run'].return_value = [
self.cls2, self.cls3
]
with patch('%s.logger' % pbm, autospec=True) as mock_logger:
with patch('%s.ManheimConfig.from_file' % pbm) as mock_cff:
mock_cff.return_value = m_conf
cls = runner.CustodianRunner('aName')
cls.run(
'dryrun',
regions=['r2'],
step_names=['cls2', 'cls3', 'cls4'],
skip_steps=['cls4']
)
assert mocks['_steps_to_run'].mock_calls == [
call(cls, ['cls2', 'cls3', 'cls4'], ['cls4'])
]
assert mocks['_run_step_in_regions'].mock_calls == [
call(cls, 'dryrun', self.cls2, ['r2']),
call(cls, 'dryrun', self.cls3, ['r2'])
]
assert self.cls1.mock_calls == []
assert self.cls2.mock_calls == []
assert self.cls3.mock_calls == []
assert self.cls4.mock_calls == []
assert mock_logger.mock_calls == [
call.info(bold('Beginning dryrun - 2 of 4 steps selected')),
call.info(bold('Step 1 of 2 - cls2')),
call.info(bold('Step 2 of 2 - cls3')),
call.info(bold('SUCCESS: All 2 steps complete!'))
]
assert mock_cff.mock_calls == [call('manheim-c7n-tools.yml', 'aName')]
assert mocks['_validate_account'].mock_calls == [call(cls)]
def test_run_invalid_region_name(self):
m_conf = Mock(spec_set=ManheimConfig)
type(m_conf).regions = PropertyMock(
return_value=['r1', 'r2', 'r3']
)
with patch('%s.CustodianRunner.ordered_step_classes' % pbm, self.steps):
with patch.multiple(
'%s.CustodianRunner' % pbm,
autospec=True,
_steps_to_run=DEFAULT,
_run_step_in_regions=DEFAULT,
_validate_account=DEFAULT
) as mocks:
mocks['_steps_to_run'].return_value = [
self.cls2, self.cls3
]
with patch('%s.logger' % pbm, autospec=True) as mock_logger:
with patch('%s.ManheimConfig.from_file' % pbm) as mock_cff:
mock_cff.return_value = m_conf
with pytest.raises(RuntimeError) as exc:
cls = runner.CustodianRunner('acctName')
cls.run(
'dryrun',
regions=['notValid'],
step_names=['cls2', 'cls3', 'cls4'],
skip_steps=['cls4']
)
assert str(exc.value) == 'ERROR: All specified region names must be ' \
'listed in the "regions" section of the ' \
'config file (manheim-c7n-tools.yml)'
assert mocks['_steps_to_run'].mock_calls == [
call(cls, ['cls2', 'cls3', 'cls4'], ['cls4'])
]
assert mocks['_run_step_in_regions'].mock_calls == []
assert self.cls1.mock_calls == []
assert self.cls2.mock_calls == []
assert self.cls3.mock_calls == []
assert self.cls4.mock_calls == []
assert mock_logger.mock_calls == [
call.info(bold('Beginning dryrun - 2 of 4 steps selected'))
]
assert mocks['_validate_account'].mock_calls == [call(cls)]
def test_validate_account(self):
m_conf = Mock(spec_set=ManheimConfig)
type(m_conf).account_name = PropertyMock(
return_value='myAcct'
)
type(m_conf).account_id = PropertyMock(return_value='0234567890')
with patch('%s.boto3.client' % pbm) as mock_client:
mock_client.return_value.get_caller_identity.return_value = {
'UserId': 'MyUID',
'Arn': 'myARN',
'Account': '0234567890'
}
with patch('%s.ManheimConfig.from_file' % pbm) as mock_cff:
mock_cff.return_value = m_conf
cls = runner.CustodianRunner('acctName')
cls._validate_account()
assert mock_cff.mock_calls == [
call('manheim-c7n-tools.yml', 'acctName')
]
assert mock_client.mock_calls == [
call('sts', region_name='us-east-1'),
call().get_caller_identity()
]
def test_validate_account_failed(self):
m_conf = Mock(spec_set=ManheimConfig)
type(m_conf).account_name = PropertyMock(
return_value='myAcct'
)
type(m_conf).account_id = PropertyMock(return_value='1234567890')
with patch('%s.boto3.client' % pbm) as mock_client:
mock_client.return_value.get_caller_identity.return_value = {
'UserId': 'MyUID',
'Arn': 'myARN',
'Account': '9876543210'
}
with patch('%s.ManheimConfig.from_file' % pbm) as mock_cff:
mock_cff.return_value = m_conf
cls = runner.CustodianRunner('acctName')
with pytest.raises(RuntimeError) as exc:
cls._validate_account()
assert str(exc.value) == 'ERROR: Using configuration for account ' \
'1234567890 (myAcct), but ' \
'sts:GetCallerIdentity reports connected to ' \
'account 9876543210'
assert mock_cff.mock_calls == [
call('manheim-c7n-tools.yml', 'acctName')
]
assert mock_client.mock_calls == [
call('sts', region_name='us-east-1'),
call().get_caller_identity()
]
def test_steps_to_run_all(self):
m_conf = Mock(spec_set=ManheimConfig)
with patch('%s.CustodianRunner.ordered_step_classes' % pbm, self.steps):
with patch('%s.ManheimConfig.from_file' % pbm) as mock_cff:
mock_cff.return_value = m_conf
res = runner.CustodianRunner('acctName')._steps_to_run([], [])
assert res == self.steps
def test_steps_to_run_step_names(self):
m_conf = Mock(spec_set=ManheimConfig)
with patch('%s.CustodianRunner.ordered_step_classes' % pbm, self.steps):
with patch('%s.ManheimConfig.from_file' % pbm) as mock_cff:
mock_cff.return_value = m_conf
res = runner.CustodianRunner('acctName')._steps_to_run(
['cls3', 'cls1'], []
)
assert res == [self.cls1, self.cls3]
def test_steps_to_run_skip_steps(self):
m_conf = Mock(spec_set=ManheimConfig)
with patch('%s.CustodianRunner.ordered_step_classes' % pbm, self.steps):
with patch('%s.ManheimConfig.from_file' % pbm) as mock_cff:
mock_cff.return_value = m_conf
res = runner.CustodianRunner('aName')._steps_to_run(
[], ['cls4', 'cls2']
)
assert res == [self.cls1, self.cls3]
def test_steps_to_run_names_and_skip(self):
m_conf = Mock(spec_set=ManheimConfig)
with patch('%s.CustodianRunner.ordered_step_classes' % pbm, self.steps):
with patch('%s.ManheimConfig.from_file' % pbm) as mock_cff:
mock_cff.return_value = m_conf
res = runner.CustodianRunner('aName')._steps_to_run(
['cls3', 'cls2', 'cls1'], ['cls1']
)
assert res == [self.cls2, self.cls3]
def test_ordered_step_classes(self):
"""ensures all are subclasses of BaseStep"""
m_conf = Mock(spec_set=ManheimConfig)
for klass in runner.CustodianRunner.ordered_step_classes:
with patch('%s.ManheimConfig.from_file' % pbm) as mock_cff:
mock_cff.return_value = m_conf
assert isinstance(klass(None, m_conf), runner.BaseStep)
def test_run_in_regions_run(self):
m_conf = Mock(spec_set=ManheimConfig)
m_conf_r1 = Mock(spec_set=ManheimConfig)
m_conf_r2 = Mock(spec_set=ManheimConfig)
m_conf_r3 = Mock(spec_set=ManheimConfig)
def se_conf_for_region(rname):
if rname == 'r1':
return m_conf_r1
if rname == 'r2':
return m_conf_r2
if rname == 'r3':
return m_conf_r3
m_conf.config_for_region.side_effect = se_conf_for_region
with patch('%s.logger' % pbm, autospec=True) as mock_logger:
with patch('%s.ManheimConfig.from_file' % pbm) as mock_cff:
mock_cff.return_value = m_conf
runner.CustodianRunner('acctName')._run_step_in_regions(
'run', self.cls1, ['r1', 'r2', 'r3']
)
assert self.cls1.mock_calls == [
call.run_in_region('r1', m_conf_r1),
call('r1', m_conf_r1),
call().run(),
call.run_in_region('r2', m_conf_r2),
call('r2', m_conf_r2),
call().run(),
call.run_in_region('r3', m_conf_r3),
call('r3', m_conf_r3),
call().run()
]
assert m_conf.config_for_region.mock_calls == [
call('r1'),
call('r2'),
call('r3')
]
assert mock_logger.mock_calls == [
call.info(bold('Step cls1 in REGION 1 of 3 (r1)')),
call.info(bold('Step cls1 in REGION 2 of 3 (r2)')),
call.info(bold('Step cls1 in REGION 3 of 3 (r3)'))
]
def test_run_in_regions_policygen_run(self):
m_conf = Mock(spec_set=ManheimConfig)
m_conf_r1 = Mock(spec_set=ManheimConfig)
m_conf_r2 = Mock(spec_set=ManheimConfig)
m_conf_r3 = Mock(spec_set=ManheimConfig)
def se_conf_for_region(rname):
if rname == 'r1':
return m_conf_r1
if rname == 'r2':
return m_conf_r2
if rname == 'r3':
return m_conf_r3
m_conf.config_for_region.side_effect = se_conf_for_region
with patch('%s.logger' % pbm, autospec=True) as mock_logger:
with patch('%s.ManheimConfig.from_file' % pbm) as mock_cff:
mock_cff.return_value = m_conf
with patch('%s.PolicygenStep' % pbm, autospec=True) as mock_pgs:
type(mock_pgs).name = PropertyMock(return_value='policygen')
mock_pgs.run_in_region.return_value = True
runner.CustodianRunner('acctName')._run_step_in_regions(
'run', mock_pgs, ['r1', 'r2', 'r3']
)
assert mock_pgs.mock_calls == [
call.run_in_region('r1', m_conf),
call('r1', m_conf),
call().run(),
call.run_in_region('r2', m_conf),
call('r2', m_conf),
call().run(),
call.run_in_region('r3', m_conf),
call('r3', m_conf),
call().run()
]
assert m_conf.config_for_region.mock_calls == []
assert mock_logger.mock_calls == [
call.info(bold('Step policygen in REGION 1 of 3 (r1)')),
call.info(bold('Step policygen in REGION 2 of 3 (r2)')),
call.info(bold('Step policygen in REGION 3 of 3 (r3)'))
]
def test_run_in_regions_dryrun_skip_some(self):
m_conf = Mock(spec_set=ManheimConfig)
m_conf_r1 = Mock(spec_set=ManheimConfig)
m_conf_r2 = Mock(spec_set=ManheimConfig)
m_conf_r3 = Mock(spec_set=ManheimConfig)
def se_conf_for_region(rname):
if rname == 'r1':
return m_conf_r1
if rname == 'r2':
return m_conf_r2
if rname == 'r3':
return m_conf_r3
m_conf.config_for_region.side_effect = se_conf_for_region
with patch('%s.logger' % pbm, autospec=True) as mock_logger:
with patch('%s.ManheimConfig.from_file' % pbm) as mock_cff:
mock_cff.return_value = m_conf
runner.CustodianRunner('acctName')._run_step_in_regions(
'dryrun', self.cls2, ['r2', 'r3']
)
assert self.cls2.mock_calls == [
call.run_in_region('r2', m_conf_r2),
call.run_in_region('r3', m_conf_r3),
call('r3', m_conf_r3),
call().dryrun()
]
assert m_conf.config_for_region.mock_calls == [
call('r2'),
call('r3')
]
assert mock_logger.mock_calls == [
call.info(bold('SKIPPING Step cls2 in REGION 1 of 2 (r2)')),
call.info(bold('Step cls2 in REGION 2 of 2 (r3)'))
]
class TestParseArgs(object):
def test_run(self):
p = runner.parse_args(['run', 'aName'])
assert p.verbose == 0
assert p.steps == []
assert p.skip == []
assert p.ACTION == 'run'
assert p.regions == []
assert p.config == 'manheim-c7n-tools.yml'
assert p.ACCT_NAME == 'aName'
assert p.assume_role is True
def test_run_skip_steps(self):
p = runner.parse_args(
['-S', 'foo', '--skip-step=bar', 'run', 'acctName']
)
assert p.verbose == 0
assert p.steps == []
assert p.skip == ['foo', 'bar']
assert p.ACTION == 'run'
assert p.regions == []
assert p.config == 'manheim-c7n-tools.yml'
assert p.ACCT_NAME == 'acctName'
assert p.assume_role is True
def test_dryrun_info_region(self):
p = runner.parse_args(['-v', '-r', 'us-east-1', 'dryrun', 'aName'])
assert p.verbose == 1
assert p.steps == []
assert p.skip == []
assert p.ACTION == 'dryrun'
assert p.regions == ['us-east-1']
assert p.config == 'manheim-c7n-tools.yml'
assert p.ACCT_NAME == 'aName'
assert p.assume_role is True
def test_list(self):
p = runner.parse_args(['-c', 'foobar.yml', '--no-assume-role', 'list'])
assert p.verbose == 0
assert p.steps == []
assert p.skip == []
assert p.ACTION == 'list'
assert p.regions == []
assert p.config == 'foobar.yml'
assert p.assume_role is False
def test_list_accounts(self):
p = runner.parse_args(['accounts'])
assert p.verbose == 0
assert p.steps == []
assert p.skip == []
assert p.ACTION == 'accounts'
assert p.regions == []
assert p.config == 'manheim-c7n-tools.yml'
assert p.assume_role is True
def test_run_debug_steps_assume_role(self):
p = runner.parse_args(
['-vv', '-A', '-s', 'foo', '--step=bar', 'run', 'aName']
)
assert p.verbose == 2
assert p.steps == ['foo', 'bar']
assert p.skip == []
assert p.ACTION == 'run'
assert p.regions == []
assert p.config == 'manheim-c7n-tools.yml'
assert p.ACCT_NAME == 'aName'
assert p.assume_role is False
class FakeArgs(object):
verbose = 0
list = False
steps = []
skip = []
ACTION = None
regions = []
config = 'manheim-c7n-tools.yml'
ACCT_NAME = 'acctName'
assume_role = True
def __init__(self, **kwargs):
for k, v in kwargs.items():
setattr(self, k, v)
class TestMain(object):
def test_run(self, capsys):
m_cr = Mock(spec_set=runner.CustodianRunner)
m_conf = Mock(spec_set=ManheimConfig)
type(m_cr).config = m_conf
with patch.multiple(
pbm,
autospec=True,
parse_args=DEFAULT,
set_log_debug=DEFAULT,
set_log_info=DEFAULT,
CustodianRunner=DEFAULT,
ManheimConfig=DEFAULT,
assume_role=DEFAULT
) as mocks:
mocks['parse_args'].return_value = FakeArgs(
ACTION='run', regions=['foo2'], assume_role=False
)
mocks['CustodianRunner'].return_value = m_cr
runner.main()
captured = capsys.readouterr()
assert mocks['parse_args'].mock_calls == [
call(sys.argv[1:])
]
assert mocks['set_log_debug'].mock_calls == []
assert mocks['set_log_info'].mock_calls == []
assert mocks['CustodianRunner'].mock_calls == [
call('acctName', 'manheim-c7n-tools.yml'),
call().run(
'run', ['foo2'], step_names=[], skip_steps=[]
)
]
assert mocks['ManheimConfig'].mock_calls == []
assert captured.out == ''
assert captured.err == ''
assert mocks['assume_role'].mock_calls == []
def test_info_list(self, capsys):
osc = runner.CustodianRunner.ordered_step_classes
m_cr = Mock(spec_set=runner.CustodianRunner)
m_conf = Mock(spec_set=ManheimConfig)
type(m_cr).config = m_conf
with patch.multiple(
pbm,
autospec=True,
parse_args=DEFAULT,
set_log_debug=DEFAULT,
set_log_info=DEFAULT,
CustodianRunner=DEFAULT,
ManheimConfig=DEFAULT,
assume_role=DEFAULT
) as mocks:
mocks['parse_args'].return_value = FakeArgs(
ACTION='list', verbose=1
)
mocks['CustodianRunner'].return_value = m_cr
type(mocks['CustodianRunner']).ordered_step_classes = osc
with pytest.raises(SystemExit) as exc:
runner.main()
assert exc.value.code == 0
captured = capsys.readouterr()
assert mocks['parse_args'].mock_calls == [
call(sys.argv[1:])
]
assert mocks['set_log_debug'].mock_calls == []
assert mocks['set_log_info'].mock_calls == [call(runner.logger)]
assert mocks['ManheimConfig'].mock_calls == []
expected = "\n".join(
x.name for x in osc
)
assert captured.out == expected + "\n"
assert captured.err == ''
assert mocks['assume_role'].mock_calls == []
def test_accounts(self, capsys):
m_conf = Mock(spec_set=ManheimConfig)
osc = runner.CustodianRunner.ordered_step_classes
m_cr = Mock(spec_set=runner.CustodianRunner)
type(m_cr).config = m_conf
with patch.multiple(
pbm,
autospec=True,
parse_args=DEFAULT,
set_log_debug=DEFAULT,
set_log_info=DEFAULT,
CustodianRunner=DEFAULT,
ManheimConfig=DEFAULT,
assume_role=DEFAULT
) as mocks:
mocks['parse_args'].return_value = FakeArgs(ACTION='accounts')
mocks['CustodianRunner'].return_value = m_cr
type(mocks['CustodianRunner']).ordered_step_classes = osc
type(mocks['ManheimConfig']).return_value = m_conf
with patch('%s.ManheimConfig.list_accounts' % pbm) as mock_la:
mock_la.return_value = {
'acct1': 1111,
'acct3': 3333,
'acct2': 2222
}
with pytest.raises(SystemExit) as exc:
runner.main()
assert exc.value.code == 0
captured = capsys.readouterr()
assert mocks['parse_args'].mock_calls == [
call(sys.argv[1:])
]
assert mocks['set_log_debug'].mock_calls == []
assert mocks['set_log_info'].mock_calls == []
assert mocks['ManheimConfig'].mock_calls == []
assert mock_la.mock_calls == [call('manheim-c7n-tools.yml')]
assert captured.out == "acct1 (1111)\nacct2 (2222)\nacct3 (3333)\n"
assert captured.err == ''
assert mocks['assume_role'].mock_calls == []
def test_debug_dryrun_assume_role(self, capsys):
m_cr = Mock(spec_set=runner.CustodianRunner)
m_conf = Mock(spec_set=ManheimConfig)
type(m_cr).config = m_conf
with patch.multiple(
pbm,
autospec=True,
parse_args=DEFAULT,
set_log_debug=DEFAULT,
set_log_info=DEFAULT,
CustodianRunner=DEFAULT,
ManheimConfig=DEFAULT,
assume_role=DEFAULT
) as mocks:
mocks['parse_args'].return_value = FakeArgs(
ACTION='dryrun', verbose=2, steps=['foo'], skip=['bar'],
config='foo.yml', ACCT_NAME='aName', assume_role=True
)
mocks['CustodianRunner'].return_value = m_cr
runner.main()
captured = capsys.readouterr()
assert mocks['parse_args'].mock_calls == [
call(sys.argv[1:])
]
assert mocks['set_log_debug'].mock_calls == [call(runner.logger)]
assert mocks['set_log_info'].mock_calls == []
assert mocks['CustodianRunner'].mock_calls == [
call('aName', 'foo.yml'),
call().run(
'dryrun', [], step_names=['foo'], skip_steps=['bar']
)
]
assert mocks['ManheimConfig'].mock_calls == []
assert captured.out == ''
assert captured.err == ''
assert mocks['assume_role'].mock_calls == [call(m_conf)]
| python | 54,549 |
"""
Tests distcache/utils.py
Testing objective:
Is anything lost while transferring between server and clients.
    Send and receive objects. If you know what to expect, you can test it.
Testing idea:
The clients and servers are going to send and receive things and they would know what to expect as well.
Send and receive different types of things.
Fibonacci series with numbers
Squares of 0.9 for k iteration
Send and receive all letters
Echo servers. Repeat 1000 iterations to see if the message changes.
Echo servers but for floats
"""
import unittest
from distcache import utils
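# Hedged stand-in sketch (not part of distcache): the round-trip property the
# module docstring describes -- "is anything lost while transferring" -- can be
# illustrated locally with a plain serialize/deserialize cycle. The real tests
# would go through distcache's client/server transport; pickle is only an
# assumed stand-in for that transport here.
import pickle
def _roundtrip(obj):
    """Serialize and deserialize an object, as a proxy for send/receive."""
    return pickle.loads(pickle.dumps(obj))
def _fibonacci(n):
    """First n Fibonacci numbers, one of the payloads suggested above."""
    series, a, b = [], 0, 1
    for _ in range(n):
        series.append(a)
        a, b = b, a + b
    return series
# e.g. _roundtrip(_fibonacci(20)) == _fibonacci(20) should hold for a lossless
# transfer; the same check applies to floats like 0.9 ** k and to strings.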
class Server:
def __init__(self):
pass
def send(self):
pass
class Client:
def __init__(self):
pass
def send(self):
pass
class NetworkUtilsTest(unittest.TestCase):
def test_something(self):
self.assertEqual(True, True)
if __name__ == '__main__':
unittest.main()
| python | 949 |
from customer.models import CustomerProfile
def get_customer_instance(auth_user_instance):
    """Return the CustomerProfile linked to the given auth user, or None if
    the lookup fails (for example, when no profile exists for that user).
    """
try:
return CustomerProfile.objects.get(auth_user=auth_user_instance)
except Exception as e:
return None | python | 235 |
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
# Othello Program
# John Fish
# Updated from May 29, 2015 - June 26, 2015
#
# Has both basic AI (random decision) as well as
# educated AI (minimax).
#
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
#Library import
from tkinter import *
from math import *
from time import *
from random import *
from copy import deepcopy
#Variable setup
nodes = 0
depth = 4
moves = 0
#Tkinter setup
root = Tk()
screen = Canvas(root, width=500, height=600, background="#222",highlightthickness=0)
screen.pack()
class Board:
def __init__(self):
#White goes first (0 is white and player,1 is black and computer)
self.player = 0
self.passed = False
self.won = False
#Initializing an empty board
self.array = []
for x in range(8):
self.array.append([])
for y in range(8):
self.array[x].append(None)
#Initializing center values
self.array[3][3]="w"
self.array[3][4]="b"
self.array[4][3]="b"
self.array[4][4]="w"
#Initializing old values
self.oldarray = self.array
#Updating the board to the screen
def update(self):
screen.delete("highlight")
screen.delete("tile")
for x in range(8):
for y in range(8):
#Could replace the circles with images later, if I want
if self.oldarray[x][y]=="w":
screen.create_oval(54+50*x,54+50*y,96+50*x,96+50*y,tags="tile {0}-{1}".format(x,y),fill="#aaa",outline="#aaa")
screen.create_oval(54+50*x,52+50*y,96+50*x,94+50*y,tags="tile {0}-{1}".format(x,y),fill="#fff",outline="#fff")
elif self.oldarray[x][y]=="b":
screen.create_oval(54+50*x,54+50*y,96+50*x,96+50*y,tags="tile {0}-{1}".format(x,y),fill="#000",outline="#000")
screen.create_oval(54+50*x,52+50*y,96+50*x,94+50*y,tags="tile {0}-{1}".format(x,y),fill="#111",outline="#111")
#Animation of new tiles
screen.update()
for x in range(8):
for y in range(8):
#Could replace the circles with images later, if I want
if self.array[x][y]!=self.oldarray[x][y] and self.array[x][y]=="w":
screen.delete("{0}-{1}".format(x,y))
#42 is width of tile so 21 is half of that
#Shrinking
for i in range(21):
screen.create_oval(54+i+50*x,54+i+50*y,96-i+50*x,96-i+50*y,tags="tile animated",fill="#000",outline="#000")
screen.create_oval(54+i+50*x,52+i+50*y,96-i+50*x,94-i+50*y,tags="tile animated",fill="#111",outline="#111")
if i%3==0:
sleep(0.01)
screen.update()
screen.delete("animated")
#Growing
for i in reversed(range(21)):
screen.create_oval(54+i+50*x,54+i+50*y,96-i+50*x,96-i+50*y,tags="tile animated",fill="#aaa",outline="#aaa")
screen.create_oval(54+i+50*x,52+i+50*y,96-i+50*x,94-i+50*y,tags="tile animated",fill="#fff",outline="#fff")
if i%3==0:
sleep(0.01)
screen.update()
screen.delete("animated")
screen.create_oval(54+50*x,54+50*y,96+50*x,96+50*y,tags="tile",fill="#aaa",outline="#aaa")
screen.create_oval(54+50*x,52+50*y,96+50*x,94+50*y,tags="tile",fill="#fff",outline="#fff")
screen.update()
elif self.array[x][y]!=self.oldarray[x][y] and self.array[x][y]=="b":
screen.delete("{0}-{1}".format(x,y))
#42 is width of tile so 21 is half of that
#Shrinking
for i in range(21):
screen.create_oval(54+i+50*x,54+i+50*y,96-i+50*x,96-i+50*y,tags="tile animated",fill="#aaa",outline="#aaa")
screen.create_oval(54+i+50*x,52+i+50*y,96-i+50*x,94-i+50*y,tags="tile animated",fill="#fff",outline="#fff")
if i%3==0:
sleep(0.01)
screen.update()
screen.delete("animated")
#Growing
for i in reversed(range(21)):
screen.create_oval(54+i+50*x,54+i+50*y,96-i+50*x,96-i+50*y,tags="tile animated",fill="#000",outline="#000")
screen.create_oval(54+i+50*x,52+i+50*y,96-i+50*x,94-i+50*y,tags="tile animated",fill="#111",outline="#111")
if i%3==0:
sleep(0.01)
screen.update()
screen.delete("animated")
screen.create_oval(54+50*x,54+50*y,96+50*x,96+50*y,tags="tile",fill="#000",outline="#000")
screen.create_oval(54+50*x,52+50*y,96+50*x,94+50*y,tags="tile",fill="#111",outline="#111")
screen.update()
#Drawing of highlight circles
for x in range(8):
for y in range(8):
if self.player == 0:
if valid(self.array,self.player,x,y):
screen.create_oval(68+50*x,68+50*y,32+50*(x+1),32+50*(y+1),tags="highlight",fill="#008000",outline="#008000")
if not self.won:
#Draw the scoreboard and update the screen
self.drawScoreBoard()
screen.update()
#If the computer is AI, make a move
if self.player==1:
startTime = time()
self.oldarray = self.array
alphaBetaResult = self.alphaBeta(self.array,depth,-float("inf"),float("inf"),1)
self.array = alphaBetaResult[1]
if len(alphaBetaResult)==3:
position = alphaBetaResult[2]
self.oldarray[position[0]][position[1]]="b"
self.player = 1-self.player
deltaTime = round((time()-startTime)*100)/100
if deltaTime<2:
sleep(2-deltaTime)
nodes = 0
#Player must pass?
self.passTest()
else:
screen.create_text(250,550,anchor="c",font=("Consolas",15), text="The game is done!")
#Moves to position
def boardMove(self,x,y):
global nodes
#Move and update screen
self.oldarray = self.array
self.oldarray[x][y]="w"
self.array = move(self.array,x,y)
#Switch Player
self.player = 1-self.player
self.update()
#Check if ai must pass
self.passTest()
self.update()
#METHOD: Draws scoreboard to screen
def drawScoreBoard(self):
global moves
#Deleting prior score elements
screen.delete("score")
#Scoring based on number of tiles
player_score = 0
computer_score = 0
for x in range(8):
for y in range(8):
if self.array[x][y]=="w":
player_score+=1
elif self.array[x][y]=="b":
computer_score+=1
if self.player==0:
player_colour = "green"
computer_colour = "gray"
else:
player_colour = "gray"
computer_colour = "green"
screen.create_oval(5,540,25,560,fill=player_colour,outline=player_colour)
screen.create_oval(380,540,400,560,fill=computer_colour,outline=computer_colour)
#Pushing text to screen
screen.create_text(30,550,anchor="w", tags="score",font=("Consolas", 50),fill="white",text=player_score)
screen.create_text(400,550,anchor="w", tags="score",font=("Consolas", 50),fill="black",text=computer_score)
moves = player_score+computer_score
#METHOD: Test if player must pass: if they do, switch the player
def passTest(self):
mustPass = True
for x in range(8):
for y in range(8):
if valid(self.array,self.player,x,y):
mustPass=False
if mustPass:
self.player = 1-self.player
if self.passed==True:
self.won = True
else:
self.passed = True
self.update()
else:
self.passed = False
#METHOD: Stupid AI - Chooses a random move
def dumbMove(self):
#Generates all possible moves
choices = []
for x in range(8):
for y in range(8):
if valid(self.array,self.player,x,y):
choices.append([x,y])
#Chooses a random move, moves there
dumbChoice = choice(choices)
self.arrayMove(dumbChoice[0],dumbChoice[1])
#METHOD: Not so stupid AI - Chooses a move based on what will get it the most pieces next turn
def slightlyLessDumbMove(self):
#Generates all possible choices and boards corresponding to those
boards = []
choices = []
for x in range(8):
for y in range(8):
if valid(self.array,self.player,x,y):
test = move(self.array,x,y)
boards.append(test)
choices.append([x,y])
#Determines the best score based on the prior generated boards and a "Dumb" Heuristic: dumbScore()
bestScore = -float("inf")
bestIndex = 0
for i in range(len(boards)):
score= dumbScore(boards[i],self.player)
if score>bestScore:
bestIndex=i
bestScore = score
#Move to the best location based on dumbScore()
self.arrayMove(choices[bestIndex][0],choices[bestIndex][1])
#METHOD: Actually Decent AI - Choose a move based on a simple heuristic
#Same as slightlyLessDumbMove() just uses slightlyLessDumbScore()
def decentMove(self):
#Generates all possible choices and boards corresponding to those
boards = []
choices = []
for x in range(8):
for y in range(8):
if valid(self.array,self.player,x,y):
test = move(self.array,x,y)
boards.append(test)
choices.append([x,y])
bestScore = -float("inf")
bestIndex = 0
#Determines the best score based on the prior generated boards and a "Meh" Heuristic: slightlyLessDumbScore()
for i in range(len(boards)):
score= slightlyLessDumbScore(boards[i],self.player)
if score>bestScore:
bestIndex=i
bestScore = score
#Move to the best location based on slightlyLessDumbScore()
self.arrayMove(choices[bestIndex][0],choices[bestIndex][1])
#This contains the minimax algorithm
#http://en.wikipedia.org/wiki/Minimax
def minimax(self, node, depth, maximizing):
global nodes
nodes += 1
boards = []
choices = []
for x in range(8):
for y in range(8):
if valid(self.array,self.player,x,y):
test = move(node,x,y)
boards.append(test)
choices.append([x,y])
if depth==0 or len(choices)==0:
return ([decentHeuristic(node,1-maximizing),node])
if maximizing:
bestValue = -float("inf")
bestBoard = []
for board in boards:
val = self.minimax(board,depth-1,0)[0]
if val>bestValue:
bestValue = val
bestBoard = board
return ([bestValue,bestBoard])
else:
bestValue = float("inf")
bestBoard = []
for board in boards:
val = self.minimax(board,depth-1,1)[0]
if val<bestValue:
bestValue = val
bestBoard = board
return ([bestValue,bestBoard])
#alphaBeta pruning on the minimax tree
#http://en.wikipedia.org/wiki/Alpha%E2%80%93beta_pruning
def alphaBeta(self,node,depth,alpha,beta,maximizing):
global nodes
nodes += 1
boards = []
choices = []
for x in range(8):
for y in range(8):
if valid(self.array,self.player,x,y):
test = move(node,x,y)
boards.append(test)
choices.append([x,y])
if depth==0 or len(choices)==0:
return ([finalHeuristic(node,maximizing),node])
if maximizing:
v = -float("inf")
bestBoard = []
bestChoice = []
for board in boards:
boardValue = self.alphaBeta(board,depth-1,alpha,beta,0)[0]
if boardValue>v:
v = boardValue
bestBoard = board
bestChoice = choices[boards.index(board)]
alpha = max(alpha,v)
if beta <= alpha:
break
return([v,bestBoard,bestChoice])
else:
v = float("inf")
bestBoard = []
bestChoice = []
for board in boards:
boardValue = self.alphaBeta(board,depth-1,alpha,beta,1)[0]
if boardValue<v:
v = boardValue
bestBoard = board
bestChoice = choices[boards.index(board)]
beta = min(beta,v)
if beta<=alpha:
break
return([v,bestBoard,bestChoice])
#FUNCTION: Returns a board after making a move according to Othello rules
#Assumes the move is valid
def move(passedArray,x,y):
#Must copy the passedArray so we don't alter the original
array = deepcopy(passedArray)
#Set colour and set the moved location to be that colour
if board.player==0:
colour = "w"
else:
colour="b"
array[x][y]=colour
#Determining the neighbours to the square
neighbours = []
for i in range(max(0,x-1),min(x+2,8)):
for j in range(max(0,y-1),min(y+2,8)):
if array[i][j]!=None:
neighbours.append([i,j])
#Which tiles to convert
convert = []
#For all the generated neighbours, determine if they form a line
#If a line is formed, we will add it to the convert array
for neighbour in neighbours:
neighX = neighbour[0]
neighY = neighbour[1]
#Check if the neighbour is of a different colour - it must be to form a line
if array[neighX][neighY]!=colour:
#The path of each individual line
path = []
#Determining direction to move
deltaX = neighX-x
deltaY = neighY-y
tempX = neighX
tempY = neighY
#While we are in the bounds of the board
while 0<=tempX<=7 and 0<=tempY<=7:
path.append([tempX,tempY])
value = array[tempX][tempY]
#If we reach a blank tile, we're done and there's no line
if value==None:
break
#If we reach a tile of the player's colour, a line is formed
if value==colour:
#Append all of our path nodes to the convert array
for node in path:
convert.append(node)
break
                    #Advance the index along the direction of the line
tempX+=deltaX
tempY+=deltaY
#Convert all the appropriate tiles
for node in convert:
array[node[0]][node[1]]=colour
return array
#Method for drawing the gridlines
def drawGridBackground(outline=False):
#If we want an outline on the board then draw one
if outline:
screen.create_rectangle(50,50,450,450,outline="#111")
#Drawing the intermediate lines
for i in range(7):
lineShift = 50+50*(i+1)
#Horizontal line
screen.create_line(50,lineShift,450,lineShift,fill="#111")
#Vertical line
screen.create_line(lineShift,50,lineShift,450,fill="#111")
screen.update()
#Simple heuristic. Compares number of each tile.
def dumbScore(array,player):
score = 0
#Set player and opponent colours
if player==1:
colour="b"
opponent="w"
else:
colour = "w"
opponent = "b"
#+1 if it's player colour, -1 if it's opponent colour
for x in range(8):
for y in range(8):
if array[x][y]==colour:
score+=1
elif array[x][y]==opponent:
score-=1
return score
#Less simple but still simple heuristic. Weights corners and edges more heavily than other tiles
def slightlyLessDumbScore(array,player):
score = 0
#Set player and opponent colours
if player==1:
colour="b"
opponent="w"
else:
colour = "w"
opponent = "b"
#Go through all the tiles
for x in range(8):
for y in range(8):
#Normal tiles worth 1
add = 1
#Edge tiles worth 3
if (x==0 and 1<y<6) or (x==7 and 1<y<6) or (y==0 and 1<x<6) or (y==7 and 1<x<6):
add=3
#Corner tiles worth 5
elif (x==0 and y==0) or (x==0 and y==7) or (x==7 and y==0) or (x==7 and y==7):
add = 5
#Add or subtract the value of the tile corresponding to the colour
if array[x][y]==colour:
score+=add
elif array[x][y]==opponent:
score-=add
return score
#Heuristic that weights corner and edge tiles as positive, and tiles adjacent to a corner as negative when that corner is not yours
#Weights all other tiles as one point
def decentHeuristic(array,player):
score = 0
cornerVal = 25
adjacentVal = 5
sideVal = 5
#Set player and opponent colours
if player==1:
colour="b"
opponent="w"
else:
colour = "w"
opponent = "b"
#Go through all the tiles
for x in range(8):
for y in range(8):
#Normal tiles worth 1
add = 1
            #Tiles adjacent to a corner are worth -5, or +5 if that corner is already yours
if (x==0 and y==1) or (x==1 and 0<=y<=1):
if array[0][0]==colour:
add = sideVal
else:
add = -adjacentVal
elif (x==0 and y==6) or (x==1 and 6<=y<=7):
if array[7][0]==colour:
add = sideVal
else:
add = -adjacentVal
elif (x==7 and y==1) or (x==6 and 0<=y<=1):
if array[0][7]==colour:
add = sideVal
else:
add = -adjacentVal
elif (x==7 and y==6) or (x==6 and 6<=y<=7):
if array[7][7]==colour:
add = sideVal
else:
add = -adjacentVal
            #Edge tiles worth 5
elif (x==0 and 1<y<6) or (x==7 and 1<y<6) or (y==0 and 1<x<6) or (y==7 and 1<x<6):
add=sideVal
            #Corner tiles worth 25
elif (x==0 and y==0) or (x==0 and y==7) or (x==7 and y==0) or (x==7 and y==7):
add = cornerVal
#Add or subtract the value of the tile corresponding to the colour
if array[x][y]==colour:
score+=add
elif array[x][y]==opponent:
score-=add
return score
#Separating the use of heuristics for early/mid/late game.
def finalHeuristic(array,player):
if moves<=8:
numMoves = 0
for x in range(8):
for y in range(8):
if valid(array,player,x,y):
numMoves += 1
return numMoves+decentHeuristic(array,player)
elif moves<=52:
return decentHeuristic(array,player)
elif moves<=58:
return slightlyLessDumbScore(array,player)
else:
return dumbScore(array,player)
#Checks if a move is valid for a given array.
def valid(array,player,x,y):
#Sets player colour
if player==0:
colour="w"
else:
colour="b"
#If there's already a piece there, it's an invalid move
if array[x][y]!=None:
return False
else:
#Generating the list of neighbours
neighbour = False
neighbours = []
for i in range(max(0,x-1),min(x+2,8)):
for j in range(max(0,y-1),min(y+2,8)):
if array[i][j]!=None:
neighbour=True
neighbours.append([i,j])
#If there's no neighbours, it's an invalid move
if not neighbour:
return False
else:
#Iterating through neighbours to determine if at least one line is formed
valid = False
for neighbour in neighbours:
neighX = neighbour[0]
neighY = neighbour[1]
#If the neighbour colour is equal to your colour, it doesn't form a line
#Go onto the next neighbour
if array[neighX][neighY]==colour:
continue
else:
#Determine the direction of the line
deltaX = neighX-x
deltaY = neighY-y
tempX = neighX
tempY = neighY
while 0<=tempX<=7 and 0<=tempY<=7:
#If an empty space, no line is formed
if array[tempX][tempY]==None:
break
#If it reaches a piece of the player's colour, it forms a line
if array[tempX][tempY]==colour:
valid=True
break
#Move the index according to the direction of the line
tempX+=deltaX
tempY+=deltaY
return valid
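#Hedged illustrative check (not part of the original program): on the standard
#opening position, (3,5) is a legal move for white because it forms a line
#through the black disc at (3,4) to the white disc at (3,3), while (0,0) has
#no occupied neighbours and is therefore invalid.
if __debug__:
    _opening = [[None] * 8 for _ in range(8)]
    _opening[3][3] = "w"
    _opening[4][4] = "w"
    _opening[3][4] = "b"
    _opening[4][3] = "b"
    assert valid(_opening, 0, 3, 5)
    assert not valid(_opening, 0, 0, 0)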
#When the user clicks, if it's a valid move, make the move
def clickHandle(event):
global depth
xMouse = event.x
yMouse = event.y
if running:
if xMouse>=450 and yMouse<=50:
root.destroy()
elif xMouse<=50 and yMouse<=50:
playGame()
else:
#Is it the player's turn?
if board.player==0:
                #Determine the grid index for where the mouse was clicked
x = int((event.x-50)/50)
y = int((event.y-50)/50)
#If the click is inside the bounds and the move is valid, move to that location
if 0<=x<=7 and 0<=y<=7:
if valid(board.array,board.player,x,y):
board.boardMove(x,y)
else:
#Difficulty clicking
if 300<=yMouse<=350:
#One star
if 25<=xMouse<=155:
depth = 1
playGame()
#Two star
elif 180<=xMouse<=310:
depth = 4
playGame()
#Three star
elif 335<=xMouse<=465:
depth = 6
playGame()
def keyHandle(event):
symbol = event.keysym
if symbol.lower()=="r":
playGame()
elif symbol.lower()=="q":
root.destroy()
def create_buttons():
#Restart button
#Background/shadow
screen.create_rectangle(0,5,50,55,fill="#000033", outline="#000033")
screen.create_rectangle(0,0,50,50,fill="#000088", outline="#000088")
#Arrow
screen.create_arc(5,5,45,45,fill="#000088", width="2",style="arc",outline="white",extent=300)
screen.create_polygon(33,38,36,45,40,39,fill="white",outline="white")
#Quit button
#Background/shadow
screen.create_rectangle(450,5,500,55,fill="#330000", outline="#330000")
screen.create_rectangle(450,0,500,50,fill="#880000", outline="#880000")
#"X"
screen.create_line(455,5,495,45,fill="white",width="3")
screen.create_line(495,5,455,45,fill="white",width="3")
def runGame():
global running
running = False
#Title and shadow
screen.create_text(250,203,anchor="c",text="Othello",font=("Consolas", 50),fill="#aaa")
screen.create_text(250,200,anchor="c",text="Othello",font=("Consolas", 50),fill="#fff")
#Creating the difficulty buttons
for i in range(3):
#Background
screen.create_rectangle(25+155*i, 310, 155+155*i, 355, fill="#000", outline="#000")
screen.create_rectangle(25+155*i, 300, 155+155*i, 350, fill="#111", outline="#111")
spacing = 130/(i+2)
for x in range(i+1):
#Star with double shadow
screen.create_text(25+(x+1)*spacing+155*i,326,anchor="c",text="\u2605", font=("Consolas", 25),fill="#b29600")
screen.create_text(25+(x+1)*spacing+155*i,327,anchor="c",text="\u2605", font=("Consolas",25),fill="#b29600")
screen.create_text(25+(x+1)*spacing+155*i,325,anchor="c",text="\u2605", font=("Consolas", 25),fill="#ffd700")
screen.update()
def playGame():
global board, running
running = True
screen.delete(ALL)
create_buttons()
board = 0
#Draw the background
drawGridBackground()
#Create the board and update it
board = Board()
board.update()
runGame()
#Binding, setting
screen.bind("<Button-1>", clickHandle)
screen.bind("<Key>",keyHandle)
screen.focus_set()
#Run forever
root.wm_title("Othello")
root.mainloop() | python | 20,487 |
#!/usr/bin/env python
from unittest import TestCase, main
from numpy import alltrue, array, transpose
from cogent3.core.alignment import Alignment, ArrayAlignment
from cogent3.core.moltype import RNA
from cogent3.core.sequence import ArraySequence, RnaSequence
__author__ = "Sandra Smit"
__copyright__ = "Copyright 2007-2020, The Cogent Project"
__credits__ = ["Sandra Smit", "Gavin Huttley"]
__license__ = "BSD-3"
__version__ = "2020.12.21a"
__maintainer__ = "Sandra Smit"
__email__ = "[email protected]"
__status__ = "Production"
from numpy.testing import assert_equal
class AllTests(TestCase):
def setUp(self):
"""setUp method for all tests"""
# named sequences
self.rna1 = RnaSequence("UCAGGG", name="rna1")
self.rna2 = RnaSequence("YCU-RG", name="rna2")
self.rna3 = RnaSequence("CAA-NR", name="rna3")
self.model1 = ArraySequence(
"UCAGGG", name="rna1", alphabet=RNA.alphabets.degen_gapped
)
self.model2 = ArraySequence(
"YCU-RG", name="rna2", alphabet=RNA.alphabets.degen_gapped
)
self.model3 = ArraySequence(
"CAA-NR", name="rna3", alphabet=RNA.alphabets.degen_gapped
)
self.aln = Alignment([self.rna1, self.rna2, self.rna3], moltype=RNA)
self.da = ArrayAlignment(
[self.model1, self.model2, self.model3],
moltype=RNA,
alphabet=RNA.alphabets.degen_gapped,
)
# seqs no name
self.nn_rna1 = RnaSequence("UCAGGG")
self.nn_rna2 = RnaSequence("YCU-RG")
self.nn_rna3 = RnaSequence("CAA-NR")
self.nn_model1 = ArraySequence("UCAGGG", alphabet=RNA.alphabets.degen_gapped)
self.nn_model2 = ArraySequence("YCU-RG", alphabet=RNA.alphabets.degen_gapped)
self.nn_model3 = ArraySequence("CAA-NR", alphabet=RNA.alphabets.degen_gapped)
self.nn_aln = Alignment([self.nn_rna1, self.nn_rna2, self.nn_rna3], moltype=RNA)
self.nn_da = ArrayAlignment(
[self.nn_model1, self.nn_model2, self.nn_model3],
moltype=RNA,
alphabet=RNA.alphabets.degen_gapped,
)
def test_printing_named_seqs(self):
"""Printing named seqs should work the same on Aln and DenseAln"""
# Note: the newline trailing each sequence is intentional, because
# we want each FASTA-format record to be separated.
exp_lines_general = [">rna1", "UCAGGG", ">rna2", "YCU-RG", ">rna3", "CAA-NR"]
self.assertEqual(str(self.aln), "\n".join(exp_lines_general) + "\n")
self.assertEqual(str(self.da), "\n".join(exp_lines_general) + "\n")
def test_printing_unnamed_seqs(self):
"""Printing unnamed sequences should work the same on Aln and DenseAln"""
exp_lines_gen = [">seq_0", "UCAGGG", ">seq_1", "YCU-RG", ">seq_2", "CAA-NR\n"]
self.assertEqual(str(self.nn_aln), "\n".join(exp_lines_gen))
self.assertEqual(str(self.nn_da), "\n".join(exp_lines_gen))
def test_ArrayAlignment_without_moltype(self):
"""Expect MolType to be picked up from the sequences."""
m1 = ArraySequence("UCAG", alphabet=RNA.alphabets.degen_gapped, name="rna1")
m2 = ArraySequence("CCCR", alphabet=RNA.alphabets.degen_gapped, name="rna2")
da = ArrayAlignment([m1, m2])
exp_lines = [">rna1", "UCAG", ">rna2", "CCCR"]
self.assertEqual(str(da), "\n".join(exp_lines) + "\n")
def test_names(self):
# Should both alignments handle names the same way?
self.assertEqual(self.aln.names, ["rna1", "rna2", "rna3"])
self.assertEqual(self.da.names, ["rna1", "rna2", "rna3"])
# On unnamed sequences the behavior is now the same.
self.assertEqual(self.nn_aln.names, ["seq_0", "seq_1", "seq_2"])
self.assertEqual(self.nn_da.names, ["seq_0", "seq_1", "seq_2"])
def test_seqFreqs(self):
"""seqFreqs should work the same on Alignment and ArrayAlignment"""
get_index = RNA.alphabets.degen_gapped.index
# 'UCAGGG'
# 'YCU-RG'
# 'CAA-NR'
expected_counts = {
0: {"U": 1, "C": 1, "A": 1, "G": 3},
1: {"Y": 1, "C": 1, "U": 1, "-": 1, "R": 1, "G": 1},
2: {"C": 1, "A": 2, "-": 1, "N": 1, "R": 1},
}
got1 = self.da.counts_per_seq(allow_gap=True, include_ambiguity=True)
got2 = self.aln.counts_per_seq(allow_gap=True, include_ambiguity=True)
for pos, counts in expected_counts.items():
for char in counts:
self.assertEqual(got1[pos, char], expected_counts[pos][char])
self.assertEqual(got2[pos, char], expected_counts[pos][char])
def test_subset_positions_ArrayAlignment(self):
        # because dict order is volatile, we need to grab the
        # index for ambiguous characters from the object
# The full data comes from these seqs
# 'UCAGGG'
# 'YCU-RG'
# 'CAA-NR'
get_index = RNA.alphabets.degen_gapped.index
G = get_index("-")
N = get_index("N")
R = get_index("R")
Y = get_index("Y")
full_data = array([[0, 1, 2, 3, 3, 3], [Y, 1, 0, G, R, 3], [1, 2, 2, G, N, R]])
model1 = ArraySequence("UCG", name="rna1", alphabet=RNA.alphabets.degen_gapped)
model2 = ArraySequence("YCG", name="rna2", alphabet=RNA.alphabets.degen_gapped)
model3 = ArraySequence("CAR", name="rna3", alphabet=RNA.alphabets.degen_gapped)
sub_da = ArrayAlignment(
[model1, model2, model3], moltype=RNA, alphabet=RNA.alphabets.degen_gapped
)
sub_data = array([[0, 1, 3], [Y, 1, 3], [1, 2, R]])
# First check some data
assert_equal(self.da.array_seqs, full_data)
assert_equal(self.da.array_positions, transpose(full_data))
assert_equal(sub_da.array_seqs, sub_data)
assert_equal(sub_da.array_positions, transpose(sub_data))
obs_sub_da_TP = self.da.take_positions([0, 1, 5])
obs_sub_da_SA = self.da.get_sub_alignment(pos=[0, 1, 5])
# When using the get_sub_alignment method the data is right
self.assertEqual(obs_sub_da_SA, sub_da)
self.assertNotEqual(obs_sub_da_SA, self.da)
assert_equal(obs_sub_da_SA.array_seqs, sub_data)
assert_equal(obs_sub_da_SA.array_positions, transpose(sub_data))
# For the take_positions method: Why does this work
self.assertEqual(obs_sub_da_TP, sub_da)
self.assertNotEqual(obs_sub_da_TP, self.da)
# If the data doesn't match?
assert_equal(obs_sub_da_TP.array_seqs, sub_data)
assert_equal(obs_sub_da_TP.array_positions, transpose(sub_data))
# Shouldn't the __eq__ method check the data at least?
def test_subset_positions_Alignment(self):
rna1 = RnaSequence("UCG", name="rna1")
rna2 = RnaSequence("YCG", name="rna2")
rna3 = RnaSequence("CAR", name="rna3")
sub_aln = Alignment([rna1, rna2, rna3], moltype=RNA)
obs_sub_aln = self.aln.take_positions([0, 1, 5])
self.assertEqual(obs_sub_aln, sub_aln)
self.assertNotEqual(obs_sub_aln, self.aln)
        # string representations should be the same; sequence order is
        # maintained, so this holds (see the sequence order test below).
self.assertEqual(str(obs_sub_aln), str(sub_aln))
def test_take_positions_sequence_order(self):
"""Alignment take_positions should maintain seq order"""
# This works
self.assertEqual(self.da.names, ["rna1", "rna2", "rna3"])
sub_da = self.da.get_sub_alignment(pos=[0, 1, 5])
self.assertEqual(sub_da.names, ["rna1", "rna2", "rna3"])
        # seq order is maintained in Alignment as well
self.assertEqual(self.aln.names, ["rna1", "rna2", "rna3"])
sub_aln = self.aln.take_positions([0, 1, 5])
self.assertEqual(sub_aln.names, ["rna1", "rna2", "rna3"])
def test_subset_seqs_Alignment(self):
rna1 = RnaSequence("UCG", name="rna1")
rna2 = RnaSequence("YCG", name="rna2")
rna3 = RnaSequence("CAR", name="rna3")
sub_aln = Alignment([rna2, rna3], moltype=RNA)
aln = Alignment([rna1, rna2, rna3], moltype=RNA)
obs_sub_aln = aln.take_seqs(["rna2", "rna3"])
self.assertEqual(obs_sub_aln, sub_aln)
self.assertEqual(str(obs_sub_aln), str(sub_aln))
# Selected sequences should be in specified order?
obs_sub_aln_1 = self.aln.take_seqs(["rna3", "rna2"])
obs_sub_aln_2 = self.aln.take_seqs(["rna2", "rna3"])
self.assertNotEqual(str(obs_sub_aln_1), str(obs_sub_aln_2))
def test_subset_seqs_ArrayAlignment(self):
model1 = ArraySequence("UCG", name="rna1", alphabet=RNA.alphabets.degen_gapped)
model2 = ArraySequence("YCG", name="rna2", alphabet=RNA.alphabets.degen_gapped)
model3 = ArraySequence("CAR", name="rna3", alphabet=RNA.alphabets.degen_gapped)
sub_da = ArrayAlignment(
[model1, model2, model3], moltype=RNA, alphabet=RNA.alphabets.degen_gapped
)
# take_seqs by name should have the same effect as
# get_sub_alignment by seq idx?
obs_sub_da_TS = self.da.take_seqs(["rna1"])
obs_sub_da_SA = self.da.get_sub_alignment(seqs=[0])
# These two are now the same. Fixed mapping of key to char array.
self.assertEqual(obs_sub_da_TS, obs_sub_da_SA)
self.assertEqual(str(obs_sub_da_TS), str(obs_sub_da_SA))
def test_aln_equality(self):
# When does something compare equal?
self.assertEqual(self.da == self.da, True)
# one sequence less
other_da1 = ArrayAlignment(
[self.model1, self.model2], moltype=RNA, alphabet=RNA.alphabets.degen_gapped
)
self.assertEqual(self.da == other_da1, False)
# seqs in different order -- doesn't matter
other_da2 = ArrayAlignment(
[self.model1, self.model3, self.model2],
moltype=RNA,
alphabet=RNA.alphabets.degen_gapped,
)
self.assertEqual(self.da == other_da2, True)
# seqs in different encoding -- doesn't matter, only looks at data
other_da3 = ArrayAlignment([self.model1, self.model2, self.model3])
# Should this compare False even though the data is exactly the same?
# The moltype is different...
self.assertEqual(self.da == other_da3, True)
assert alltrue(list(map(alltrue, self.da.array_seqs == other_da3.array_seqs)))
def test_seq_equality(self):
model1 = ArraySequence("UCG", name="rna1", alphabet=RNA.alphabets.degen_gapped)
model2 = ArraySequence("UCG", name="rna1", alphabet=RNA.alphabets.degen_gapped)
# Shouldn't the above two sequences be equal?
self.assertEqual(model1, model2)
# string comparison is True
self.assertEqual(str(model1), str(model2))
def test_seq_ungapping(self):
rna1 = RnaSequence("U-C-A-G-", name="rna1")
model1 = ArraySequence(
"U-C-A-G-", name="rna1", alphabet=RNA.alphabets.degen_gapped
)
self.assertEqual(rna1, "U-C-A-G-")
self.assertEqual(rna1.degap(), "UCAG")
# check it produces the right string from the beginning
self.assertEqual(str(model1), "U-C-A-G-")
assert_equal(model1._data, [0, 4, 1, 4, 2, 4, 3, 4])
# ArraySequence should maybe have the same degap method as normal seq
self.assertEqual(str(model1.degap()), "UCAG")
def test_the_rest_of_ModelSequence(self):
"""The class ArraySequence has 14 methods, but only 2 unittests.
You might want to add some tests there..."""
# note: mostly these are tested in derived classes, for convenience.
pass
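# --- Illustration (added; not part of the original test suite) ---
# A minimal sketch of one additional ArraySequence test that could live in
# this test case, exercising len() and the str() round-trip; the method name
# and the sample sequence are assumptions made for illustration only.
def test_array_sequence_len_and_str(self):
    """ArraySequence should report its length and round-trip through str"""
    seq = ArraySequence("UCAG", name="rna1", alphabet=RNA.alphabets.degen_gapped)
    self.assertEqual(len(seq), 4)
    self.assertEqual(str(seq), "UCAG")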
if __name__ == "__main__":
main()
| python | 11,802 |
# See https://gist.github.com/roshammar
from flask import make_response
from functools import update_wrapper
def nocache(f):
"""
Add cache control headers to prevent web caches from caching results.
"""
def new_func(*args, **kwargs):
resp = make_response(f(*args, **kwargs))
resp.headers['Cache-Control'] = 'no-store, no-cache, must-revalidate, post-check=0, pre-check=0'
return resp
return update_wrapper(new_func, f)
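# --- Usage sketch (added for illustration; not part of the original helper) ---
# One way @nocache might be applied to a Flask view. The app instance and the
# '/status' route are made-up assumptions for this example.
from flask import Flask

app = Flask(__name__)

@app.route('/status')
@nocache
def status():
    # The response returned here is wrapped by nocache, so it carries the
    # Cache-Control header defined above.
    return 'ok'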
| python | 466 |
import logging
from typing import Dict
from typing import List
from typing import Optional
from typing import Tuple
import boto3
import botocore
import neo4j
from cartography.util import run_cleanup_job
from cartography.util import timeit
logger = logging.getLogger(__name__)
@timeit
def link_aws_resources(neo4j_session: neo4j.Session, update_tag: int) -> None:
# find records that point to other records
link_records = """
MATCH (n:AWSDNSRecord) WITH n MATCH (v:AWSDNSRecord{value: n.name})
WHERE NOT n = v
MERGE (v)-[p:DNS_POINTS_TO]->(n)
ON CREATE SET p.firstseen = timestamp()
SET p.lastupdated = {update_tag}
"""
neo4j_session.run(link_records, update_tag=update_tag)
# find records that point to AWS LoadBalancers
link_elb = """
MATCH (n:AWSDNSRecord) WITH n MATCH (l:LoadBalancer{dnsname: n.value})
MERGE (n)-[p:DNS_POINTS_TO]->(l)
ON CREATE SET p.firstseen = timestamp()
SET p.lastupdated = {update_tag}
"""
neo4j_session.run(link_elb, update_tag=update_tag)
# find records that point to AWS LoadBalancersV2
link_elbv2 = """
MATCH (n:AWSDNSRecord) WITH n MATCH (l:LoadBalancerV2{dnsname: n.value})
MERGE (n)-[p:DNS_POINTS_TO]->(l)
ON CREATE SET p.firstseen = timestamp()
SET p.lastupdated = {update_tag}
"""
neo4j_session.run(link_elbv2, update_tag=update_tag)
# find records that point to AWS EC2 Instances
link_ec2 = """
MATCH (n:AWSDNSRecord) WITH n MATCH (e:EC2Instance{publicdnsname: n.value})
MERGE (n)-[p:DNS_POINTS_TO]->(e)
ON CREATE SET p.firstseen = timestamp()
SET p.lastupdated = {update_tag}
"""
neo4j_session.run(link_ec2, update_tag=update_tag)
@timeit
def load_a_records(neo4j_session: neo4j.Session, records: List[Dict], update_tag: int) -> None:
ingest_records = """
UNWIND {records} as record
MERGE (a:DNSRecord:AWSDNSRecord{id: record.id})
ON CREATE SET
a.firstseen = timestamp(),
a.name = record.name,
a.type = record.type
SET
a.lastupdated = {update_tag},
a.value = record.value
WITH a,record
MATCH (zone:AWSDNSZone{zoneid: record.zoneid})
MERGE (a)-[r:MEMBER_OF_DNS_ZONE]->(zone)
ON CREATE SET r.firstseen = timestamp()
SET r.lastupdated = {update_tag}
"""
neo4j_session.run(
ingest_records,
records=records,
update_tag=update_tag,
)
@timeit
def load_alias_records(neo4j_session: neo4j.Session, records: List[Dict], update_tag: int) -> None:
# create the DNSRecord nodes and link them to matching DNSZone and S3Bucket nodes
ingest_records = """
UNWIND {records} as record
MERGE (a:DNSRecord:AWSDNSRecord{id: record.id})
ON CREATE SET
a.firstseen = timestamp(),
a.name = record.name,
a.type = record.type
SET
a.lastupdated = {update_tag},
a.value = record.value
WITH a,record
MATCH (zone:AWSDNSZone{zoneid: record.zoneid})
MERGE (a)-[r:MEMBER_OF_DNS_ZONE]->(zone)
ON CREATE SET r.firstseen = timestamp()
SET r.lastupdated = {update_tag}
"""
neo4j_session.run(
ingest_records,
records=records,
update_tag=update_tag,
)
@timeit
def load_cname_records(neo4j_session: neo4j.Session, records: List[Dict], update_tag: int) -> None:
ingest_records = """
UNWIND {records} as record
MERGE (a:DNSRecord:AWSDNSRecord{id: record.id})
ON CREATE SET
a.firstseen = timestamp(),
a.name = record.name,
a.type = record.type
SET
a.lastupdated = {update_tag},
a.value = record.value
WITH a,record
MATCH (zone:AWSDNSZone{zoneid: record.zoneid})
MERGE (a)-[r:MEMBER_OF_DNS_ZONE]->(zone)
ON CREATE SET r.firstseen = timestamp()
SET r.lastupdated = {update_tag}
"""
neo4j_session.run(
ingest_records,
records=records,
update_tag=update_tag,
)
@timeit
def load_zone(neo4j_session: neo4j.Session, zone: Dict, current_aws_id: str, update_tag: int) -> None:
ingest_z = """
MERGE (zone:DNSZone:AWSDNSZone{zoneid:{ZoneId}})
ON CREATE SET
zone.firstseen = timestamp(),
zone.name = {ZoneName}
SET
zone.lastupdated = {update_tag},
zone.comment = {Comment},
zone.privatezone = {PrivateZone}
WITH zone
MATCH (aa:AWSAccount{id: {AWS_ACCOUNT_ID}})
MERGE (aa)-[r:RESOURCE]->(zone)
ON CREATE SET r.firstseen = timestamp()
SET r.lastupdated = {update_tag}
"""
neo4j_session.run(
ingest_z,
ZoneName=zone['name'][:-1],
ZoneId=zone['zoneid'],
Comment=zone['comment'],
PrivateZone=zone['privatezone'],
AWS_ACCOUNT_ID=current_aws_id,
update_tag=update_tag,
)
@timeit
def load_ns_records(neo4j_session: neo4j.Session, records: List[Dict], zone_name: str, update_tag: int) -> None:
ingest_records = """
UNWIND {records} as record
MERGE (a:DNSRecord:AWSDNSRecord{id: record.id})
ON CREATE SET
a.firstseen = timestamp(),
a.name = record.name,
a.type = record.type
SET
a.lastupdated = {update_tag},
a.value = record.name
WITH a,record
MATCH (zone:AWSDNSZone{zoneid: record.zoneid})
MERGE (a)-[r:MEMBER_OF_DNS_ZONE]->(zone)
ON CREATE SET r.firstseen = timestamp()
SET r.lastupdated = {update_tag}
WITH a,record
UNWIND record.servers as server
MERGE (ns:NameServer{id:server})
ON CREATE SET ns.firstseen = timestamp()
SET
ns.lastupdated = {update_tag},
ns.name = server
MERGE (a)-[pt:DNS_POINTS_TO]->(ns)
SET pt.lastupdated = {update_tag}
"""
neo4j_session.run(
ingest_records,
records=records,
update_tag=update_tag,
)
# Map the official name servers for a domain.
map_ns_records = """
UNWIND {servers} as server
MATCH (ns:NameServer{id:server})
MATCH (zone:AWSDNSZone{zoneid:{zoneid}})
MERGE (ns)<-[r:NAMESERVER]-(zone)
SET r.lastupdated = {update_tag}
"""
for record in records:
if zone_name == record["name"]:
neo4j_session.run(
map_ns_records,
servers=record["servers"],
zoneid=record["zoneid"],
update_tag=update_tag,
)
@timeit
def link_sub_zones(neo4j_session: neo4j.Session, update_tag: int) -> None:
query = """
match (z:AWSDNSZone)
<-[:MEMBER_OF_DNS_ZONE]-
(record:DNSRecord{type:"NS"})
-[:DNS_POINTS_TO]->
(ns:NameServer)
<-[:NAMESERVER]-
(z2)
WHERE record.name=z2.name AND NOT z=z2
MERGE (z2)<-[r:SUBZONE]-(z)
ON CREATE SET r.firstseen = timestamp()
SET r.lastupdated = {update_tag}
"""
neo4j_session.run(
query,
update_tag=update_tag,
)
@timeit
def transform_record_set(record_set: Dict, zone_id: str, name: str) -> Optional[Dict]:
# process CNAME, ALIAS and A records
if record_set['Type'] == 'CNAME':
if 'AliasTarget' in record_set:
# this is a weighted CNAME record
value = record_set['AliasTarget']['DNSName']
if value.endswith('.'):
value = value[:-1]
return {
"name": name,
"type": 'CNAME',
"zoneid": zone_id,
"value": value,
"id": _create_dns_record_id(zone_id, name, 'WEIGHTED_CNAME'),
}
else:
# This is a normal CNAME record
value = record_set['ResourceRecords'][0]['Value']
if value.endswith('.'):
value = value[:-1]
return {
"name": name,
"type": 'CNAME',
"zoneid": zone_id,
"value": value,
"id": _create_dns_record_id(zone_id, name, 'CNAME'),
}
elif record_set['Type'] == 'A':
if 'AliasTarget' in record_set:
# this is an ALIAS record
# ALIAS records are a special AWS-only type of A record
return {
"name": name,
"type": 'ALIAS',
"zoneid": zone_id,
"value": record_set['AliasTarget']['DNSName'][:-1],
"id": _create_dns_record_id(zone_id, name, 'ALIAS'),
}
else:
# this is a real A record
# join each value (IP address) into a comma separated string
value = ','.join(a_value['Value'] for a_value in record_set['ResourceRecords'])
return {
"name": name,
"type": 'A',
"zoneid": zone_id,
"value": value,
"id": _create_dns_record_id(zone_id, name, 'A'),
}
else:
return None
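# --- Illustration (added; not part of the upstream module) ---
# A sketch of what transform_record_set yields for a plain A record. The zone
# id, record name, and IP addresses below are made-up sample values.
def _example_transform_a_record() -> Optional[Dict]:
    sample_record_set = {
        'Name': 'app.example.com.',
        'Type': 'A',
        'ResourceRecords': [{'Value': '10.0.0.1'}, {'Value': '10.0.0.2'}],
    }
    # Expected shape:
    # {'name': 'app.example.com', 'type': 'A', 'zoneid': 'Z123',
    #  'value': '10.0.0.1,10.0.0.2', 'id': 'Z123/app.example.com/A'}
    return transform_record_set(sample_record_set, 'Z123', 'app.example.com')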
@timeit
def transform_ns_record_set(record_set: Dict, zone_id: str) -> Optional[Dict]:
if "ResourceRecords" in record_set:
# Sometimes the value records have a trailing period, sometimes they don't.
servers = [_normalize_dns_address(record["Value"]) for record in record_set["ResourceRecords"]]
return {
"zoneid": zone_id,
"type": "NS",
# looks like "name.some.fqdn.net.", so this removes the trailing period.
"name": _normalize_dns_address(record_set["Name"]),
"servers": servers,
"id": _create_dns_record_id(zone_id, record_set['Name'][:-1], 'NS'),
}
else:
return None
@timeit
def transform_zone(zone: Dict) -> Dict:
comment = zone['Config'].get('Comment', '')
return {
"zoneid": zone['Id'],
"name": zone['Name'],
"privatezone": zone['Config']['PrivateZone'],
"comment": comment,
"count": zone['ResourceRecordSetCount'],
}
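# --- Illustration (added; not part of the upstream module) ---
# A sketch of the flattening transform_zone performs, using a made-up hosted
# zone payload shaped like a route53 list_hosted_zones entry.
def _example_transform_zone() -> Dict:
    sample_zone = {
        'Id': '/hostedzone/Z123',
        'Name': 'example.com.',
        'Config': {'PrivateZone': False},  # no 'Comment' key, so comment falls back to ''
        'ResourceRecordSetCount': 42,
    }
    # Expected: {'zoneid': '/hostedzone/Z123', 'name': 'example.com.',
    #            'privatezone': False, 'comment': '', 'count': 42}
    return transform_zone(sample_zone)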
@timeit
def load_dns_details(
neo4j_session: neo4j.Session, dns_details: List[Tuple[Dict, List[Dict]]], current_aws_id: str,
update_tag: int,
) -> None:
"""
Create the paths
(:AWSAccount)--(:AWSDNSZone)--(:AWSDNSRecord),
(:AWSDNSZone)--(:NameServer),
(:AWSDNSRecord{type:"NS"})-[:DNS_POINTS_TO]->(:NameServer),
(:AWSDNSRecord)-[:DNS_POINTS_TO]->(:AWSDNSRecord).
"""
for zone, zone_record_sets in dns_details:
zone_a_records = []
zone_alias_records = []
zone_cname_records = []
zone_ns_records = []
parsed_zone = transform_zone(zone)
load_zone(neo4j_session, parsed_zone, current_aws_id, update_tag)
for record_set in zone_record_sets:
if record_set['Type'] == 'A' or record_set['Type'] == 'CNAME':
record = transform_record_set(record_set, zone['Id'], record_set['Name'][:-1])
if record['type'] == 'A':
zone_a_records.append(record)
elif record['type'] == 'ALIAS':
zone_alias_records.append(record)
elif record['type'] == 'CNAME':
zone_cname_records.append(record)
if record_set['Type'] == 'NS':
record = transform_ns_record_set(record_set, zone['Id'])
zone_ns_records.append(record)
if zone_a_records:
load_a_records(neo4j_session, zone_a_records, update_tag)
if zone_alias_records:
load_alias_records(neo4j_session, zone_alias_records, update_tag)
if zone_cname_records:
load_cname_records(neo4j_session, zone_cname_records, update_tag)
if zone_ns_records:
load_ns_records(neo4j_session, zone_ns_records, parsed_zone['name'][:-1], update_tag)
link_aws_resources(neo4j_session, update_tag)
@timeit
def get_zone_record_sets(client: botocore.client.BaseClient, zone_id: str) -> List[Dict]:
resource_record_sets: List[Dict] = []
paginator = client.get_paginator('list_resource_record_sets')
pages = paginator.paginate(HostedZoneId=zone_id)
for page in pages:
resource_record_sets.extend(page['ResourceRecordSets'])
return resource_record_sets
@timeit
def get_zones(client: botocore.client.BaseClient) -> List[Tuple[Dict, List[Dict]]]:
paginator = client.get_paginator('list_hosted_zones')
hosted_zones: List[Dict] = []
for page in paginator.paginate():
hosted_zones.extend(page['HostedZones'])
results: List[Tuple[Dict, List[Dict]]] = []
for hosted_zone in hosted_zones:
record_sets = get_zone_record_sets(client, hosted_zone['Id'])
results.append((hosted_zone, record_sets))
return results
def _create_dns_record_id(zoneid: str, name: str, record_type: str) -> str:
return "/".join([zoneid, name, record_type])
def _normalize_dns_address(address: str) -> str:
return address.rstrip('.')
@timeit
def cleanup_route53(neo4j_session: neo4j.Session, current_aws_id: str, update_tag: int) -> None:
run_cleanup_job(
'aws_dns_cleanup.json',
neo4j_session,
{'UPDATE_TAG': update_tag, 'AWS_ID': current_aws_id},
)
@timeit
def sync(
neo4j_session: neo4j.Session, boto3_session: boto3.session.Session, regions: List[str], current_aws_account_id: str,
update_tag: int, common_job_parameters: Dict,
) -> None:
logger.info("Syncing Route53 for account '%s'.", current_aws_account_id)
client = boto3_session.client('route53')
zones = get_zones(client)
load_dns_details(neo4j_session, zones, current_aws_account_id, update_tag)
link_sub_zones(neo4j_session, update_tag)
cleanup_route53(neo4j_session, current_aws_account_id, update_tag)
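# --- Usage sketch (added; not part of the upstream module) ---
# How sync() might be wired up by a caller. The Neo4j URI (with auth omitted),
# account id, and update tag are made-up sample values; Route53 is a global
# service, so the regions list is accepted but not used by this sync.
def _example_sync() -> None:
    driver = neo4j.GraphDatabase.driver("bolt://localhost:7687")
    with driver.session() as session:
        sync(
            neo4j_session=session,
            boto3_session=boto3.Session(),
            regions=["us-east-1"],
            current_aws_account_id="123456789012",
            update_tag=1234567890,
            common_job_parameters={"UPDATE_TAG": 1234567890},
        )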
| python | 14,128 |
"""
This file offers the methods to automatically retrieve the graph Pantoea sp. At9b.
The graph is automatically retrieved from the STRING repository.
Report
---------------------
At the time of rendering these methods (please see datetime below), the graph
had the following characteristics:
Datetime: 2021-02-02 21:18:22.571076
The undirected graph Pantoea sp. At9b has 4054 nodes and 321586 weighted
edges, of which none are self-loops. The graph is dense as it has a density
of 0.03914 and has 8 connected components, where the component with most
nodes has 4032 nodes and the component with the least nodes has 2 nodes.
The graph median node degree is 124, the mean node degree is 158.65, and
the node degree mode is 77. The top 5 most central nodes are 592316.Pat9b_1927
(degree 1496), 592316.Pat9b_3948 (degree 1327), 592316.Pat9b_1136 (degree
1311), 592316.Pat9b_2027 (degree 1310) and 592316.Pat9b_1910 (degree 1210).
References
---------------------
Please cite the following if you use the data:
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
Usage example
----------------------
The usage of this graph is relatively straightforward:
.. code:: python
# First import the function to retrieve the graph from the datasets
from ensmallen_graph.datasets.string import PantoeaSpAt9b
# Then load the graph
graph = PantoeaSpAt9b()
# Finally, you can do anything with it, for instance, compute its report:
print(graph)
# If you need to run a link prediction task with validation,
# you can split the graph using a connected holdout as follows:
train_graph, validation_graph = graph.connected_holdout(
# For example, you can use an 80/20 split for the holdout.
train_size=0.8,
# The random state is used to reproduce the holdout.
random_state=42,
# Whether to show a loading bar.
verbose=True
)
# Remember that, if you need, you can enable the memory-time trade-offs:
train_graph.enable(
vector_sources=True,
vector_destinations=True,
vector_outbounds=True
)
# Consider using the methods made available in the Embiggen package
# to run graph embedding or link prediction tasks.
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen_graph import EnsmallenGraph # pylint: disable=import-error
def PantoeaSpAt9b(
directed: bool = False,
verbose: int = 2,
cache_path: str = "graphs/string",
**additional_graph_kwargs: Dict
) -> EnsmallenGraph:
"""Return new instance of the Pantoea sp. At9b graph.
The graph is automatically retrieved from the STRING repository.
Parameters
-------------------
directed: bool = False,
Whether to load the graph as directed or undirected.
By default false.
verbose: int = 2,
Whether to show loading bars during the retrieval and building
of the graph.
cache_path: str = "graphs",
Where to store the downloaded graphs.
additional_graph_kwargs: Dict,
Additional graph kwargs.
Returns
-----------------------
Instance of the Pantoea sp. At9b graph.
Report
---------------------
At the time of rendering these methods (please see datetime below), the graph
had the following characteristics:
Datetime: 2021-02-02 21:18:22.571076
The undirected graph Pantoea sp. At9b has 4054 nodes and 321586 weighted
edges, of which none are self-loops. The graph is dense as it has a density
of 0.03914 and has 8 connected components, where the component with most
nodes has 4032 nodes and the component with the least nodes has 2 nodes.
The graph median node degree is 124, the mean node degree is 158.65, and
the node degree mode is 77. The top 5 most central nodes are 592316.Pat9b_1927
(degree 1496), 592316.Pat9b_3948 (degree 1327), 592316.Pat9b_1136 (degree
1311), 592316.Pat9b_2027 (degree 1310) and 592316.Pat9b_1910 (degree 1210).
References
---------------------
Please cite the following if you use the data:
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
Usage example
----------------------
The usage of this graph is relatively straightforward:
.. code:: python
# First import the function to retrieve the graph from the datasets
from ensmallen_graph.datasets.string import PantoeaSpAt9b
# Then load the graph
graph = PantoeaSpAt9b()
# Finally, you can do anything with it, for instance, compute its report:
print(graph)
# If you need to run a link prediction task with validation,
# you can split the graph using a connected holdout as follows:
train_graph, validation_graph = graph.connected_holdout(
# For example, you can use an 80/20 split for the holdout.
train_size=0.8,
# The random state is used to reproduce the holdout.
random_state=42,
# Whether to show a loading bar.
verbose=True
)
# Remember that, if you need, you can enable the memory-time trade-offs:
train_graph.enable(
vector_sources=True,
vector_destinations=True,
vector_outbounds=True
)
# Consider using the methods made available in the Embiggen package
# to run graph embedding or link prediction tasks.
"""
return AutomaticallyRetrievedGraph(
graph_name="PantoeaSpAt9b",
dataset="string",
directed=directed,
verbose=verbose,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
| python | 6,627 |