code | docs
---|---|
def I(self):
    "'1' if Daylight Savings Time, '0' otherwise."
    if self.timezone and self.timezone.dst(self.data):
        return u'1'
    else:
        return u'0' | '1' if Daylight Savings Time, '0' otherwise. |
def S(self):
    "English ordinal suffix for the day of the month, 2 characters; i.e. 'st', 'nd', 'rd' or 'th'"
    if self.data.day in (11, 12, 13):  # Special case
        return u'th'
    last = self.data.day % 10
    if last == 1:
        return u'st'
    if last == 2:
        return u'nd'
    if last == 3:
        return u'rd'
    return u'th' | English ordinal suffix for the day of the month, 2 characters; i.e. 'st', 'nd', 'rd' or 'th' |
def t(self):
    "Number of days in the given month; i.e. '28' to '31'"
    return u'%02d' % calendar.monthrange(self.data.year, self.data.month)[1] | Number of days in the given month; i.e. '28' to '31' |
def T(self):
    "Time zone of this machine; e.g. 'EST' or 'MDT'"
    name = self.timezone and self.timezone.tzname(self.data) or None
    if name is None:
        name = self.format('O')
    return unicode(name) | Time zone of this machine; e.g. 'EST' or 'MDT' |
def U(self):
    "Seconds since the Unix epoch (January 1 1970 00:00:00 GMT)"
    if getattr(self.data, 'tzinfo', None):
        return int(calendar.timegm(self.data.utctimetuple()))
    else:
        return int(time.mktime(self.data.timetuple())) | Seconds since the Unix epoch (January 1 1970 00:00:00 GMT) |
def z(self):
    "Day of the year; i.e. '0' to '365'"
    doy = self.year_days[self.data.month] + self.data.day
    if self.L() and self.data.month > 2:
        doy += 1
    return doy | Day of the year; i.e. '0' to '365' |
def Z(self):
if not self.timezone:
return 0
offset = self.timezone.utcoffset(self.data)
# Only days can be negative, so negative offsets have days=-1 and
# seconds positive. Positive offsets have days=0
return offset.days * 86400 + offset.seconds | Time zone offset in seconds (i.e. '-43200' to '43200'). The offset for
timezones west of UTC is always negative, and for those east of UTC is
always positive. |
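A quick sanity check of the days/seconds arithmetic used by Z() above; a minimal sketch using only the standard library (the five-hour offset is an arbitrary example):

from datetime import timedelta

offset = timedelta(hours=-5)  # e.g. UTC-5
# timedelta normalizes negative offsets to days=-1, seconds positive
assert offset.days == -1 and offset.seconds == 68400
assert offset.days * 86400 + offset.seconds == -18000  # -5 * 3600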
def print_metric(name, count, elapsed):
_do_print(name, count, elapsed, file=sys.stdout) | A metric function that prints to standard output
:arg str name: name of the metric
:arg int count: number of items
:arg float elapsed: time in seconds |
def stderr_metric(name, count, elapsed):
_do_print(name, count, elapsed, file=sys.stderr) | A metric function that prints to standard error
:arg str name: name of the metric
:arg int count: number of items
:arg float elapsed: time in seconds |
def make_multi_metric(*metrics):
def multi_metric(name, count, elapsed):
"""Calls multiple metrics (closure)"""
for m in metrics:
m(name, count, elapsed)
return multi_metric | Make a new metric function that calls the supplied metrics
:arg functions metrics: metric functions
:rtype: function |
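A minimal usage sketch combining the metric helpers above (names as defined in this section):

both = make_multi_metric(print_metric, stderr_metric)
both('rows_processed', 1000, 0.25)  # reports the same metric to stdout and stderr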
def _is_orphan(scc, graph):
return all(p in scc for v in scc for p in graph.parents(v)) | Return False iff the given scc is reachable from elsewhere. |
def key_cycles():
graph = garbage()
sccs = graph.strongly_connected_components()
return [scc for scc in sccs if _is_orphan(scc, graph)] | Collect cyclic garbage, and return the strongly connected
components that were keeping the garbage alive. |
def _run_command(self, command, **kwargs):
try:
return {'output': subprocess.check_output(command, **kwargs)}
except Exception as e:
return {'error': str(e)} | Wrapper to pass command to plowshare.
:param command: The command to pass to plowshare.
:type command: str
:param **kwargs: Additional keyword arguments passed to the subprocess call.
:type **kwargs: dict
:returns: Object containing either the output of the plowshare command or an
error message.
:rtype: dict
:raises: Exception |
def _hosts_by_success(self, hosts=[]):
hosts = hosts if hosts else self.hosts
return sorted(hosts, key=lambda h: self._host_errors[h]) | Order hosts by most successful (least amount of errors) first.
:param hosts: List of hosts.
:type hosts: list
:returns: List of hosts sorted by successful connections.
:rtype: list |
def _filter_sources(self, sources):
filtered, hosts = [], []
for source in sources:
if 'error' in source:
continue
filtered.append(source)
hosts.append(source['host_name'])
return sorted(filtered, key=lambda s:
self._hosts_by_success(hosts).index(s['host_name'])) | Remove sources with errors and return ordered by host success.
:param sources: List of potential sources to connect to.
:type sources: list
:returns: Sorted list of potential sources without errors.
:rtype: list |
def upload(self, filename, number_of_hosts):
return self.multiupload(filename, self.random_hosts(number_of_hosts)) | Upload the given file to the specified number of hosts.
:param filename: The filename of the file to upload.
:type filename: str
:param number_of_hosts: The number of hosts to connect to.
:type number_of_hosts: int
:returns: A list of dicts with 'host_name' and 'url' keys for all
successful uploads or an empty list if all uploads failed.
:rtype: list |
def download(self, sources, output_directory, filename):
valid_sources = self._filter_sources(sources)
if not valid_sources:
return {'error': 'no valid sources'}
manager = Manager()
successful_downloads = manager.list([])
def f(source):
if not successful_downloads:
result = self.download_from_host(
source, output_directory, filename)
if 'error' in result:
self._host_errors[source['host_name']] += 1
else:
successful_downloads.append(result)
multiprocessing.dummy.Pool(len(valid_sources)).map(f, valid_sources)
return successful_downloads[0] if successful_downloads else {} | Download a file from one of the provided sources
The sources will be ordered by least amount of errors, so most
successful hosts will be tried first. In case of failure, the next
source will be attempted, until the first successful download is
completed or all sources have been depleted.
:param sources: A list of dicts with 'host_name' and 'url' keys.
:type sources: list
:param output_directory: Directory to save the downloaded file in.
:type output_directory: str
:param filename: Filename assigned to the downloaded file.
:type filename: str
:returns: A dict with 'host_name' and 'filename' keys if the download
is successful, or an empty dict otherwise.
:rtype: dict |
def download_from_host(self, source, output_directory, filename):
result = self._run_command(
["plowdown", source["url"], "-o",
output_directory, "--temp-rename"],
stderr=open("/dev/null", "w")
)
result['host_name'] = source['host_name']
if 'error' in result:
return result
temporary_filename = self.parse_output(
result['host_name'], result['output'])
result['filename'] = os.path.join(output_directory, filename)
result.pop('output')
os.rename(temporary_filename, result['filename'])
return result | Download a file from a given host.
This method renames the file to the given string.
:param source: Dictionary containing information about host.
:type source: dict
:param output_directory: Directory to place output in.
:type output_directory: str
:param filename: The filename to rename to.
:type filename: str
:returns: Dictionary with information about downloaded file.
:rtype: dict |
def multiupload(self, filename, hosts):
manager = Manager()
successful_uploads = manager.list([])
def f(host):
if len(successful_uploads) / float(len(hosts)) < \
settings.MIN_FILE_REDUNDANCY:
# Optimal redundancy not achieved, keep going
result = self.upload_to_host(filename, host)
if 'error' in result:
self._host_errors[host] += 1
else:
successful_uploads.append(result)
multiprocessing.dummy.Pool(len(hosts)).map(
f, self._hosts_by_success(hosts))
return list(successful_uploads) | Upload file to multiple hosts simultaneously
The upload will be attempted for each host until the optimal file
redundancy is achieved (a percentage of successful uploads) or the host
list is depleted.
:param filename: The filename of the file to upload.
:type filename: str
:param hosts: A list of hosts as defined in the master host list.
:type hosts: list
:returns: A list of dicts with 'host_name' and 'url' keys for all
successful uploads or an empty list if all uploads failed.
:rtype: list |
def upload_to_host(self, filename, hostname):
result = self._run_command(
["plowup", hostname, filename],
stderr=open("/dev/null", "w")
)
result['host_name'] = hostname
if 'error' not in result:
result['url'] = self.parse_output(hostname, result.pop('output'))
return result | Upload a file to the given host.
This method relies on 'plowup' being installed on the system.
If it succeeds, this method returns a dictionary with the host name,
and the final URL. Otherwise, it returns a dictionary with the
host name and an error flag.
:param filename: The filename of the file to upload.
:type filename: str
:param hostname: The host you are uploading the file to.
:type hostname: str
:returns: Dictionary containing information about upload to host.
:rtype: dict |
def parse_output(self, hostname, output):
if isinstance(output, bytes):
output = output.decode('utf-8')
return output.split()[-1] | Parse plowup's output.
For now, we just return the last whitespace-separated token.
:param hostname: Name of host you are working with.
:type hostname: str
:param output: The raw output of a plowshare command.
:type output: str or bytes
:returns: Parsed and decoded output (the last token).
:rtype: str |
def set(self, keyword, default, from_env=True):
env_key = '{}{}'.format(self.ENV_PREFIX, keyword.upper())
if hasattr(self, keyword):
return getattr(self, keyword)
value = default
if from_env and (env_key in env):
env_val = env.get(env_key)
should_eval = not isinstance(default, str)
try:
value = literal_eval(env_val) if should_eval else env_val
except (ValueError, SyntaxError):
raise ValueError("Unable to cast %r to %r" % (
env_val, type(default).__name__))
setattr(self, keyword, value)
return getattr(self, keyword) | Set value on self if not already set. If unset, attempt to
retrieve from environment variable of same name (unless disabled
via 'from_env'). If 'default' value is not a string, evaluate
environment variable as a Python type. If no env variables are
found, fallback to 'default' value. |
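A sketch of the environment-override behavior of set() above (the ENV_PREFIX value, the config object, and the keyword are hypothetical):

# Suppose self.ENV_PREFIX == 'MYAPP_' and the environment contains MYAPP_TIMEOUT=30.
# Because the default is an int (not a str), the env value is literal_eval'd:
config.set('timeout', 10)  # -> 30, cached as config.timeout
config.set('timeout', 99)  # -> 30 (attribute already set; default ignored)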
def _generate_queues(queues, exchange, platform_queue):
return set([
Queue('celery', exchange, routing_key='celery'),
Queue(platform_queue, exchange, routing_key='#'),
] + [
Queue(q_name, exchange, routing_key=q_name)
for q_name in queues
]) | Queues known by this worker |
def _erf(x):
T = [
9.60497373987051638749E0,
9.00260197203842689217E1,
2.23200534594684319226E3,
7.00332514112805075473E3,
5.55923013010394962768E4,
]
U = [
3.35617141647503099647E1,
5.21357949780152679795E2,
4.59432382970980127987E3,
2.26290000613890934246E4,
4.92673942608635921086E4,
]
# Shortcut special cases
if x == 0:
return 0
if x >= MAXVAL:
return 1
if x <= -MAXVAL:
return -1
if abs(x) > 1:
return 1 - erfc(x)
z = x * x
return x * _polevl(z, T, 4) / _p1evl(z, U, 5) | Port of cephes ``ndtr.c`` ``erf`` function.
See https://github.com/jeremybarnes/cephes/blob/master/cprob/ndtr.c |
def _polevl(x, coefs, N):
ans = 0
power = len(coefs) - 1
for coef in coefs:
try:
ans += coef * x**power
except OverflowError:
pass
power -= 1
return ans | Port of cephes ``polevl.c``: evaluate polynomial
See https://github.com/jeremybarnes/cephes/blob/master/cprob/polevl.c |
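The cephes original evaluates the polynomial with Horner's scheme, highest-order coefficient first; the port above computes explicit powers so it can skip individual overflowing terms. A Horner-form equivalent, shown for comparison only (a sketch, not from the source):

def polevl_horner(x, coefs):
    # coefs ordered highest power first, as in _polevl above
    ans = 0.0
    for coef in coefs:
        ans = ans * x + coef
    return ans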
def erfinv(z):
if abs(z) > 1:
raise ValueError("`z` must be between -1 and 1 inclusive")
# Shortcut special cases
if z == 0:
return 0
if z == 1:
return inf
if z == -1:
return -inf
# otherwise calculate things.
return _ndtri((z + 1) / 2.0) / math.sqrt(2) | Calculate the inverse error function at point ``z``.
This is a direct port of the SciPy ``erfinv`` function, originally
written in C.
Parameters
----------
z : numeric
Returns
-------
float
References
----------
+ https://en.wikipedia.org/wiki/Error_function#Inverse_functions
+ http://functions.wolfram.com/GammaBetaErf/InverseErf/
Examples
--------
>>> round(erfinv(0.1), 12)
0.088855990494
>>> round(erfinv(0.5), 12)
0.476936276204
>>> round(erfinv(-0.5), 12)
-0.476936276204
>>> round(erfinv(0.95), 12)
1.38590382435
>>> round(erf(erfinv(0.3)), 3)
0.3
>>> round(erfinv(erf(0.5)), 3)
0.5
>>> erfinv(0)
0
>>> erfinv(1)
inf
>>> erfinv(-1)
-inf |
def get_cmap(name, lut=None):
if name in rcParams['colors.cmaps']:
colors = rcParams['colors.cmaps'][name]
lut = lut or len(colors)
return FixedColorMap.from_list(name=name, colors=colors, N=lut)
elif name in _cmapnames:
colors = _cmapnames[name]
lut = lut or len(colors)
return FixedColorMap.from_list(name=name, colors=colors, N=lut)
else:
cmap = mpl_get_cmap(name)
# Note: we could include the `lut` in the call of mpl_get_cmap, but
# this raises a ValueError for colormaps like 'viridis' in mpl version
# 1.5. Besides the mpl_get_cmap function does not modify the lut if
# it does not match
if lut is not None and cmap.N != lut:
cmap = FixedColorMap.from_list(
name=cmap.name, colors=cmap(np.linspace(0, 1, lut)), N=lut)
return cmap | Returns the specified colormap.
Parameters
----------
name: str or :class:`matplotlib.colors.Colormap`
If a colormap, it is returned unchanged.
%(cmap_note)s
lut: int
An integer giving the number of entries desired in the lookup table
Returns
-------
matplotlib.colors.Colormap
The colormap specified by `name`
See Also
--------
show_colormaps: A function to display all available colormaps
Notes
-----
Different from the :func:`matplotlib.pyplot.get_cmap` function, this
function changes the number of colors if `name` is a
:class:`matplotlib.colors.Colormap` instance to match the given `lut`. |
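A short usage sketch (assuming 'viridis' resolves through matplotlib rather than the custom rcParams or _cmapnames tables):

cmap = get_cmap('viridis', lut=16)  # resampled to a 16-entry lookup table
assert cmap.N == 16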
def _get_cmaps(names):
import matplotlib.pyplot as plt
available_cmaps = list(
chain(plt.cm.cmap_d, _cmapnames, rcParams['colors.cmaps']))
names = list(names)
wrongs = []
for arg in (arg for arg in names if (not isinstance(arg, Colormap) and
arg not in available_cmaps)):
if isinstance(arg, str):
similarkeys = get_close_matches(arg, available_cmaps)
if similarkeys != []:
warn("Colormap %s not found in standard colormaps.\n"
"Similar colormaps are %s." % (arg, ', '.join(similarkeys)))
else:
warn("Colormap %s not found in standard colormaps.\n"
"Run function without arguments to see all colormaps" % arg)
names.remove(arg)
wrongs.append(arg)
if not names and not wrongs:
names = sorted(m for m in available_cmaps if not m.endswith("_r"))
return names | Filter the given `names` for colormaps |
def _create_stdout_logger(logging_level):
out_hdlr = logging.StreamHandler(sys.stdout)
out_hdlr.setFormatter(logging.Formatter(
'[%(asctime)s] %(message)s', "%H:%M:%S"
))
out_hdlr.setLevel(logging_level)
for name in LOGGING_NAMES:
log = logging.getLogger(name)
log.addHandler(out_hdlr)
log.setLevel(logging_level) | Create a logger to stdout. This creates loggers for a series
of modules we would like to log information on. |
def main():
docraptor = DocRaptor()
print("Create PDF")
resp = docraptor.create(
{
"document_content": "<h1>python-docraptor</h1><p>Async Test</p>",
"test": True,
"async": True,
}
)
print("Status ID: {status_id}".format(status_id=resp["status_id"]))
status_id = resp["status_id"]
resp = docraptor.status(status_id)
print(" {status}".format(status=resp["status"]))
while resp["status"] != "completed":
time.sleep(3)
resp = docraptor.status(status_id)
print(" {status}".format(status=resp["status"]))
print("Download to test_async.pdf")
with open("test_async.pdf", "wb") as pdf_file:
pdf_file.write(docraptor.download(resp["download_key"]).content)
print("[DONE]") | Generate a PDF using the async method. |
def robust_isinstance(inst, typ) -> bool:
if typ is Any:
return True
if is_typevar(typ):
if hasattr(typ, '__constraints__') and typ.__constraints__ is not None:
typs = get_args(typ, evaluate=True)
return any(robust_isinstance(inst, t) for t in typs)
elif hasattr(typ, '__bound__') and typ.__bound__ is not None:
return robust_isinstance(inst, typ.__bound__)
else:
# a raw TypeVar means 'anything'
return True
else:
if is_union_type(typ):
typs = get_args(typ, evaluate=True)
return any(robust_isinstance(inst, t) for t in typs)
else:
return isinstance(inst, get_base_generic_type(typ)) | Similar to isinstance, but if 'typ' is a parametrized generic Type, it is first transformed into its base generic
class so that the instance check works. It is also robust to Union and Any.
:param inst:
:param typ:
:return: |
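A few illustrative calls (standard typing constructs; the results follow the branches above):

from typing import Any, List, Optional

robust_isinstance([1, 2], List[int])  # True: checked against the base generic's origin, list
robust_isinstance(3, Optional[int])   # True: Union members are tried one by one
robust_isinstance('x', Any)           # True: Any matches everything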
def get_pretty_type_str(object_type) -> str:
try:
# DO NOT resolve forward references otherwise this can lead to infinite recursion
contents_item_type, contents_key_type = _extract_collection_base_type(object_type, resolve_fwd_refs=False)
if isinstance(contents_item_type, tuple):
return object_type.__name__ + '[' \
+ ', '.join([get_pretty_type_str(item_type) for item_type in contents_item_type]) + ']'
else:
if contents_key_type is not None:
return object_type.__name__ + '[' + get_pretty_type_str(contents_key_type) + ', ' \
+ get_pretty_type_str(contents_item_type) + ']'
elif contents_item_type is not None:
return object_type.__name__ + '[' + get_pretty_type_str(contents_item_type) + ']'
except Exception as e:
pass
if is_union_type(object_type):
return 'Union[' + ', '.join([get_pretty_type_str(item_type)
for item_type in get_args(object_type, evaluate=True)]) + ']'
elif is_typevar(object_type):
# typevars usually do not display their namespace so str() is compact. And it displays the cov/contrav symbol
return str(object_type)
else:
try:
return object_type.__name__
except:
return str(object_type) | Utility method to check if a type is a subclass of typing.{List,Dict,Set,Tuple}. In that case returns a
user-friendly character string with the inner item types, such as Dict[str, int].
:param object_type:
:return: type.__name__ if type is not a subclass of typing.{List,Dict,Set,Tuple}, otherwise
type.__name__[list of inner_types.__name__] |
def is_collection(object_type, strict: bool = False) -> bool:
if object_type is None or object_type is Any or is_union_type(object_type) or is_typevar(object_type):
return False
elif strict:
return object_type == dict \
or object_type == list \
or object_type == tuple \
or object_type == set \
or get_base_generic_type(object_type) == Dict \
or get_base_generic_type(object_type) == List \
or get_base_generic_type(object_type) == Set \
or get_base_generic_type(object_type) == Tuple
else:
return issubclass(object_type, Dict) \
or issubclass(object_type, List) \
or issubclass(object_type, Set) \
or issubclass(object_type, Tuple) \
or issubclass(object_type, dict) \
or issubclass(object_type, list) \
or issubclass(object_type, tuple) \
or issubclass(object_type, set) | Utility method to check if a type is a subclass of typing.{List,Dict,Set,Tuple}
or of list, dict, set, tuple.
If strict is set to True, the method will return True only if the class IS directly one of the base collection
classes
:param object_type:
:param strict: if set to True, this method will look for a strict match.
:return: |
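How the strict flag changes the answer (MyList is a hypothetical subclass, shown only to illustrate the two branches above):

from typing import List

class MyList(list):
    pass

is_collection(dict, strict=True)    # True: dict is one of the base collection classes
is_collection(MyList)               # True: subclasses count in the loose branch
is_collection(MyList, strict=True)  # False: not a direct base collection class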
def get_all_subclasses(typ, recursive: bool = True, _memo = None) -> Sequence[Type[Any]]:
_memo = _memo or set()
# if we have collected the subclasses for this already, return
if typ in _memo:
return []
# else remember that we have collected them, and collect them
_memo.add(typ)
if is_generic_type(typ):
# We now use get_origin() to also find all the concrete subclasses in case the desired type is a generic
sub_list = get_origin(typ).__subclasses__()
else:
sub_list = typ.__subclasses__()
# recurse
result = []
for t in sub_list:
# only keep the origins in the list
to = get_origin(t) or t
try:
if to is not typ and to not in result and is_subtype(to, typ, bound_typevars={}):
result.append(to)
except:
# catching an error with is_subtype(Dict, Dict[str, int], bound_typevars={})
pass
# recurse
if recursive:
for typpp in sub_list:
for t in get_all_subclasses(typpp, recursive=True, _memo=_memo):
# unfortunately we have to check 't not in sub_list' because with generics strange things happen
# also is_subtype returns false when the parent is a generic
if t not in sub_list and is_subtype(t, typ, bound_typevars={}):
result.append(t)
return result | Returns all subclasses, and supports generic types. It is recursive by default
See discussion at https://github.com/Stewori/pytypes/issues/31
:param typ:
:param recursive: a boolean indicating whether recursion is needed
:param _memo: internal variable used in recursion to avoid exploring subclasses that were already explored
:return: |
def eval_forward_ref(typ: _ForwardRef):
for frame in stack():
m = getmodule(frame[0])
m_name = m.__name__ if m is not None else '<unknown>'
if m_name.startswith('parsyfiles.tests') or not m_name.startswith('parsyfiles'):
try:
# print("File {}:{}".format(frame.filename, frame.lineno))
return typ._eval_type(frame[0].f_globals, frame[0].f_locals)
except NameError:
pass
raise InvalidForwardRefError(typ) | Climbs the current stack until the given Forward reference has been resolved, or raises an InvalidForwardRefError
:param typ: the forward reference to resolve
:return: |
def is_valid_pep484_type_hint(typ_hint, allow_forward_refs: bool = False):
# most common case first, to be faster
try:
if isinstance(typ_hint, type):
return True
except:
pass
# optionally, check forward reference
try:
if allow_forward_refs and is_forward_ref(typ_hint):
return True
except:
pass
# finally check unions and typevars
try:
return is_union_type(typ_hint) or is_typevar(typ_hint)
except:
return False | Returns True if the provided type is a valid PEP484 type hint, False otherwise.
Note: string type hints (forward references) are not supported by default, since callers of this function in
parsyfiles lib actually require them to be resolved already.
:param typ_hint:
:param allow_forward_refs:
:return: |
def is_pep484_nonable(typ):
# TODO rely on typing_inspect if there is an answer to https://github.com/ilevkivskyi/typing_inspect/issues/14
if typ is type(None):
return True
elif is_typevar(typ) or is_union_type(typ):
return any(is_pep484_nonable(tt) for tt in get_alternate_types_resolving_forwardref_union_and_typevar(typ))
else:
return False | Checks if a given type is nonable, meaning that it explicitly or implicitly declares a Union with NoneType.
Nested TypeVars and Unions are supported.
:param typ:
:return: |
def get_validated_attribute_type_info(typ, item_type, attr_name):
if (typ is None) or (typ is Parameter.empty):
raise TypeInformationRequiredError.create_for_object_attributes(item_type, attr_name, typ)
# resolve forward references
typ = resolve_forward_ref(typ)
if not is_valid_pep484_type_hint(typ):
raise InvalidPEP484TypeHint.create_for_object_attributes(item_type, attr_name, typ)
return typ | Routine to validate that typ is a valid non-empty PEP484 type hint. If it is a forward reference, it will be
resolved
:param typ:
:param item_type:
:param attr_name:
:return: |
def create_for_collection_items(item_type, hint):
# this leads to infinite loops
# try:
# prt_type = get_pretty_type_str(item_type)
# except:
# prt_type = str(item_type)
return TypeInformationRequiredError("Cannot parse object of type {t} as a collection: this type has no valid "
"PEP484 type hint about its contents: found {h}. Please use a standard "
"PEP484 declaration such as Dict[str, Foo] or List[Foo]"
"".format(t=str(item_type), h=hint)) | Helper method for collection items
:param item_type:
:return: |
def create_for_object_attributes(item_type, faulty_attribute_name: str, hint):
# this leads to infinite loops
# try:
# prt_type = get_pretty_type_str(item_type)
# except:
# prt_type = str(item_type)
return TypeInformationRequiredError("Cannot create instances of type {t}: constructor attribute '{a}' has an"
" invalid PEP484 type hint: {h}.".format(t=str(item_type),
a=faulty_attribute_name, h=hint)) | Helper method for constructor attributes
:param item_type:
:return: |
def expected_eye_positions(bounding_box, padding = None):
if padding is None:
padding = default_paddings['eyes']
top, left, right = padding['top'], padding['left'], padding['right']
inter_eye_distance = (bounding_box.size[1]) / (right - left)
return {
'reye':(bounding_box.top_f - top*inter_eye_distance, bounding_box.left_f - left/2.*inter_eye_distance),
'leye':(bounding_box.top_f - top*inter_eye_distance, bounding_box.right_f - right/2.*inter_eye_distance)
} | expected_eye_positions(bounding_box, padding) -> eyes
Computes the expected eye positions based on the relative coordinates of the bounding box.
This function can be used to translate between bounding-box-based image cropping and eye-location-based alignment.
The returned eye locations return the **average** eye locations, no landmark detection is performed.
**Parameters:**
``bounding_box`` : :py:class:`BoundingBox`
The face bounding box as detected by one of the functions in ``bob.ip.facedetect``.
``padding`` : {'top':float, 'bottom':float, 'left':float, 'right':float}
The padding that was used for the ``eyes`` source in :py:func:`bounding_box_from_annotation`, has a proper default.
**Returns:**
``eyes`` : {'reye' : (rey, rex), 'leye' : (ley, lex)}
A dictionary containing the average left and right eye annotation. |
def parallel_part(data, parallel):
if parallel is None or "SGE_TASK_ID" not in os.environ:
return data
data_per_job = int(math.ceil(float(len(data)) / float(parallel)))
task_id = int(os.environ['SGE_TASK_ID'])
first = (task_id-1) * data_per_job
last = min(len(data), task_id * data_per_job)
return data[first:last] | parallel_part(data, parallel) -> part
Splits the given data list into ``parallel`` parts and returns the part that belongs to the current job, based on the ``SGE_TASK_ID`` environment variable.
**Parameters:**
``data`` : [object]
A list of data that should be split up into ``parallel`` parts
``parallel`` : int or ``None``
The total number of parts, in which the data should be split into
**Returns:**
``part`` : [object]
The desired partition of the ``data`` |
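A worked example of the slicing above (the environment variable is set manually here purely for illustration):

import os
os.environ['SGE_TASK_ID'] = '2'

data = list(range(10))
# ceil(10 / 4) = 3 items per job; task 2 gets indices 3..5
assert parallel_part(data, 4) == [3, 4, 5]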
def quasi_random_indices(number_of_total_items, number_of_desired_items = None):
# check if we need to compute a sublist at all
if number_of_desired_items is None or number_of_desired_items >= number_of_total_items or number_of_desired_items < 0:
for i in range(number_of_total_items):
yield i
else:
increase = float(number_of_total_items)/float(number_of_desired_items)
# generate a regular quasi-random index list
for i in range(number_of_desired_items):
yield int((i +.5)*increase) | quasi_random_indices(number_of_total_items, [number_of_desired_items]) -> index
Yields an iterator to a quasi-random list of indices that will contain exactly the number of desired indices (or the number of total items in the list, if this is smaller).
This function can be used to retrieve a consistent and reproducible sub-sampled list of indices of the data, in case the given ``number_of_desired_items`` is lower than ``number_of_total_items``.
**Parameters:**
``number_of_total_items`` : int
The total number of elements in the collection, which should be sub-sampled
``number_of_desired_items`` : int or ``None``
The number of items that should be used; if ``None`` or greater than ``number_of_total_items``, all indices are yielded
**Yields:**
``index`` : int
An iterator to indices, which will span ``number_of_total_items`` evenly. |
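For example, sub-sampling 3 of 10 items picks evenly spread indices (increase = 10/3, index = int((i + .5) * increase)):

list(quasi_random_indices(10, 3))  # -> [1, 5, 8]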
def exception_class(self, exception):
cls = type(exception)
if cls.__module__ == 'exceptions': # Built-in exception.
return cls.__name__
return "%s.%s" % (cls.__module__, cls.__name__) | Return a name representing the class of an exception. |
def request_info(self, request):
# We have to re-resolve the request path here, because the information
# is not stored on the request.
view, args, kwargs = resolve(request.path)
for i, arg in enumerate(args):
kwargs[i] = arg
parameters = {}
parameters.update(kwargs)
parameters.update(request.POST.items())
environ = request.META
return {
"session": dict(request.session),
'cookies': dict(request.COOKIES),
'headers': dict(get_headers(environ)),
'env': dict(get_environ(environ)),
"remote_ip": request.META["REMOTE_ADDR"],
"parameters": parameters,
"action": view.__name__,
"application": view.__module__,
"method": request.method,
"url": request.build_absolute_uri()
} | Return a dictionary of information for a given request.
This will be run once for every request. |
def _save(self, hdf5, model, positives, negatives):
# write the model and the training set indices to the given HDF5 file
hdf5.set("PositiveIndices", sorted(list(positives)))
hdf5.set("NegativeIndices", sorted(list(negatives)))
hdf5.create_group("Model")
hdf5.cd("Model")
model.save(hdf5)
del hdf5 | Saves the given intermediate state of the bootstrapping to file. |
def _load(self, hdf5):
positives = set(hdf5.get("PositiveIndices"))
negatives = set(hdf5.get("NegativeIndices"))
hdf5.cd("Model")
model = bob.learn.boosting.BoostedMachine(hdf5)
return model, positives, negatives | Loads the intermediate state of the bootstrapping from file. |
def undelay(self):
    '''resolves all delayed arguments'''
    i = 0
    while i < len(self):
        op = self[i]
        i += 1
        if hasattr(op, 'arg1'):
            if isinstance(op.arg1, DelayedArg):
                op.arg1 = op.arg1.resolve()
            if isinstance(op.arg1, CodeBlock):
                op.arg1.undelay() | resolves all delayed arguments |
def setup_logging(log_level=logging.INFO):
logging.basicConfig(level=log_level)
fmt = ("%(asctime)s %(levelname)s (%(threadName)s) "
"[%(name)s] %(message)s")
colorfmt = "%(log_color)s{}%(reset)s".format(fmt)
datefmt = '%Y-%m-%d %H:%M:%S'
# Suppress overly verbose logs from libraries that aren't helpful
logging.getLogger('requests').setLevel(logging.WARNING)
try:
from colorlog import ColoredFormatter
logging.getLogger().handlers[0].setFormatter(ColoredFormatter(
colorfmt,
datefmt=datefmt,
reset=True,
log_colors={
'DEBUG': 'cyan',
'INFO': 'green',
'WARNING': 'yellow',
'ERROR': 'red',
'CRITICAL': 'red',
}
))
except ImportError:
pass
logger = logging.getLogger('')
logger.setLevel(log_level) | Set up the logging. |
def get_arguments():
parser = argparse.ArgumentParser("Lupupy: Command Line Utility")
parser.add_argument(
'-u', '--username',
help='Username',
required=False)
parser.add_argument(
'-p', '--password',
help='Password',
required=False)
parser.add_argument(
'--arm',
help='Arm alarm to mode',
required=False, default=False, action="store_true")
parser.add_argument(
'-i', '--ip_address',
help='IP of the Lupus panel',
required=False)
parser.add_argument(
'--disarm',
help='Disarm the alarm',
required=False, default=False, action="store_true")
parser.add_argument(
'--home',
help='Set to home mode',
required=False, default=False, action="store_true")
parser.add_argument(
'--devices',
help='Output all devices',
required=False, default=False, action="store_true")
parser.add_argument(
'--history',
help='Get the history',
required=False, default=False, action="store_true")
parser.add_argument(
'--status',
help='Get the status of the panel',
required=False, default=False, action="store_true")
parser.add_argument(
'--debug',
help='Enable debug logging',
required=False, default=False, action="store_true")
parser.add_argument(
'--quiet',
help='Output only warnings and errors',
required=False, default=False, action="store_true")
return parser.parse_args() | Get parsed arguments. |
def call():
args = get_arguments()
if args.debug:
log_level = logging.DEBUG
elif args.quiet:
log_level = logging.WARN
else:
log_level = logging.INFO
setup_logging(log_level)
lupusec = None
if not args.username or not args.password or not args.ip_address:
raise Exception("Please supply a username, password and ip.")
def _devicePrint(dev, append=''):
_LOGGER.info("%s%s", dev.desc, append)
try:
if args.username and args.password and args.ip_address:
lupusec = lupupy.Lupusec(ip_address=args.ip_address,
username=args.username,
password=args.password)
if args.arm:
if lupusec.get_alarm().set_away():
_LOGGER.info('Alarm mode changed to armed')
else:
_LOGGER.warning('Failed to change alarm mode to armed')
if args.disarm:
if lupusec.get_alarm().set_standby():
_LOGGER.info('Alarm mode changed to disarmed')
else:
_LOGGER.warning('Failed to change alarm mode to disarmed')
if args.home:
if lupusec.get_alarm().set_home():
_LOGGER.info('Alarm mode changed to home')
else:
_LOGGER.warning('Failed to change alarm mode to home')
if args.history:
_LOGGER.info(json.dumps(lupusec.get_history()['hisrows'], indent=4, sort_keys=True))
if args.status:
_LOGGER.info('Mode of panel: %s', lupusec.get_alarm().mode)
if args.devices:
for device in lupusec.get_devices():
_devicePrint(device)
except lupupy.LupusecException as exc:
_LOGGER.error(exc)
finally:
_LOGGER.info('--Finished running--') | Execute command line helper. |
def get_member_ibutton(self, val):
members = self.__con__.search_s(
CSHMember.__ldap_user_ou__,
ldap.SCOPE_SUBTREE,
"(ibutton=%s)" % val,
['ipaUniqueID'])
if members:
return CSHMember(
self,
members[0][1]['ipaUniqueID'][0].decode('utf-8'),
False)
return None | Get a CSHMember object.
Arguments:
val -- the iButton ID of the member
Returns:
The CSHMember object for the matching member, or None if the iButton supplied does not correspond to a CSH Member |
def get_member_slackuid(self, slack):
members = self.__con__.search_s(
CSHMember.__ldap_user_ou__,
ldap.SCOPE_SUBTREE,
"(slackuid=%s)" % slack,
['ipaUniqueID'])
if members:
return CSHMember(
self,
members[0][1]['ipaUniqueID'][0].decode('utf-8'),
False)
return None | Get a CSHMember object.
Arguments:
slack -- the Slack UID of the member
Returns:
The CSHMember object for the matching member, or None if the Slack UID provided does not correspond to a CSH Member |
def get_directorship_heads(self, val):
__ldap_group_ou__ = "cn=groups,cn=accounts,dc=csh,dc=rit,dc=edu"
res = self.__con__.search_s(
__ldap_group_ou__,
ldap.SCOPE_SUBTREE,
"(cn=eboard-%s)" % val,
['member'])
ret = []
for member in res[0][1]['member']:
try:
ret.append(member.decode('utf-8'))
except UnicodeDecodeError:
ret.append(member)
except KeyError:
continue
return [CSHMember(self,
dn.split('=')[1].split(',')[0],
True)
for dn in ret] | Get the heads of a directorship
Arguments:
val -- the cn of the directorship |
def enqueue_mod(self, dn, mod):
# mark for update
if dn not in self.__pending_mod_dn__:
self.__pending_mod_dn__.append(dn)
self.__mod_queue__[dn] = []
self.__mod_queue__[dn].append(mod) | Enqueue a LDAP modification.
Arguments:
dn -- the distinguished name of the object to modify
mod -- an LDAP modification entry to enqueue |
def flush_mod(self):
for dn in self.__pending_mod_dn__:
try:
if self.__ro__:
for mod in self.__mod_queue__[dn]:
if mod[0] == ldap.MOD_DELETE:
mod_str = "DELETE"
elif mod[0] == ldap.MOD_ADD:
mod_str = "ADD"
else:
mod_str = "REPLACE"
print("{} VALUE {} = {} FOR {}".format(mod_str,
mod[1],
mod[2],
dn))
else:
self.__con__.modify_s(dn, self.__mod_queue__[dn])
except ldap.TYPE_OR_VALUE_EXISTS:
print("Error! Conflicting Batch Modification: %s"
% str(self.__mod_queue__[dn]))
continue
except ldap.NO_SUCH_ATTRIBUTE:
print("Error! Conflicting Batch Modification: %s"
% str(self.__mod_queue__[dn]))
continue
self.__mod_queue__[dn] = None
self.__pending_mod_dn__ = [] | Flush all pending LDAP modifications. |
def detect_encoding(value):
# https://tools.ietf.org/html/rfc4627#section-3
if six.PY2:
null_pattern = tuple(bool(ord(char)) for char in value[:4])
else:
null_pattern = tuple(bool(char) for char in value[:4])
encodings = {
# Zero is a null-byte, 1 is anything else.
(0, 0, 0, 1): 'utf-32-be',
(0, 1, 0, 1): 'utf-16-be',
(1, 0, 0, 0): 'utf-32-le',
(1, 0, 1, 0): 'utf-16-le',
}
return encodings.get(null_pattern, 'utf-8') | Returns the character encoding for a JSON string. |
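The null-byte heuristic in action (Python 3 bytes literals; BOM-less samples):

detect_encoding(b'"\x00a\x00')     # 'utf-16-le': null pattern (1, 0, 1, 0)
detect_encoding(b'\x00\x00\x00"')  # 'utf-32-be': null pattern (0, 0, 0, 1)
detect_encoding(b'{"a": 1}')       # 'utf-8': no null bytes in the first four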
def _merge_params(url, params):
if isinstance(params, dict):
params = list(params.items())
scheme, netloc, path, query, fragment = urllib.parse.urlsplit(url)
url_params = urllib.parse.parse_qsl(query, keep_blank_values=True)
url_params.extend(params)
query = _encode_data(url_params)
return urllib.parse.urlunsplit((scheme, netloc, path, query, fragment)) | Merge and encode query parameters with an URL. |
def json(self, **kwargs):
encoding = detect_encoding(self.content[:4])
value = self.content.decode(encoding)
return simplejson.loads(value, **kwargs) | Decodes response as JSON. |
def links(self):
# <https://example.com/?page=2>; rel="next", <https://example.com/?page=34>; rel="last"'
# becomes
# {
# 'last': {'rel': 'last', 'url': 'https://example.com/?page=34'},
# 'next': {'rel': 'next', 'url': 'https://example.com/?page=2'},
# },
result = {}
if 'Link' in self.headers:
value = self.headers['Link']
for part in re.split(r', *<', value):
link = {}
vs = part.split(';')
# First section is always an url.
link['url'] = vs.pop(0).strip('\'" <>')
for v in vs:
if '=' in v:
key, v = v.split('=')
link[key.strip('\'" ')] = v.strip('\'" ')
rkey = link.get('rel') or link['url']
result[rkey] = link
return result | A dict of dicts parsed from the response 'Link' header (if set). |
def raise_for_status(self):
if 400 <= self.status_code < 600:
message = 'Error %s for %s' % (self.status_code, self.url)
raise HTTPError(message) | Raises HTTPError if the request got an error. |
def unpack_text_io_wrapper(fp, encoding):
if isinstance(fp, io.TextIOWrapper):
if fp.writable() and encoding is not None and fp.encoding != encoding:
msg = 'TextIOWrapper.encoding({0!r}) != {1!r}'
raise RuntimeError(msg.format(fp.encoding, encoding))
if encoding is None:
encoding = fp.encoding
fp = fp.buffer
return fp, encoding | If *fp* is a #io.TextIOWrapper object, this function returns the underlying
binary stream and the encoding of the IO-wrapper object. If *encoding* is not
None and does not match with the encoding specified in the IO-wrapper, a
#RuntimeError is raised. |
def metric(cls, name, count, elapsed):
if name is None:
warnings.warn("Ignoring unnamed metric", stacklevel=3)
return
with cls.lock:
# register with atexit on first call
if cls.dump_atexit and not cls.instances:
atexit.register(cls.dump)
try:
self = cls.instances[name]
except KeyError:
self = cls.instances[name] = cls(name)
self.temp.write(self.struct.pack(count, elapsed)) | A metric function that buffers through numpy
:arg str name: name of the metric
:arg int count: number of items
:arg float elapsed: time in seconds |
def dump(cls):
with cls.lock:
if not cls.instances: return
atexit.unregister(cls.dump)
cls._pre_dump()
for self in cls.instances.values():
self._dump()
cls._post_dump() | Output all recorded metrics |
def _dump(self):
try:
self.temp.seek(0) # seek to beginning
arr = np.fromfile(self.temp, self.dtype)
self.count_arr = arr['count']
self.elapsed_arr = arr['elapsed']
if self.calc_stats:
# calculate mean & standard deviation
self.count_mean = np.mean(self.count_arr)
self.count_std = np.std(self.count_arr)
self.elapsed_mean = np.mean(self.elapsed_arr)
self.elapsed_std = np.std(self.elapsed_arr)
self._output()
finally:
self.temp.close()
self._cleanup() | dump data for an individual metric. For internal use only. |
def list(self, host_rec=None, service_rec=None, hostfilter=None):
return self.send.vuln_list(host_rec, service_rec, hostfilter) | Returns a list of vulnerabilities based on t_hosts.id or t_services.id.
If neither are set then statistical results are added
:param host_rec: db.t_hosts.id
:param service_rec: db.t_services.id
:param hostfilter: Valid hostfilter or None
:return: [(vulndata) ...] if host_rec or service_rec set
:return: [(vulndata, vuln_cnt, [vuln_ip, ...], [services ...]) ...] if nothing sent |
def ip_info(self, vuln_name=None, vuln_id=None, ip_list_only=True, hostfilter=None):
return self.send.vuln_ip_info(vuln_name, vuln_id, ip_list_only, hostfilter) | List of all IP Addresses with a vulnerability
:param vuln_name: t_vulndata.f_vulnid
:param vuln_id: t_vulndata.id
:param ip_list_only: IP List only (default) or rest of t_hosts fields
:param hostfilter: Valid hostfilter or none
:return: [(ip, hostname) ...] or [(ip, hostname, t_service_vulns.f_proof, t_service_vulns.f_status), ...] |
def service_list(self, vuln_name=None, vuln_id=None, hostfilter=None):
return self.send.vuln_service_list(vuln_name, vuln_id, hostfilter) | Returns a dictionary of vulns with services and IP Addresses
:param vuln_name: t_vulndata.f_vulnid
:param vuln_id: t_vulndata.id
:param hostfilter: Valid hostfilter or none
:return: {'vuln-id': {'port': [ ip, hostname ]} ...} ... |
def import_code(mod_code, mod_name):
mod_obj = imp.new_module(mod_name)
mod_obj.__file__ = None
exec_(mod_code, mod_obj.__dict__, mod_obj.__dict__)
add_to_sys_modules(mod_name=mod_name, mod_obj=mod_obj)
return mod_obj | Create a module object by code.
@param mod_code: the code that the module contains.
@param mod_name: module name. |
def import_name(mod_name):
try:
mod_obj_old = sys.modules[mod_name]
except KeyError:
mod_obj_old = None
if mod_obj_old is not None:
return mod_obj_old
__import__(mod_name)
mod_obj = sys.modules[mod_name]
return mod_obj | Import a module by module name.
@param mod_name: module name. |
def import_path(mod_path, mod_name):
mod_code = open(mod_path).read()
mod_obj = import_code(
mod_code=mod_code,
mod_name=mod_name,
)
if not hasattr(mod_obj, '__file__'):
mod_obj.__file__ = mod_path
return mod_obj | Import a module by module file path.
@param mod_path: module file path.
@param mod_name: module name. |
def import_obj(
uri,
mod_name=None,
mod_attr_sep='::',
attr_chain_sep='.',
retn_mod=False,
):
if mod_attr_sep is None:
mod_attr_sep = '::'
uri_parts = split_uri(uri=uri, mod_attr_sep=mod_attr_sep)
protocol, mod_uri, attr_chain = uri_parts
if protocol == 'py':
mod_obj = import_name(mod_uri)
else:
if not mod_name:
msg = (
'Argument `mod_name` must be given when loading by file path.'
)
raise ValueError(msg)
mod_obj = import_path(mod_uri, mod_name=mod_name)
if not attr_chain:
if retn_mod:
return mod_obj, None
else:
return mod_obj
if attr_chain_sep is None:
attr_chain_sep = '.'
attr_obj = get_attr_chain(
obj=mod_obj,
attr_chain=attr_chain,
sep=attr_chain_sep,
)
if retn_mod:
return mod_obj, attr_obj
else:
return attr_obj | Load an object from a module.
@param uri: a URI specifying which object to load.
A `uri` consists of two parts: module URI and attribute chain,
e.g. `a/b/c.py::x.y.z` or `a.b.c::x.y.z`
# Module URI
E.g. `a/b/c.py` or `a.b.c`.
Can be either a module name or a file path.
Whether it is a file path is determined by whether it ends with `.py`.
# Attribute chain
E.g. `x.y.z`.
@param mod_name: module name.
Must be given when `uri` specifies a module file path, not a module name.
@param mod_attr_sep: the separator between module name and attribute name.
@param attr_chain_sep: the separator between parts of attribute name.
@param retn_mod: whether to return the module object. |
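Two illustrative calls (module paths and attribute names are hypothetical):

# By module name: import a.b.c, then walk to a.b.c.x.y
obj = import_obj('a.b.c::x.y')

# By file path: a mod_name is required in this case
obj = import_obj('utils/helpers.py::make_widget', mod_name='helpers')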
def add_to_sys_modules(mod_name, mod_obj=None):
mod_snames = mod_name.split('.')
parent_mod_name = ''
parent_mod_obj = None
for mod_sname in mod_snames:
if parent_mod_name == '':
current_mod_name = mod_sname
else:
current_mod_name = parent_mod_name + '.' + mod_sname
if current_mod_name == mod_name:
current_mod_obj = mod_obj
else:
current_mod_obj = sys.modules.get(current_mod_name, None)
if current_mod_obj is None:
current_mod_obj = imp.new_module(current_mod_name)
sys.modules[current_mod_name] = current_mod_obj
if parent_mod_obj is not None:
setattr(parent_mod_obj, mod_sname, current_mod_obj)
parent_mod_name = current_mod_name
parent_mod_obj = current_mod_obj | Add a module object to `sys.modules`.
@param mod_name: module name, used as key to `sys.modules`.
If `mod_name` is `a.b.c` while modules `a` and `a.b` are not existing,
empty modules will be created for `a` and `a.b` as well.
@param mod_obj: a module object.
If None, an empty module object will be created. |
def split_uri(uri, mod_attr_sep='::'):
uri_parts = uri.split(mod_attr_sep, 1)
if len(uri_parts) == 2:
mod_uri, attr_chain = uri_parts
else:
mod_uri = uri_parts[0]
attr_chain = None
if mod_uri.startswith('py://'):
protocol = 'py'
mod_uri = mod_uri[5:]
elif mod_uri.startswith('file://'):
protocol = 'file'
mod_uri = mod_uri[7:]
# If no protocol prefix is present, and the uri ends with `.py`, then
# consider the uri as module file path instead of module name.
elif mod_uri.endswith('.py'):
protocol = 'file'
else:
protocol = 'py'
info = (protocol, mod_uri, attr_chain)
return info | Split given URI into a tuple of (protocol, module URI, attribute chain).
@param mod_attr_sep: the separator between module name and attribute name. |
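How the protocol is inferred (hypothetical URIs):

split_uri('a.b.c::x.y')        # ('py', 'a.b.c', 'x.y')
split_uri('a/b/c.py::x.y')     # ('file', 'a/b/c.py', 'x.y')  -- ends with .py
split_uri('py://a.b.c')        # ('py', 'a.b.c', None)
split_uri('file://a/b/c.txt')  # ('file', 'a/b/c.txt', None)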
def get_attr_chain(obj, attr_chain, sep='.'):
if sep is None:
sep = '.'
attr_names = attr_chain.split(sep)
new_obj = obj
for attr_name in attr_names:
new_obj = getattr(new_obj, attr_name)
return new_obj | Get the last attribute of given attribute chain.
E.g. `get_attr_chain(x, 'a.b.c')` is equivalent to `x.a.b.c`.
@param obj: an object
@param attr_chain: a chain of attribute names
@param sep: separator for the chain of attribute names |
def is_closed(self):
for t in self.smi_vector:
found = False
for s in self.sm_vector:
if self.observation_table[s] == self.observation_table[t]:
self.equiv_classes[t] = s
found = True
break
if not found:
return False, t
return True, None | Check if the observation table is closed.
Args:
None
Returns:
tuple (bool, str): True if the observation table is
closed and false otherwise. If the table is not closed
the escaping string is returned. |
def _fill_table_entry(self, row, col):
    """
    Fill an entry of the observation table.
    Args:
        row (str): The row of the observation table
        col (str): The column of the observation table
    Returns:
        None
    """
    prefix = self._membership_query(row)
    full_output = self._membership_query(row + col)
    length = len(commonprefix([prefix, full_output]))
    self.observation_table[row, col] = full_output[length:] | Fill an entry of the observation table.
Args:
row (str): The row of the observation table
col (str): The column of the observation table
Returns:
None |
def _run_in_hypothesis(self, mma, w_string, index):
    """
    Run the string in the hypothesis automaton for index steps and then
    return the access string for the state reached concatenated with the
    rest of the string w.
    Args:
        mma (DFA): The hypothesis automaton
        w_string (str): The examined string to be consumed
        index (int): The index value for selecting the prefix of w
    Return:
        str: The access string
    """
    state = mma[0]
    for i in range(index):
        for arc in state:
            if mma.isyms.find(arc.ilabel) == w_string[i]:
                state = mma[arc.nextstate]
                s_index = arc.nextstate
    # The id of the state is its index inside the Sm list
    access_string = self.observation_table.sm_vector[s_index]
    logging.debug(
        'Access string for %d: %s - %d ',
        index,
        access_string,
        s_index)
    return access_string | Run the string in the hypothesis automaton for index steps and then
return the access string for the state reached concatenated with the
rest of the string w.
Args:
mma (DFA): The hypothesis automaton
w_string (str): The examined string to be consumed
index (int): The index value for selecting the prefix of w
Return:
str: The access string |
def _check_suffix(self, w_string, access_string, index):
prefix_as = self._membership_query(access_string)
full_as = self._membership_query(access_string + w_string[index:])
prefix_w = self._membership_query(w_string[:index])
full_w = self._membership_query(w_string)
length = len(commonprefix([prefix_as, full_as]))
as_suffix = full_as[length:]
length = len(commonprefix([prefix_w, full_w]))
w_suffix = full_w[length:]
if as_suffix != w_suffix:
logging.debug('Access string state incorrect')
return True
logging.debug('Access string state correct.')
return False | Checks if access string suffix matches with the examined string suffix
Args:
w_string (str): The examined string to be consumed
access_string (str): The access string for the state
index (int): The index value for selecting the prefix of w
Returns:
bool: A boolean value indicating whether the matching was successful |
def _find_bad_transition(self, mma, w_string):
conj_out = mma.consume_input(w_string)
targ_out = self._membership_query(w_string)
# TODO: handle different length outputs from conjecture and target
# hypothesis.
length = min(len(conj_out), len(targ_out))
diff = [i for i in range(length)
if conj_out[i] != targ_out[i]]
if len(diff) == 0:
diff_index = len(targ_out)
else:
diff_index = diff[0]
low = 0
high = len(w_string)
while True:
i = (low + high) / 2
length = len(self._membership_query(w_string[:i]))
if length == diff_index + 1:
return w_string[:i]
elif length < diff_index + 1:
low = i + 1
else:
high = i - 1 | Checks for bad DFA transitions using the examined string
Args:
mma (DFA): The hypothesis automaton
w_string (str): The examined string to be consumed
Returns:
str: The prefix of the examined string that matches |
def _process_counter_example(self, mma, w_string):
    """
    Process a counterexample in the Rivest-Schapire way.
    Args:
        mma (DFA): The hypothesis automaton
        w_string (str): The examined string to be consumed
    Returns:
        None
    """
    w_string = self._find_bad_transition(mma, w_string)
    diff = len(w_string)
    same = 0
    while True:
        i = (same + diff) / 2
        access_string = self._run_in_hypothesis(mma, w_string, i)
        is_diff = self._check_suffix(w_string, access_string, i)
        if is_diff:
            diff = i
        else:
            same = i
        if diff - same == 1:
            break
    exp = w_string[diff:]
    self.observation_table.em_vector.append(exp)
    for row in self.observation_table.sm_vector + self.observation_table.smi_vector:
        self._fill_table_entry(row, exp) | Process a counterexample in the Rivest-Schapire way.
Args:
mma (DFA): The hypothesis automaton
w_string (str): The examined string to be consumed
Returns:
None |
def _ot_make_closed(self, access_string):
self.observation_table.sm_vector.append(access_string)
for i in self.alphabet:
self.observation_table.smi_vector.append(access_string + i)
for e in self.observation_table.em_vector:
self._fill_table_entry(access_string + i, e) | Given a state `access_string` in Smi that is not equivalent to any state in Sm,
this method will move that state into Sm, create the corresponding Smi
states and fill the corresponding entries in the table.
Args:
access_string (str): State access string
Returns:
None |
def get_mealy_conjecture(self):
mma = MealyMachine()
for s in self.observation_table.sm_vector:
for i in self.alphabet:
dst = self.observation_table.equiv_classes[s + i]
# If dst == None then the table is not closed.
if dst is None:
logging.debug('Conjecture attempt on non closed table.')
return None
o = self.observation_table[s, i]
src_id = self.observation_table.sm_vector.index(s)
dst_id = self.observation_table.sm_vector.index(dst)
mma.add_arc(src_id, dst_id, i, o)
# This works only for Mealy machines
for s in mma.states:
s.final = True
return mma | Utilize the observation table to construct a Mealy Machine.
The library used for representing the Mealy Machine is the python
bindings of the openFST library (pyFST).
Args:
None
Returns:
MealyMachine: A mealy machine build based on a closed and consistent
observation table. |
def _init_table(self):
self.observation_table.sm_vector.append('')
self.observation_table.smi_vector = list(self.alphabet)
self.observation_table.em_vector = list(self.alphabet)
for i in self.observation_table.em_vector:
self._fill_table_entry('', i)
for s, e in product(self.observation_table.smi_vector, self.observation_table.em_vector):
self._fill_table_entry(s, e) | Initialize the observation table. |
def learn_mealy_machine(self):
logging.info('Initializing learning procedure.')
self._init_table()
logging.info('Generating a closed and consistent observation table.')
while True:
closed = False
# Make sure that the table is closed and consistent
while not closed:
logging.debug('Checking if table is closed.')
closed, string = self.observation_table.is_closed()
if not closed:
logging.debug('Closing table.')
self._ot_make_closed(string)
else:
logging.debug('Table closed.')
# Create conjecture
mma = self.get_mealy_conjecture()
logging.info('Generated conjecture machine with %d states.',
len(list(mma.states)))
# _check correctness
logging.debug('Running equivalence query.')
found, counter_example = self._equivalence_query(mma)
# Are we done?
if found:
logging.info('No counterexample found. Hypothesis is correct!')
break
# Add the new experiments into the table to reiterate the
# learning loop
logging.info(
'Processing counterexample %s with length %d.',
counter_example,
len(counter_example))
self._process_counter_example(mma, counter_example)
logging.info('Learning complete.')
return mma | Implements the high level loop of the algorithm for learning a
Mealy machine.
Args:
None
Returns:
MealyMachine: The learned mealy machine |
def get_headers(environ):
for key, value in environ.iteritems():
key = str(key)
if key.startswith('HTTP_') and key not in \
('HTTP_CONTENT_TYPE', 'HTTP_CONTENT_LENGTH'):
yield key[5:].replace('_', '-').title(), value
elif key in ('CONTENT_TYPE', 'CONTENT_LENGTH'):
yield key.replace('_', '-').title(), value | Returns only proper HTTP headers. |
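A small illustration under Python 2 (matching the iteritems() call above; the environ dict is hypothetical):

env = {'HTTP_X_REQUESTED_WITH': 'XMLHttpRequest', 'CONTENT_TYPE': 'text/html'}
dict(get_headers(env))
# -> {'X-Requested-With': 'XMLHttpRequest', 'Content-Type': 'text/html'}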
def get_host(environ):
scheme = environ.get('wsgi.url_scheme')
if 'HTTP_X_FORWARDED_HOST' in environ:
result = environ['HTTP_X_FORWARDED_HOST']
elif 'HTTP_HOST' in environ:
result = environ['HTTP_HOST']
else:
result = environ['SERVER_NAME']
if (scheme, str(environ['SERVER_PORT'])) not \
in (('https', '443'), ('http', '80')):
result += ':' + environ['SERVER_PORT']
if result.endswith(':80') and scheme == 'http':
result = result[:-3]
elif result.endswith(':443') and scheme == 'https':
result = result[:-4]
return result | Return the real host for the given WSGI environment. This takes care
of the `X-Forwarded-Host` header.
:param environ: the WSGI environment to get the host of. |
def parse_library(lib_files):
tracks, playlists = lib_files
lib = MusicLibrary()
lib_length = len(tracks)
i = 0
writer = lib.ix.writer()
previous_procent_done_str = ""
for f in tracks:
track_info = TrackInfo(f)
lib.add_track_internal(track_info, writer)
current_percent_done_str = "%d%%" % (i / lib_length * 100)
if current_percent_done_str != previous_procent_done_str:
logs.print_info("Analizowanie biblioteki muzycznej... " + current_percent_done_str)
previous_procent_done_str = current_percent_done_str
i += 1.0
logs.print_info("Analizowanie playlist...")
for f in playlists:
with open(f, 'r') as fo:
playlist_dict = loads(fo.read())
playlist = Playlist(lib, f, playlist_dict['title'], playlist_dict['tracks'])
lib.add_playlist(playlist)
writer.commit()
logs.print_info("Optymalizacja index-u...")
lib.ix.optimize()
return lib | Parses the files given in the lib_files list.
Returns a MusicLibrary instance. |
def full_subgraph(self, objects):
vertices = ElementTransformSet(transform=id)
out_edges = KeyTransformDict(transform=id)
in_edges = KeyTransformDict(transform=id)
for obj in objects:
vertices.add(obj)
out_edges[obj] = []
in_edges[obj] = []
edges = set()
head = {}
tail = {}
for referrer in vertices:
for edge in self._out_edges[referrer]:
referent = self._head[edge]
if referent not in vertices:
continue
edges.add(edge)
tail[edge] = referrer
head[edge] = referent
out_edges[referrer].append(edge)
in_edges[referent].append(edge)
return ObjectGraph._raw(
vertices=vertices,
edges=edges,
out_edges=out_edges,
in_edges=in_edges,
head=head,
tail=tail,
) | Return the subgraph of this graph whose vertices
are the given ones and whose edges are the edges
of the original graph between those vertices. |
def _raw(cls, vertices, edges, out_edges, in_edges, head, tail):
self = object.__new__(cls)
self._out_edges = out_edges
self._in_edges = in_edges
self._head = head
self._tail = tail
self._vertices = vertices
self._edges = edges
return self | Private constructor for direct construction
of an ObjectGraph from its attributes.
vertices is the collection of vertices
out_edges and in_edges map vertices to lists of edges
head and tail map edges to objects. |
def _from_objects(cls, objects):
vertices = ElementTransformSet(transform=id)
out_edges = KeyTransformDict(transform=id)
in_edges = KeyTransformDict(transform=id)
for obj in objects:
vertices.add(obj)
out_edges[obj] = []
in_edges[obj] = []
# Edges are identified by simple integers, so
# we can use plain dictionaries for mapping
# edges to their heads and tails.
edge_label = itertools.count()
edges = set()
head = {}
tail = {}
for referrer in vertices:
for referent in gc.get_referents(referrer):
if referent not in vertices:
continue
edge = next(edge_label)
edges.add(edge)
tail[edge] = referrer
head[edge] = referent
out_edges[referrer].append(edge)
in_edges[referent].append(edge)
return cls._raw(
vertices=vertices,
edges=edges,
out_edges=out_edges,
in_edges=in_edges,
head=head,
tail=tail,
) | Private constructor: create graph from the given Python objects.
The constructor examines the referents of each given object to build up
a graph showing the objects and their links. |
def annotated(self):
# Build up dictionary of edge annotations.
edge_annotations = {}
for edge in self.edges:
if edge not in edge_annotations:
# We annotate all edges from a given object at once.
referrer = self._tail[edge]
known_refs = annotated_references(referrer)
for out_edge in self._out_edges[referrer]:
referent = self._head[out_edge]
if known_refs[referent]:
annotation = known_refs[referent].pop()
else:
annotation = None
edge_annotations[out_edge] = annotation
annotated_vertices = [
AnnotatedVertex(
id=id(vertex),
annotation=object_annotation(vertex),
)
for vertex in self.vertices
]
annotated_edges = [
AnnotatedEdge(
id=edge,
annotation=edge_annotations[edge],
head=id(self._head[edge]),
tail=id(self._tail[edge]),
)
for edge in self.edges
]
return AnnotatedGraph(
vertices=annotated_vertices,
edges=annotated_edges,
) | Annotate this graph, returning an AnnotatedGraph object
with the same structure. |
def export_image(self, filename='refcycle.png', format=None,
dot_executable='dot'):
return self.annotated().export_image(
filename=filename,
format=format,
dot_executable=dot_executable,
) | Export graph as an image.
This requires that Graphviz is installed and that the ``dot``
executable is in your path.
The *filename* argument specifies the output filename.
The *format* argument lets you specify the output format. It may be
any format that ``dot`` understands, including extended format
specifications like ``png:cairo``. If omitted, the filename extension
will be used; if no filename extension is present, ``png`` will be
used.
The *dot_executable* argument lets you provide a full path to the
``dot`` executable if necessary. |
def owned_objects(self):
return (
[
self,
self.__dict__,
self._head,
self._tail,
self._out_edges,
self._out_edges._keys,
self._out_edges._values,
self._in_edges,
self._in_edges._keys,
self._in_edges._values,
self._vertices,
self._vertices._elements,
self._edges,
] +
list(six.itervalues(self._out_edges)) +
list(six.itervalues(self._in_edges))
) | List of gc-tracked objects owned by this ObjectGraph instance. |
def find_by_typename(self, typename):
return self.find_by(lambda obj: type(obj).__name__ == typename) | List of all objects whose type has the given name. |
def set_input(self, key, value):
if key not in self._inputs:
raise InputException("Key {0} is not a valid input!".format(key))
self._inputs[key].value = value | Sets the <key> to <value> |
def get_input(self, key, force=False):
if key not in self._inputs:
raise InputException("Key {0} is not a valid input!".format(key))
if self._inputs[key].prompt:
prompt = self._inputs[key].prompt
elif self._inputs[key].is_bool():
prompt = "{0}?".format(key)
else:
prompt = "please enter your {0}".format(key)
help_text = self._inputs[key].help if hasattr(self._inputs[key], 'help') else None
if self._inputs[key].value is EMPTY or force:
default_value = None
if self._inputs[key].default is not EMPTY:
default_value = self._inputs[key].default
if self._inputs[key].value is not EMPTY:
default_value = self._inputs[key].value
input_value = EMPTY
while input_value is EMPTY or input_value == '?':
if input_value == '?' and help_text:
print(help_text)
input_value = lib.prompt(
prompt,
default=default_value,
bool_type=self._inputs[key].in_type,
secret=self._inputs[key].is_secret)
self._inputs[key].value = input_value
return self._inputs[key].value | Get the value of <key> if it already exists, or prompt for it if not |
def get_unset_inputs(self):
return set([k for k, v in self._inputs.items() if v.is_empty(False)]) | Return a set of unset inputs |
def prompt_unset_inputs(self, force=False):
for k, v in self._inputs.items():
if force or v.is_empty(False):
self.get_input(k, force=force) | Prompt for unset input values |
def values(self, with_defaults=True):
return dict(((k, str(v)) for k, v in self._inputs.items() if not v.is_empty(with_defaults))) | Return the values dictionary, defaulting to default values |