def write_values(self): return dict(((k, v.value) for k, v in self._inputs.items() if not v.is_secret and not v.is_empty(False)))
Return the dictionary of values to write, skipping secret and empty inputs.
def add_inputs_from_inputstring(self, input_string): raw_params = input_string.split('\n') param_attributes = (self._parse_param_line(rp) for rp in raw_params if len(rp.strip(' \t')) > 0) for param, attributes in param_attributes: self.add_input(param, attributes)
Add inputs using the input string format: gitroot==~/workspace username password? main_branch==comp_main
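For illustration, a hedged sketch of this input string format; the attribute dictionary after `#` and the `inputs` instance are assumptions for the example, not taken from the code above:

```python
# `inputs` is a hypothetical instance of the class defining add_inputs_from_inputstring
input_string = """
gitroot==~/workspace
username
password?
main_branch==comp_main
protocol==https#{'prompt': 'Which protocol?', 'type': 'string'}
"""
inputs.add_inputs_from_inputstring(input_string)
# '==' supplies a default value, a trailing '?' marks the input as secret,
# and the dict after '#' sets extra attributes such as prompt/help/type.
```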
def _parse_param_line(self, line): value = line.strip('\n \t') if len(value) > 0: i = Input() if value.find('#') != -1: value, extra_attributes = value.split('#') try: extra_attributes = eval(extra_attributes) except SyntaxError: raise InputException("Incorrectly formatted input for {0}!".format(value)) if not isinstance(extra_attributes, dict): raise InputException("Incorrectly formatted input for {0}!".format(value)) if 'prompt' in extra_attributes: i.prompt = extra_attributes['prompt'] if 'help' in extra_attributes: i.help = extra_attributes['help'] if 'type' in extra_attributes: i.in_type = extra_attributes['type'] if i.in_type.find('/') != -1: i.in_type, i.out_type = i.in_type.split('/') if 'cast' in extra_attributes: i.out_type = extra_attributes['cast'] if value.find('==') != -1: value, default = value.split('==') i.default = default if value.endswith('?'): value = value[:-1] i.is_secret = True return (value, i) return None
Parse a single param line.
def extract_csv(zip_path, destination): with zipfile.ZipFile(zip_path) as zf: member_to_unzip = None for member in zf.namelist(): if member.endswith('.csv'): member_to_unzip = member break if not member_to_unzip: raise LookupError( "Couldn't find any CSV file in the archive" ) with zf.open(member_to_unzip) as zfp, \ open(destination, 'wb') as dfp: dfp.write(zfp.read())
Extract the first CSV file found in the given ``zip_path`` ZIP file to the ``destination`` file. Raises :class:`LookupError` if no CSV file can be found in the ZIP.
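A minimal usage sketch, assuming ``extract_csv`` is importable; the paths are hypothetical:

```python
try:
    extract_csv("exports/report.zip", "exports/report.csv")
except LookupError:
    print("The archive does not contain a CSV file")
```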
def download(self, overwrite=True): if overwrite or not os.path.exists(self.file_path): _, f = tempfile.mkstemp() try: urlretrieve(self.DOWNLOAD_URL, f) extract_csv(f, self.file_path) finally: os.remove(f)
Download the zipcodes CSV file. If ``overwrite`` is set to False, the file won't be downloaded if it already exists.
def get_locations(self): if not self.zipcode_mapping: self.download(overwrite=False) zipcode_mapping = {} with UnicodeReader(self.file_path, delimiter=';', encoding='latin1') as csv_reader: # Skip header next(csv_reader) for line in csv_reader: zipcode_mapping[int(line[1])] = Location( official_name=line[0], canton=line[5], municipality=line[3] ) self.zipcode_mapping = zipcode_mapping return self.zipcode_mapping
Return the zipcode mapping as a ``{zipcode: location}`` dict. The zipcodes file will be downloaded if necessary.
def get_zipcodes_for_canton(self, canton): zipcodes = [ zipcode for zipcode, location in self.get_locations().items() if location.canton == canton ] return zipcodes
Return the list of zipcodes for the given canton code.
def get_cantons(self): return sorted(list(set([ location.canton for location in self.get_locations().values() ])))
Return the list of unique cantons, sorted by name.
def get_municipalities(self): return sorted(list(set([ location.municipality for location in self.get_locations().values() ])))
Return the list of unique municipalities, sorted by name.
def term_vector(self, params): ''' params are either True/False, 'with_offsets', 'with_positions', 'with_positions_offsets' ''' if params == True: self[self.field]['term_vector'] = 'yes' elif params == False: self[self.field]['term_vector'] = 'no' else: self[self.field]['term_vector'] = params return self
params are either True/False, 'with_offsets', 'with_positions', 'with_positions_offsets'
def _get_formula_class(self, formula): # recursive import otherwise from sprinter.formula.base import FormulaBase if formula in LEGACY_MAPPINGS: formula = LEGACY_MAPPINGS[formula] formula_class, formula_url = formula, None if ':' in formula: formula_class, formula_url = formula.split(":", 1) if formula_class not in self._formula_dict: try: self._formula_dict[formula_class] = lib.get_subclass_from_module(formula_class, FormulaBase) except (SprinterException, ImportError): logger.info("Downloading %s..." % formula_class) try: self._pip.install_egg(formula_url or formula_class) try: self._formula_dict[formula_class] = lib.get_subclass_from_module(formula_class, FormulaBase) except ImportError: logger.debug("FeatureDict import Error", exc_info=sys.exc_info()) raise SprinterException("Error: Unable to retrieve formula %s!" % formula_class) except PipException: logger.error("ERROR: Unable to download %s!" % formula_class) return self._formula_dict[formula_class]
Get a formula class object if it exists, else create one, add it to the dict, and return it.
def is_backup_class(cls): return True if ( isclass(cls) and issubclass(cls, Storable) and get_mapping(cls, no_mapping_ok=True) ) else False
Return true if given class supports back up. Currently this means a gludb.data.Storable-derived class that has a mapping as defined in gludb.config
def add_package( self, pkg_name, recurse=True, include_bases=True, parent_pkg=None ): if parent_pkg: pkg = import_module('.' + pkg_name, parent_pkg) else: pkg = import_module(pkg_name) for module_loader, name, ispkg in pkgutil.walk_packages(pkg.__path__): if not ispkg: # Module mod = import_module('.' + name, pkg_name) for name, member in getmembers(mod): if is_backup_class(member): self.add_class(member, include_bases=include_bases) elif recurse: # Package and we're supposed to recurse self.add_package( pkg_name + '.' + name, recurse=True, include_bases=include_bases, parent_pkg=parent_pkg )
Add all classes to the backup in the specified package (including all modules and all sub-packages) for which is_backup_class returns True. Note that self.add_class is used, so base classes will be added as well. Parameters: * pkg_name - a string representing the package name. It may be relative _if_ parent_pkg is supplied as well * recurse - (default value of True) if False, sub-packages will _not_ be examined * include_bases - (default value of True) is passed directly to add_class for every class added * parent_pkg - a string representing the parent package of the relative package specified in pkg_name. Note that you should specify parent_pkg _only_ if pkg_name should be interpreted as relative As an example of both relative and absolute package imports, these are equivalent: ```` backup.add_package('toppackage.subpackage') backup.add_package('subpackage', parent_pkg='toppackage') ````
def add_class(self, cls, include_bases=True): if not is_backup_class(cls): return 0 added = 0 cls_name = backup_name(cls) if cls_name not in self.classes: self.classes[cls_name] = cls self.log("Added class for backup: %s", cls_name) added = 1 if include_bases: for candidate_cls in getmro(cls): if is_backup_class(candidate_cls): # Note that we don't keep recursing on base classes added += self.add_class(candidate_cls, include_bases=False) return added
Add the specified class (which should be a class object, _not_ a string). By default all base classes for which is_backup_class returns True will also be added. `include_bases=False` may be specified to suppress this behavior. The total number of classes added is returned. Note that if is_backup_class does not return True for the class object passed in, 0 will be returned. If you specify include_bases=False, then the maximum value that can be returned is 1.
def log(self, entry, *args): if args: entry = entry % args self.backup_log.append(entry)
Append the string supplied to the log (a list of strings). If additional arguments are supplied, then first string is assumed to be a format string and the other args are used for string interpolation. For instance `backup.log("%d + %d == %d", 1, 1, 2)` would result in the string `'1 + 1 == 2'` being logged
def list_dir(sourceDir, include_source=None, include_file=True): for cur_file in os.listdir(sourceDir): if cur_file.lower() == ".ds_store": continue pathWithSource = os.path.join(sourceDir, cur_file) if include_file or os.path.isdir(pathWithSource): if include_source: yield pathWithSource else: yield cur_file
Similar to :func:`os.listdir()`, but adds some filtering and returns a generator object. :param str sourceDir: the directory to process. :param bool include_source: whether the yielded paths include the source directory. :param bool include_file: whether to include files. True means both files and directories are yielded; False means only directories. :return: a generator object.
def copy_dir(sou_dir, dst_dir, del_dst=False, del_subdst=False): if del_dst and os.path.isdir(dst_dir): shutil.rmtree(dst_dir) os.makedirs(dst_dir, exist_ok=True) for cur_file in list_dir(sou_dir): dst_file = os.path.join(dst_dir, cur_file) cur_file = os.path.join(sou_dir, cur_file) if os.path.isdir(cur_file): if del_subdst and os.path.isdir(dst_file): shutil.rmtree(dst_file) os.makedirs(dst_file, exist_ok=True) copy_dir(cur_file, dst_file) else: shutil.copyfile(cur_file, dst_file)
:func:`shutil.copytree()` offers similar functionality, but it requires that the target directory not yet exist. copy_dir has no such requirement: it merges the files in sou_dir into dst_dir. :param str sou_dir: the directory to copy; :param str dst_dir: the target directory; :param bool del_dst: whether to delete the target directory first. :param bool del_subdst: whether to delete target sub-directories first.
def get_files(path, ext=[], include=True): has_ext = len(ext)>0 for p, d, fs in os.walk(path): for f in fs: if has_ext: in_ext = False for name in ext: if f.endswith(name): in_ext = True break if (include and in_ext) or \ (not include and not in_ext): yield os.path.join(p,f) else: yield os.path.join(p, f)
Walk all sub-directories of the given directory and return a generator object. :param str path: the directory to process. :param list ext: a list of file extensions. :param bool include: if True, ext is treated as an include list; otherwise it is an exclude list. :returns: a generator object.
def read_file(file_path, **kws): kw = {"mode":"r", "encoding":"utf-8"} if kws: for k,v in kws.items(): kw[k] = v with open(file_path, **kw) as afile: txt = afile.read() return txt
Read the contents of a text file. :param str file_path: path of the file. :returns: the file contents. :rtype: str
def write_file(file_path, txt, **kws): if not os.path.exists(file_path): upDir = os.path.dirname(file_path) if not os.path.isdir(upDir): os.makedirs(upDir) kw = {"mode":"w", "encoding":"utf-8"} if kws: for k,v in kws.items(): kw[k] = v with open(file_path, **kw) as afile: afile.write(txt)
Write text content to a file. :param str file_path: path of the file. :param str txt: the content to write.
def write_by_templ(templ, target, sub_value, safe=False): templ_txt = read_file(templ) txt = None if safe: txt = Template(templ_txt).safe_substitute(sub_value) else: txt = Template(templ_txt).substitute(sub_value) write_file(target, txt)
Write a file based on a template. :param str templ: path of the template file. :param str target: path of the file to write. :param dict sub_value: the values to substitute.
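The ``safe`` flag maps directly onto :class:`string.Template`; a standalone sketch of the difference:

```python
from string import Template

templ_txt = "Hello $name, version $ver"
Template(templ_txt).safe_substitute({"name": "Ada"})  # 'Hello Ada, version $ver'
Template(templ_txt).substitute({"name": "Ada"})       # raises KeyError: 'ver'
```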
def get_md5(path): with open(path, 'rb') as f: md5obj = hashlib.md5() md5obj.update(f.read()) return md5obj.hexdigest()
Get the MD5 digest of a file. :param str path: path of the file. :returns: the MD5 value. :rtype: str
def create_zip(files, trim_arcname=None, target_file=None, **zipfile_args): zipname = None azip = None if not target_file: azip = tempfile.NamedTemporaryFile(mode='wb', delete=False) zipname = azip.name else: azip = target_file zipname = target_file.name if hasattr(azip, 'read') else azip slog.info('Package %d files to "%s"'%(len(files), zipname)) fileNum = len(files) curFile = 0 zipfile_args['mode'] = 'w' if not zipfile_args.get('compression'): zipfile_args['compression'] = zipfile.ZIP_DEFLATED with zipfile.ZipFile(azip, **zipfile_args) as zipf: for f in files: percent = round(curFile/fileNum*100) sys.stdout.write('\r%d%%'%(percent)) sys.stdout.flush() zipf.write(f, f[trim_arcname:] if trim_arcname else None ) curFile = curFile+1 sys.stdout.write('\r100%\n') sys.stdout.flush() if hasattr(azip, 'close'): azip.close() return zipname
Create a zip file. :param list files: the list of files to pack into the zip. :param int trim_arcname: if provided, each file is written as ZipFile.write(filename, filename[trim_arcname:]). :returns: path of the zip file. :rtype: str
def get_max_ver(fmt, filelist): x, y, z = 0, 0, 0 verpat = fmt%r'(\d+)\.(\d+)\.(\d+)' verre = re.compile(verpat, re.M) for f in filelist: match = verre.search(f) if match: x1 = int(match.group(1)) y1 = int(match.group(2)) z1 = int(match.group(3)) if (x1, y1, z1) > (x, y, z): x, y, z = x1, y1, z1 verfmt = fmt%('%d.%d.%d') name = verfmt%(x, y, z) if x == 0 and y == 0 and z == 0: slog.info('Can not find the string "%s" !'%name) return None return name
Given a list of strings whose names all contain a version number of the form %d.%d.%d, return the one with the highest version number. I typically use this to find the newest file among a pile of release archives. :param str fmt: the string pattern to match, e.g. rookout-%s.tar.gz, where %s is replaced by the version regex. :param list filelist: the list of strings. :returns: the string with the highest version number. :rtype: str
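A usage sketch, assuming ``get_max_ver`` is importable; the file names are made up:

```python
files = ["rookout-0.9.9.tar.gz", "rookout-1.2.3.tar.gz", "rookout-1.10.0.tar.gz"]
get_max_ver("rookout-%s.tar.gz", files)  # 'rookout-1.10.0.tar.gz'
```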
def merge_dicts(d1, d2): for k in set(d1.keys()).union(d2.keys()): if k in d1 and k in d2: if isinstance(d1[k], dict) and isinstance(d2[k], dict): yield (k, dict(merge_dicts(d1[k], d2[k]))) elif isinstance(d1[k], list): if isinstance(d2[k], list): d1[k].extend(d2[k]) else: d1[k].append(d2[k]) yield (k, d1[k]) else: # If one of the values is not a dict, you can't continue merging it. # Value from second dict overrides one in first and we move on. yield (k, d2[k]) # Alternatively, replace this with exception raiser to alert you of value conflicts elif k in d1: yield (k, d1[k]) else: yield (k, d2[k])
Merge two dicts of unlimited depth; lists are merged automatically. :param dict d1: the dict being merged into. :param dict d2: the dict to merge in. :returns: a new generator object. :rtype: generator
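A usage sketch of the merge behaviour (nested dicts are merged, lists are concatenated), assuming ``merge_dicts`` is importable; key order in the result may vary:

```python
d1 = {"a": 1, "tags": ["x"], "cfg": {"retries": 3}}
d2 = {"b": 2, "tags": ["y"], "cfg": {"timeout": 10}}
dict(merge_dicts(d1, d2))
# {'a': 1, 'tags': ['x', 'y'], 'cfg': {'retries': 3, 'timeout': 10}, 'b': 2}
```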
def main(argv: Optional[Sequence[str]] = None) -> None: parser = ArgumentParser(description="Convert Jupyter Notebook assignments to PDFs") parser.add_argument( "--hw", type=int, required=True, help="Homework number to convert", dest="hw_num", ) parser.add_argument( "-p", "--problems", type=int, help="Problem numbers to convert", dest="problems", nargs="*", ) parser.add_argument( "--by-hand", type=int, help="Problem numbers to be completed by hand", dest="by_hand", nargs="*", ) args = parser.parse_args(argv) prefix = Path(f"homework/homework-{args.hw_num}") process(args.hw_num, args.problems, prefix=prefix, by_hand=args.by_hand)
Parse arguments and process the homework assignment.
def get_object_by_name(content, object_type, name, regex=False): ''' Get the vsphere object associated with a given text name Source: https://github.com/rreubenur/vmware-pyvmomi-examples/blob/master/create_template.py ''' container = content.viewManager.CreateContainerView( content.rootFolder, [object_type], True ) for c in container.view: if regex: if re.match(name, c.name): return c elif c.name == name: return c
Get the vsphere object associated with a given text name Source: https://github.com/rreubenur/vmware-pyvmomi-examples/blob/master/create_template.py
def get_vm_by_name(content, name, regex=False): ''' Get a VM by its name ''' return get_object_by_name(content, vim.VirtualMachine, name, regex)
Get a VM by its name
def get_all(content, container, object_type): ''' Get all items of a certain type Example: get_all(content, vim.Datastore) return all datastore objects ''' obj_list = list() view_manager = content.viewManager object_view = view_manager.CreateContainerView( container, [object_type], True ) for obj in object_view.view: if isinstance(obj, object_type): obj_list.append(obj) object_view.Destroy() return obj_list
Get all items of a certain type Example: get_all(content, vim.Datastore) return all datastore objects
def get_datacenter(content, obj): ''' Get the datacenter to whom an object belongs ''' datacenters = content.rootFolder.childEntity for d in datacenters: dch = get_all(content, d, type(obj)) if dch is not None and obj in dch: return d
Get the datacenter to whom an object belongs
def get_all_vswitches(content): ''' Get all the virtual switches ''' vswitches = [] hosts = get_all_hosts(content) for h in hosts: for s in h.config.network.vswitch: vswitches.append(s) return vswitches
Get all the virtual switches
def print_vm_info(vm): ''' Print information for a particular virtual machine ''' summary = vm.summary print('Name : ', summary.config.name) print('Path : ', summary.config.vmPathName) print('Guest : ', summary.config.guestFullName) annotation = summary.config.annotation if annotation is not None and annotation != '': print('Annotation : ', annotation) print('State : ', summary.runtime.powerState) if summary.guest is not None: ip = summary.guest.ipAddress if ip is not None and ip != '': print('IP : ', ip) if summary.runtime.question is not None: print('Question : ', summary.runtime.question.text) print('')
Print information for a particular virtual machine
def module_import(module_path): try: # Import whole module path. module = __import__(module_path) # Split into components: ['contour', # 'extras','appengine','ndb_persistence']. components = module_path.split('.') # Starting at the second component, set module to a # a reference to that component. at the end # module with be the last component. In this case: # ndb_persistence for component in components[1:]: module = getattr(module, component) return module except ImportError: raise BadModulePathError( 'Unable to find module "%s".' % (module_path,))
Imports the module indicated in name Args: module_path: string representing a module path such as 'app.config' or 'app.extras.my_module' Returns: the module matching name of the last component, ie: for 'app.extras.my_module' it returns a reference to my_module Raises: BadModulePathError if the module is not found
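A usage sketch, assuming ``module_import`` is importable from this module:

```python
decoder = module_import("json.decoder")  # same object as `import json.decoder`
decoder.JSONDecoder                      # <class 'json.decoder.JSONDecoder'>
module_import("no.such.module")          # raises BadModulePathError
```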
def find_contour_yaml(config_file=__file__, names=None): checked = set() contour_yaml = _find_countour_yaml(os.path.dirname(config_file), checked, names=names) if not contour_yaml: contour_yaml = _find_countour_yaml(os.getcwd(), checked, names=names) return contour_yaml
Traverse directory trees to find a contour.yaml file Begins with the location of this file then checks the working directory if not found Args: config_file: location of this file, override for testing Returns: the path of contour.yaml or None if not found
def _find_countour_yaml(start, checked, names=None): extensions = [] if names: for name in names: if not os.path.splitext(name)[1]: extensions.append(name + ".yaml") extensions.append(name + ".yml") yaml_names = (names or []) + CONTOUR_YAML_NAMES + extensions directory = start while directory not in checked: checked.add(directory) for fs_yaml_name in yaml_names: yaml_path = os.path.join(directory, fs_yaml_name) if os.path.exists(yaml_path): return yaml_path directory = os.path.dirname(directory) return
Traverse the directory tree identified by start until a directory already in checked is encountered or the path of contour.yaml is found. Checked is present both to make the loop termination easy to reason about and so the same directories do not get rechecked Args: start: the path to start looking in and work upward from checked: the set of already checked directories Returns: the path of the contour.yaml file or None if it is not found
def _load_yaml_config(path=None): countour_yaml_path = path or find_contour_yaml() if countour_yaml_path is None: logging.debug("countour.yaml not found.") return None with open(countour_yaml_path) as yaml_file: return yaml_file.read()
Open and return the yaml contents.
def build_parser(): parser = argparse.ArgumentParser( description='dockerstache templating util' ) parser.add_argument( '--output', '-o', help='Working directory to render dockerfile and templates', dest='output', default=None ) parser.add_argument( '--input', '-i', help='Working directory containing dockerfile and script mustache templates', dest='input', default=os.getcwd() ) parser.add_argument( '--context', '-c', help='JSON file containing context dictionary to render templates', dest='context', default=None ) parser.add_argument( '--defaults', '-d', help='JSON file containing default context dictionary to render templates', dest='defaults', default=None ) parser.add_argument( '--inclusive', help='include non .mustache files from template', default=False, action='store_true' ) parser.add_argument( '--exclude', '-e', help='exclude files from template in this list', default=[], nargs='+' ) opts = parser.parse_args() return vars(opts)
_build_parser_ Set up CLI parser options, parse the CLI options and return the parsed results
def main(): options = build_parser() try: run(**options) except RuntimeError as ex: msg = ( "An error occurred running dockerstache: {} " "please see logging info above for details" ).format(ex) LOGGER.error(msg) sys.exit(1)
_main_ Create a CLI parser and use that to run the template rendering process
def _guess_type_from_validator(validator): if isinstance(validator, _OptionalValidator): # Optional : look inside return _guess_type_from_validator(validator.validator) elif isinstance(validator, _AndValidator): # Sequence : try each of them for v in validator.validators: typ = _guess_type_from_validator(v) if typ is not None: return typ return None elif isinstance(validator, _InstanceOfValidator): # InstanceOf validator : found it ! return validator.type else: # we could not find the type return None
Utility method to return the declared type of an attribute or None. It handles _OptionalValidator and _AndValidator in order to unpack the validators. :param validator: :return: the type of attribute declared in an inner 'instance_of' validator (if any is found, the first one is used) or None if no inner 'instance_of' validator is found
def is_optional(attr): return isinstance(attr.validator, _OptionalValidator) or (attr.default is not None and attr.default is not NOTHING)
Helper method to find whether an attribute is optional, i.e. it has an Optional validator or a non-None default :param attr: :return:
def get_attrs_declarations(item_type): # this will raise an error if the type is not an attr-created type attribs = fields(item_type) res = dict() for attr in attribs: attr_name = attr.name # -- is the attribute mandatory ? optional = is_optional(attr) # -- get and check the attribute type typ = guess_type_from_validators(attr) # -- store both info in result dict res[attr_name] = (typ, optional) return res
Helper method to return a dictionary of tuples. Each key is attr_name, and value is (attr_type, attr_is_optional) :param item_type: :return:
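A hedged sketch of what the declarations dictionary looks like for a small attrs class; the exact validator internals differ across attrs versions, so treat the output as illustrative:

```python
import attr
from attr.validators import instance_of, optional

@attr.s
class Point:
    x = attr.ib(validator=instance_of(int))
    y = attr.ib(validator=optional(instance_of(int)), default=None)

get_attrs_declarations(Point)
# {'x': (int, False), 'y': (int, True)}   # i.e. (attr_type, attr_is_optional)
```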
def preprocess( self, nb: "NotebookNode", resources: dict ) -> Tuple["NotebookNode", dict]: if not resources.get("global_content_filter", {}).get("include_raw", False): keep_cells = [] for cell in nb.cells: if cell.cell_type != "raw": keep_cells.append(cell) nb.cells = keep_cells return nb, resources
Remove any raw cells from the Notebook. By default, exclude raw cells from the output. Change this by including global_content_filter->include_raw = True in the resources dictionary. This preprocessor is necessary because the NotebookExporter doesn't include the exclude_raw config.
def preprocess( self, nb: "NotebookNode", resources: dict ) -> Tuple["NotebookNode", dict]: if "remove_solution" not in resources: raise KeyError("The resources dictionary must have a remove_solution key.") if resources["remove_solution"]: keep_cells_idx = [] for index, cell in enumerate(nb.cells): if "## solution" in cell.source.lower(): keep_cells_idx.append(index) # The space at the end of the test string here is important elif len(keep_cells_idx) > 0 and cell.source.startswith("### "): keep_cells_idx.append(index) keep_cells = nb.cells[: keep_cells_idx[0] + 1] for i in keep_cells_idx[1:]: keep_cells.append(nb.cells[i]) if resources["by_hand"]: keep_cells.append(by_hand_cell) else: if "sketch" in nb.cells[i].source.lower(): keep_cells.append(sketch_cell) else: keep_cells.append(md_expl_cell) keep_cells.append(code_ans_cell) keep_cells.append(md_ans_cell) nb.cells = keep_cells return nb, resources
Preprocess the entire notebook.
def preprocess( self, nb: "NotebookNode", resources: dict ) -> Tuple["NotebookNode", dict]: for index, cell in enumerate(nb.cells): if "## Solution" in cell.source: nb.cells[index + 1].source = "" return nb, resources
Preprocess the entire Notebook.
def preprocess( self, nb: "NotebookNode", resources: dict ) -> Tuple["NotebookNode", dict]: exam_num = resources["exam_num"] time = resources["time"] date = resources["date"] nb.cells.insert(0, new_markdown_cell(source="---")) nb.cells.insert(0, new_markdown_cell(source="")) nb.cells.insert(0, exam_instructions_cell) first_cell_source = ( "# ME 2233: Thermodynamic Principles\n\n" f"# Exam {exam_num} - {time}\n\n# {date}" ) nb.cells.insert(0, new_markdown_cell(source=first_cell_source)) return nb, resources
Preprocess the entire Notebook.
def parse_from_dict(json_dict): order_columns = json_dict['columns'] order_list = MarketOrderList( upload_keys=json_dict['uploadKeys'], order_generator=json_dict['generator'], ) for rowset in json_dict['rowsets']: generated_at = parse_datetime(rowset['generatedAt']) region_id = rowset['regionID'] type_id = rowset['typeID'] order_list.set_empty_region(region_id, type_id, generated_at) for row in rowset['rows']: order_kwargs = _columns_to_kwargs( SPEC_TO_KWARG_CONVERSION, order_columns, row) order_kwargs.update({ 'region_id': region_id, 'type_id': type_id, 'generated_at': generated_at, }) order_kwargs['order_issue_date'] = parse_datetime(order_kwargs['order_issue_date']) order_list.add_order(MarketOrder(**order_kwargs)) return order_list
Given a Unified Uploader message, parse the contents and return a MarketOrderList. :param dict json_dict: A Unified Uploader message as a JSON dict. :rtype: MarketOrderList :returns: An instance of MarketOrderList, containing the orders within.
def encode_to_json(order_list): rowsets = [] for items_in_region_list in order_list._orders.values(): region_id = items_in_region_list.region_id type_id = items_in_region_list.type_id generated_at = gen_iso_datetime_str(items_in_region_list.generated_at) rows = [] for order in items_in_region_list.orders: issue_date = gen_iso_datetime_str(order.order_issue_date) # The order in which these values are added is crucial. It must # match STANDARD_ENCODED_COLUMNS. rows.append([ order.price, order.volume_remaining, order.order_range, order.order_id, order.volume_entered, order.minimum_volume, order.is_bid, issue_date, order.order_duration, order.station_id, order.solar_system_id, ]) rowsets.append(dict( generatedAt = generated_at, regionID = region_id, typeID = type_id, rows = rows, )) json_dict = { 'resultType': 'orders', 'version': '0.1', 'uploadKeys': order_list.upload_keys, 'generator': order_list.order_generator, 'currentTime': gen_iso_datetime_str(now_dtime_in_utc()), # This must match the order of the values in the row assembling portion # above this. 'columns': STANDARD_ENCODED_COLUMNS, 'rowsets': rowsets, } return json.dumps(json_dict)
Encodes this list of MarketOrder instances to a JSON string. :param MarketOrderList order_list: The order list to serialize. :rtype: str
def weather(query): print 'Identifying the location . . .' try: response = unirest.post("https://textanalysis.p.mashape.com/nltk-stanford-ner", headers={ "X-Mashape-Key": "E7WffsNDbNmshj4aVC4NUwj9dT9ep1S2cc3jsnFp5wSCzNBiaP", "Content-Type": "application/x-www-form-urlencoded" }, params={ "text": query } ) except: print 'Unable to connect to internet' return location = '' for entity in response.body['result'].split(): word,tag = entity.split('/') if(tag == 'LOCATION'): location += ' '+word if(location != ''): print 'Gathering weather information for'+location import urllib2, urllib, json baseurl = "https://query.yahooapis.com/v1/public/yql?" yql_query = "select * from weather.forecast where woeid in \ (select woeid from geo.places(1) where text=\""+location+"\")" yql_url = baseurl + urllib.urlencode({'q':yql_query}) + "&format=json" try: result = urllib2.urlopen(yql_url).read() data = json.loads(result) result = data['query']['results']['channel'] print result['location']['city']+' '+result['location']['country']+' '+result['location']['region'] print result['item']['condition']['date'] print result['item']['condition']['text'] print result['item']['condition']['temp']+' '+result['units']['temperature'] except: print 'Unable to connect to internet' else: print 'Unable to get the location.'
weather(query) -- use the Named Entity Recogniser (nltk-stanford-ner) to determine the location entity in the query and fetch weather info for that location (using the Yahoo APIs).
def generic(query): try: response = unirest.post("https://textanalysis.p.mashape.com/nltk-stanford-ner", headers={ "X-Mashape-Key": "E7WffsNDbNmshj4aVC4NUwj9dT9ep1S2cc3jsnFp5wSCzNBiaP", "Content-Type": "application/x-www-form-urlencoded" }, params={ "text": query } ) except: print 'Unable to connect to internet' return web_query = '' for entity in response.body['result'].split(): word,tag = entity.split('/') if(tag != 'O'): web_query += ' '+word if(web_query != ''): web_query = web_query.strip().split() duckduckgo.query(web_query) else: print 'I do not know how to process this query at this moment.'
generic(query) -- process a generic user query using the Stanford NLTK NER and duckduckgo api.
def _can_construct_from_str(strict_mode: bool, from_type: Type, to_type: Type) -> bool: return to_type not in {int, float, bool}
Returns true if the provided types are valid for constructor_with_str_arg conversion Explicitly declare that we are not able to convert primitive types (they already have their own converters) :param strict_mode: :param from_type: :param to_type: :return:
def are_flags_valid(packet_type, flags): if packet_type == MqttControlPacketType.publish: rv = 0 <= flags <= 15 elif packet_type in (MqttControlPacketType.pubrel, MqttControlPacketType.subscribe, MqttControlPacketType.unsubscribe): rv = flags == 2 elif packet_type in (MqttControlPacketType.connect, MqttControlPacketType.connack, MqttControlPacketType.puback, MqttControlPacketType.pubrec, MqttControlPacketType.pubcomp, MqttControlPacketType.suback, MqttControlPacketType.unsuback, MqttControlPacketType.pingreq, MqttControlPacketType.pingresp, MqttControlPacketType.disconnect): rv = flags == 0 else: raise NotImplementedError(packet_type) return rv
True when flags comply with [MQTT-2.2.2-1] requirements based on packet_type; False otherwise. Parameters ---------- packet_type: MqttControlPacketType flags: int Integer representation of 4-bit MQTT header flags field. Values outside of the range [0, 15] will certainly cause the function to return False. Returns ------- bool
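A quick illustration of the rules, assuming ``MqttControlPacketType`` is importable alongside ``are_flags_valid``:

```python
are_flags_valid(MqttControlPacketType.subscribe, 2)      # True: must be exactly 2
are_flags_valid(MqttControlPacketType.subscribe, 0)      # False
are_flags_valid(MqttControlPacketType.publish, 0b1011)   # True: any DUP/QoS/RETAIN bits in [0, 15]
are_flags_valid(MqttControlPacketType.connect, 1)        # False: must be 0
```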
def decode(f): decoder = mqtt_io.FileDecoder(f) (byte_0,) = decoder.unpack(mqtt_io.FIELD_U8) packet_type_u4 = (byte_0 >> 4) flags = byte_0 & 0x0f try: packet_type = MqttControlPacketType(packet_type_u4) except ValueError: raise DecodeError('Unknown packet type 0x{:02x}.'.format(packet_type_u4)) if not are_flags_valid(packet_type, flags): raise DecodeError('Invalid flags for packet type.') num_bytes, num_remaining_bytes = decoder.unpack_varint(4) return decoder.num_bytes_consumed, MqttFixedHeader(packet_type, flags, num_remaining_bytes)
Extract a `MqttFixedHeader` from ``f``. Parameters ---------- f: file Object with read method. Raises ------- DecodeError When bytes decoded have values incompatible with a `MqttFixedHeader` object. UnderflowDecodeError When end-of-stream is encountered before the end of the fixed header. Returns ------- int Number of bytes consumed from ``f``. MqttFixedHeader Header object extracted from ``f``.
def encode_body(self, f): num_bytes_written = 0 num_bytes_written += self.__encode_name(f) num_bytes_written += self.__encode_protocol_level(f) num_bytes_written += self.__encode_connect_flags(f) num_bytes_written += self.__encode_keep_alive(f) num_bytes_written += mqtt_io.encode_utf8(self.client_id, f) if self.will is not None: num_bytes_written += mqtt_io.encode_utf8(self.will.topic, f) num_bytes_written += mqtt_io.encode_bytes(self.will.message, f) if self.username is not None: num_bytes_written += mqtt_io.encode_utf8(self.username, f) if self.password is not None: num_bytes_written += mqtt_io.encode_utf8(self.password, f) return num_bytes_written
Parameters ---------- f: file File-like object with a write method. Returns ------- int Number of bytes written to ``f``.
def decode_body(cls, header, f): assert header.packet_type == MqttControlPacketType.subscribe decoder = mqtt_io.FileDecoder(mqtt_io.LimitReader(f, header.remaining_len)) packet_id, = decoder.unpack(mqtt_io.FIELD_PACKET_ID) topics = [] while header.remaining_len > decoder.num_bytes_consumed: num_str_bytes, name = decoder.unpack_utf8() max_qos, = decoder.unpack(mqtt_io.FIELD_U8) try: sub_topic = MqttTopic(name, max_qos) except ValueError: raise DecodeError('Invalid QOS {}'.format(max_qos)) topics.append(sub_topic) assert header.remaining_len == decoder.num_bytes_consumed return decoder.num_bytes_consumed, MqttSubscribe(packet_id, topics)
Generates a `MqttSubscribe` packet given a `MqttFixedHeader`. This method asserts that header.packet_type is `subscribe`. Parameters ---------- header: MqttFixedHeader f: file Object with a read method. Raises ------ DecodeError When there are extra bytes at the end of the packet. Returns ------- int Number of bytes consumed from ``f``. MqttSubscribe Object extracted from ``f``.
def decode_body(cls, header, f): assert header.packet_type == MqttControlPacketType.suback decoder = mqtt_io.FileDecoder(mqtt_io.LimitReader(f, header.remaining_len)) packet_id, = decoder.unpack(mqtt_io.FIELD_PACKET_ID) results = [] while header.remaining_len > decoder.num_bytes_consumed: result, = decoder.unpack(mqtt_io.FIELD_U8) try: results.append(SubscribeResult(result)) except ValueError: raise DecodeError('Unsupported result {:02x}.'.format(result)) assert header.remaining_len == decoder.num_bytes_consumed return decoder.num_bytes_consumed, MqttSuback(packet_id, results)
Generates a `MqttSuback` packet given a `MqttFixedHeader`. This method asserts that header.packet_type is `suback`. Parameters ---------- header: MqttFixedHeader f: file Object with a read method. Raises ------ DecodeError When there are extra bytes at the end of the packet. Returns ------- int Number of bytes consumed from ``f``. MqttSuback Object extracted from ``f``.
def decode_body(cls, header, f): assert header.packet_type == MqttControlPacketType.publish dupe = bool(header.flags & 0x08) retain = bool(header.flags & 0x01) qos = ((header.flags & 0x06) >> 1) if qos == 0 and dupe: # The DUP flag MUST be set to 0 for all QoS 0 messages # [MQTT-3.3.1-2] raise DecodeError("Unexpected dupe=True for qos==0 message [MQTT-3.3.1-2].") decoder = mqtt_io.FileDecoder(mqtt_io.LimitReader(f, header.remaining_len)) num_bytes_consumed, topic_name = decoder.unpack_utf8() if qos != 0: # See MQTT 3.1.1 section 3.3.2.2 # See https://github.com/kcallin/mqtt-codec/issues/5 packet_id, = decoder.unpack(mqtt_io.FIELD_PACKET_ID) else: packet_id = 0 payload_len = header.remaining_len - decoder.num_bytes_consumed payload = decoder.read(payload_len) return decoder.num_bytes_consumed, MqttPublish(packet_id, topic_name, payload, dupe, qos, retain)
Generates a `MqttPublish` packet given a `MqttFixedHeader`. This method asserts that header.packet_type is `publish`. Parameters ---------- header: MqttFixedHeader f: file Object with a read method. Raises ------ DecodeError When there are extra bytes at the end of the packet. Returns ------- int Number of bytes consumed from ``f``. MqttPublish Object extracted from ``f``.
def decode_body(cls, header, f): assert header.packet_type == MqttControlPacketType.pubrel decoder = mqtt_io.FileDecoder(mqtt_io.LimitReader(f, header.remaining_len)) packet_id, = decoder.unpack(mqtt_io.FIELD_U16) if header.remaining_len != decoder.num_bytes_consumed: raise DecodeError('Extra bytes at end of packet.') return decoder.num_bytes_consumed, MqttPubrel(packet_id)
Generates a `MqttPubrel` packet given a `MqttFixedHeader`. This method asserts that header.packet_type is `pubrel`. Parameters ---------- header: MqttFixedHeader f: file Object with a read method. Raises ------ DecodeError When there are extra bytes at the end of the packet. Returns ------- int Number of bytes consumed from ``f``. MqttPubrel Object extracted from ``f``.
def decode_body(cls, header, f): assert header.packet_type == MqttControlPacketType.unsubscribe decoder = mqtt_io.FileDecoder(mqtt_io.LimitReader(f, header.remaining_len)) packet_id, = decoder.unpack(mqtt_io.FIELD_PACKET_ID) topics = [] while header.remaining_len > decoder.num_bytes_consumed: num_str_bytes, topic = decoder.unpack_utf8() topics.append(topic) assert header.remaining_len - decoder.num_bytes_consumed == 0 return decoder.num_bytes_consumed, MqttUnsubscribe(packet_id, topics)
Generates a `MqttUnsubscribe` packet given a `MqttFixedHeader`. This method asserts that header.packet_type is `unsubscribe`. Parameters ---------- header: MqttFixedHeader f: file Object with a read method. Raises ------ DecodeError When there are extra bytes at the end of the packet. Returns ------- int Number of bytes consumed from ``f``. MqttUnsubscribe Object extracted from ``f``.
def decode_body(cls, header, f): assert header.packet_type == MqttControlPacketType.unsuback decoder = mqtt_io.FileDecoder(mqtt_io.LimitReader(f, header.remaining_len)) packet_id, = decoder.unpack(mqtt_io.FIELD_PACKET_ID) if header.remaining_len != decoder.num_bytes_consumed: raise DecodeError('Extra bytes at end of packet.') return decoder.num_bytes_consumed, MqttUnsuback(packet_id)
Generates a `MqttUnsuback` packet given a `MqttFixedHeader`. This method asserts that header.packet_type is `unsuback`. Parameters ---------- header: MqttFixedHeader f: file Object with a read method. Raises ------ DecodeError When there are extra bytes at the end of the packet. Returns ------- int Number of bytes consumed from ``f``. MqttUnsuback Object extracted from ``f``.
def decode_body(cls, header, f): assert header.packet_type == MqttControlPacketType.pingreq if header.remaining_len != 0: raise DecodeError('Extra bytes at end of packet.') return 0, MqttPingreq()
Generates a `MqttPingreq` packet given a `MqttFixedHeader`. This method asserts that header.packet_type is `pingreq`. Parameters ---------- header: MqttFixedHeader f: file Object with a read method. Raises ------ DecodeError When there are extra bytes at the end of the packet. Returns ------- int Number of bytes consumed from ``f``. MqttPingreq Object extracted from ``f``.
def decode_body(cls, header, f): assert header.packet_type == MqttControlPacketType.pingresp if header.remaining_len != 0: raise DecodeError('Extra bytes at end of packet.') return 0, MqttPingresp()
Generates a `MqttPingresp` packet given a `MqttFixedHeader`. This method asserts that header.packet_type is `pingresp`. Parameters ---------- header: MqttFixedHeader f: file Object with a read method. Raises ------ DecodeError When there are extra bytes at the end of the packet. Returns ------- int Number of bytes consumed from ``f``. MqttPingresp Object extracted from ``f``.
def decode_body(cls, header, f): assert header.packet_type == MqttControlPacketType.disconnect if header.remaining_len != 0: raise DecodeError('Extra bytes at end of packet.') return 0, MqttDisconnect()
Generates a :class:`MqttDisconnect` packet given a :class:`MqttFixedHeader`. This method asserts that header.packet_type is :const:`MqttControlPacketType.disconnect`. Parameters ---------- header: MqttFixedHeader f: file Object with a read method. Raises ------ DecodeError When there are extra bytes at the end of the packet. Returns ------- int Number of bytes consumed from ``f``. MqttDisconnect Object extracted from ``f``.
def getter(name, key=None): if not key: key = lambda x: x def wrapper(self): return key(getattr(self, name)) wrapper.__name__ = wrapper.__qualname__ = name return property(wrapper)
Creates a read-only property for the attribute name *name*. If a *key* function is provided, it can be used to post-process the value of the attribute.
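A usage sketch, assuming ``getter`` from above is importable:

```python
class Person:
    def __init__(self, name):
        self._name = name

    name = getter("_name")
    shout = getter("_name", key=str.upper)  # post-process via the key function

p = Person("ada")
p.name        # 'ada'
p.shout       # 'ADA'
p.name = "x"  # AttributeError: the property is read-only
```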
def connect(self): if self.token: self.phab_session = {'token': self.token} return req = self.req_session.post('%s/api/conduit.connect' % self.host, data={ 'params': json.dumps(self.connect_params), 'output': 'json', '__conduit__': True, }) # Parse out the response (error handling omitted) result = req.json()['result'] self.phab_session = { 'sessionKey': result['sessionKey'], 'connectionID': result['connectionID'], }
Sets up your Phabricator session; it's not necessary to call this directly
def request(self, method, params=None): if params is None: params = {} if not self.phab_session: self.connect() url = '%s/api/%s' % (self.host, method) params['__conduit__'] = self.phab_session req = self.req_session.post(url, data={ 'params': json.dumps(params), 'output': 'json', }) return json.loads( req.content.decode(), object_pairs_hook=collections.OrderedDict )['result']
Make a request to a method in the phabricator API :param method: Name of the API method to call :type method: basestring :param params: Optional dict of params to pass :type params: dict
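A hypothetical usage sketch; the ``ConduitClient`` class name and the Conduit method shown are illustrative assumptions, not taken from the code above:

```python
# hypothetical client class wrapping the connect()/request() methods above
client = ConduitClient(host="https://phab.example.com", token="api-xxxxxxxx")
revisions = client.request("differential.query", {"ids": [123]})
```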
def get_musiclibrary(): lib_files = music_library.get_file_list(config.library_path) global lib lib = music_library.parse_library(lib_files) return lib
:rtype: musiclibrary.MusicLibrary
def install(force=False): ret, git_dir, _ = run("git rev-parse --show-toplevel") if ret != 0: click.echo( "ERROR: Please run from within a GIT repository.", file=sys.stderr) raise click.Abort git_dir = git_dir[0] hooks_dir = os.path.join(git_dir, HOOK_PATH) for hook in HOOKS: hook_path = os.path.join(hooks_dir, hook) if os.path.exists(hook_path): if not force: click.echo( "Hook already exists. Skipping {0}".format(hook_path), file=sys.stderr) continue else: os.unlink(hook_path) source = os.path.join(sys.prefix, "bin", "kwalitee-" + hook) os.symlink(os.path.normpath(source), hook_path) return True
Install git hooks.
def uninstall(): ret, git_dir, _ = run("git rev-parse --show-toplevel") if ret != 0: click.echo( "ERROR: Please run from within a GIT repository.", file=sys.stderr) raise click.Abort git_dir = git_dir[0] hooks_dir = os.path.join(git_dir, HOOK_PATH) for hook in HOOKS: hook_path = os.path.join(hooks_dir, hook) if os.path.exists(hook_path): os.remove(hook_path) return True
Uninstall git hooks.
def find_promulgation_date(line): line = line.split(' du ')[1] return format_date(re.search(r"(\d\d? \w\w\w+ \d\d\d\d)", line).group(1))
>>> find_promulgation_date("Loi nº 2010-383 du 16 avril 2010 autorisant l'approbation de l'accord entre...") '2010-04-16'
def auto_need(form): requirements = form.get_widget_requirements() for library, version in requirements: resources = resource_mapping[library] if not isinstance(resources, list): # pragma: no cover (bw compat only) resources = [resources] for resource in resources: resource.need()
Automatically ``need()`` the relevant Fanstatic resources for a form. This function automatically utilises libraries in the ``js.*`` namespace (such as ``js.jquery``, ``js.tinymce`` and so forth) to allow Fanstatic to better manage these resources (caching, minifications) and avoid duplication across the rest of your application.
def setup_logger(): logger = logging.getLogger('dockerstache') logger.setLevel(logging.INFO) handler = logging.StreamHandler(stream=sys.stdout) handler.setLevel(logging.INFO) logger.addHandler(handler) return logger
Set up a basic logger that writes INFO-level messages to stdout
def named_any(name): assert name, 'Empty module name' names = name.split('.') topLevelPackage = None moduleNames = names[:] while not topLevelPackage: if moduleNames: trialname = '.'.join(moduleNames) try: topLevelPackage = __import__(trialname) except Exception: moduleNames.pop() else: if len(names) == 1: raise Exception("No module named %r" % (name,)) else: raise Exception('%r does not name an object' % (name,)) obj = topLevelPackage for n in names[1:]: obj = getattr(obj, n) return obj
Retrieve a Python object by its fully qualified name from the global Python module namespace. The first part of the name, that describes a module, will be discovered and imported. Each subsequent part of the name is treated as the name of an attribute of the object specified by all of the name which came before it. @param name: The name of the object to return. @return: the Python object identified by 'name'.
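A usage sketch, assuming ``named_any`` is importable:

```python
join = named_any("os.path.join")   # same object as os.path.join
join("a", "b")                     # 'a/b' (or 'a\\b' on Windows)
named_any("os.path.no_such_name")  # raises AttributeError
```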
def for_name(modpath, classname): ''' Returns a class of "classname" from module "modname". ''' module = __import__(modpath, fromlist=[classname]) classobj = getattr(module, classname) return classobj()
Returns a class of "classname" from module "modname".
def _convert(self, val): if isinstance(val, dict) and not isinstance(val, DotDict): return DotDict(val), True elif isinstance(val, list) and not isinstance(val, DotList): return DotList(val), True return val, False
Convert the type if necessary and return if a conversion happened.
def full_subgraph(self, vertices): obj_map = {vertex.id: vertex for vertex in vertices} edges = [ edge for vertex_id in obj_map for edge in self._out_edges[vertex_id] if edge.head in obj_map ] return AnnotatedGraph( vertices=obj_map.values(), edges=edges, )
Return the subgraph of this graph whose vertices are the given ones and whose edges are the edges of the original graph between those vertices.
def to_json(self): obj = { "vertices": [ { "id": vertex.id, "annotation": vertex.annotation, } for vertex in self.vertices ], "edges": [ { "id": edge.id, "annotation": edge.annotation, "head": edge.head, "tail": edge.tail, } for edge in self._edges ], } # Ensure that we always return unicode output on Python 2. return six.text_type(json.dumps(obj, ensure_ascii=False))
Convert to a JSON string.
def from_json(cls, json_graph): obj = json.loads(json_graph) vertices = [ AnnotatedVertex( id=vertex["id"], annotation=vertex["annotation"], ) for vertex in obj["vertices"] ] edges = [ AnnotatedEdge( id=edge["id"], annotation=edge["annotation"], head=edge["head"], tail=edge["tail"], ) for edge in obj["edges"] ] return cls(vertices=vertices, edges=edges)
Reconstruct the graph from a graph exported to JSON.
def export_json(self, filename): json_graph = self.to_json() with open(filename, 'wb') as f: f.write(json_graph.encode('utf-8'))
Export graph in JSON form to the given file.
def import_json(cls, filename): with open(filename, 'rb') as f: json_graph = f.read().decode('utf-8') return cls.from_json(json_graph)
Import graph from the given file. The file is expected to contain UTF-8 encoded JSON data.
def to_dot(self): edge_labels = { edge.id: edge.annotation for edge in self._edges } edges = [self._format_edge(edge_labels, edge) for edge in self._edges] vertices = [ DOT_VERTEX_TEMPLATE.format( vertex=vertex.id, label=dot_quote(vertex.annotation), ) for vertex in self.vertices ] return DOT_DIGRAPH_TEMPLATE.format( edges="".join(edges), vertices="".join(vertices), )
Produce a graph in DOT format.
def export_image(self, filename='refcycle.png', format=None, dot_executable='dot'): # Figure out what output format to use. if format is None: _, extension = os.path.splitext(filename) if extension.startswith('.') and len(extension) > 1: format = extension[1:] else: format = 'png' # Convert to 'dot' format. dot_graph = self.to_dot() # We'll send the graph directly to the process stdin. cmd = [ dot_executable, '-T{}'.format(format), '-o{}'.format(filename), ] dot = subprocess.Popen(cmd, stdin=subprocess.PIPE) dot.communicate(dot_graph.encode('utf-8'))
Export graph as an image. This requires that Graphviz is installed and that the ``dot`` executable is in your path. The *filename* argument specifies the output filename. The *format* argument lets you specify the output format. It may be any format that ``dot`` understands, including extended format specifications like ``png:cairo``. If omitted, the filename extension will be used; if no filename extension is present, ``png`` will be used. The *dot_executable* argument lets you provide a full path to the ``dot`` executable if necessary.
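A usage sketch, assuming a graph instance and a Graphviz ``dot`` executable on the PATH; the file names are hypothetical:

```python
graph.export_image("cycles.svg")                      # format inferred from the extension
graph.export_image("cycles.png", format="png:cairo")  # extended Graphviz format spec
graph.export_image("out", dot_executable="/usr/local/bin/dot")  # no extension: falls back to png
```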
def install_brew(target_path): if not os.path.exists(target_path): try: os.makedirs(target_path) except OSError: logger.warn("Unable to create directory %s for brew." % target_path) logger.warn("Skipping...") return extract_targz(HOMEBREW_URL, target_path, remove_common_prefix=True)
Install brew to the target path
def scales(self, image): # compute the minimum scale so that the patch size still fits into the given image minimum_scale = max(self.m_patch_box.size_f[0] / image.shape[-2], self.m_patch_box.size_f[1] / image.shape[-1]) if self.m_lowest_scale: maximum_scale = min(minimum_scale / self.m_lowest_scale, 1.) else: maximum_scale = 1. current_scale_power = 0. # iterate over all possible scales while True: # scale the image scale = minimum_scale * math.pow(self.m_scale_factor, current_scale_power) if scale > maximum_scale: # image is smaller than the requested minimum size break current_scale_power -= 1. scaled_image_shape = bob.ip.base.scaled_output_shape(image, scale) # return both the scale and the scaled image size yield scale, scaled_image_shape
scales(image) -> scale, shape Computes all possible scales for the given image and yields a tuple of the scale and the scaled image shape as an iterator. **Parameters:** ``image`` : array_like(2D or 3D) The image, for which the scales should be computed **Yields:** ``scale`` : float The next scale of the image to be considered ``shape`` : (int, int) or (int, int, int) The shape of the image, when scaled with the current ``scale``
def sample_scaled(self, shape): for y in range(0, shape[-2]-self.m_patch_box.bottomright[0], self.m_distance): for x in range(0, shape[-1]-self.m_patch_box.bottomright[1], self.m_distance): # create bounding box for the current shift yield self.m_patch_box.shift((y,x))
sample_scaled(shape) -> bounding_box Yields an iterator that iterates over all sampled bounding boxes in the given (scaled) image shape. **Parameters:** ``shape`` : (int, int) or (int, int, int) The (current) shape of the (scaled) image **Yields:** ``bounding_box`` : :py:class:`BoundingBox` An iterator iterating over all bounding boxes that are valid for the given shape
def sample(self, image): for scale, scaled_image_shape in self.scales(image): # prepare the feature extractor to extract features from the given image for bb in self.sample_scaled(scaled_image_shape): # extract features for yield bb.scale(1./scale)
sample(image) -> bounding_box Yields an iterator over all bounding boxes in different scales that are sampled for the given image. **Parameters:** ``image`` : array_like(2D or 3D) The image, for which the bounding boxes should be generated **Yields:** ``bounding_box`` : :py:class:`BoundingBox` An iterator iterating over all bounding boxes for the given ``image``
def iterate(self, image, feature_extractor, feature_vector): for scale, scaled_image_shape in self.scales(image): # prepare the feature extractor to extract features from the given image feature_extractor.prepare(image, scale) for bb in self.sample_scaled(scaled_image_shape): # extract features for feature_extractor.extract_indexed(bb, feature_vector) yield bb.scale(1./scale)
iterate(image, feature_extractor, feature_vector) -> bounding_box Scales the given image, and extracts features from all possible bounding boxes. For each of the sampled bounding boxes, this function fills the given pre-allocated feature vector and yields the current bounding box. **Parameters:** ``image`` : array_like(2D) The given image to extract features for ``feature_extractor`` : :py:class:`FeatureExtractor` The feature extractor to use to extract the features for the sampled patches ``feature_vector`` : :py:class:`numpy.ndarray` (1D, uint16) The pre-allocated feature vector that will be filled inside this function; needs to be of size :py:attr:`FeatureExtractor.number_of_features` **Yields:** ``bounding_box`` : :py:class:`BoundingBox` The bounding box for which the current features are extracted for
def iterate_cascade(self, cascade, image, threshold = None): for scale, scaled_image_shape in self.scales(image): # prepare the feature extractor to extract features from the given image cascade.prepare(image, scale) for bb in self.sample_scaled(scaled_image_shape): # return the prediction and the bounding box, if the prediction is over threshold prediction = cascade(bb) if threshold is None or prediction > threshold: yield prediction, bb.scale(1./scale)
iterate_cascade(self, cascade, image, [threshold]) -> prediction, bounding_box Iterates over the given image and computes the cascade of classifiers. This function will compute the cascaded classification result for the given ``image`` using the given ``cascade``. It yields a tuple of prediction value and the according bounding box. If a ``threshold`` is specified, only those ``prediction``\s are returned, which exceed the given ``threshold``. .. note:: The ``threshold`` does not overwrite the cascade thresholds `:py:attr:`Cascade.thresholds`, but only threshold the final prediction. Specifying the ``threshold`` here is just slightly faster than thresholding the yielded prediction. **Parameters:** ``cascade`` : :py:class:`Cascade` The cascade that performs the predictions ``image`` : array_like(2D) The image for which the predictions should be computed ``threshold`` : float The threshold, which limits the number of predictions **Yields:** ``prediction`` : float The prediction value for the current bounding box ``bounding_box`` : :py:class:`BoundingBox` An iterator over all possible sampled bounding boxes (which exceed the prediction ``threshold``, if given)
def pass_service(*names): def decorator(f): @functools.wraps(f) def wrapper(*args, **kwargs): for name in names: kwargs[name] = service_proxy(name) return f(*args, **kwargs) return wrapper return decorator
Injects a service instance into the kwargs
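A hypothetical usage sketch; the service names and whatever ``service_proxy`` resolves them to are assumptions:

```python
@pass_service("mailer", "db")
def register_user(email, mailer=None, db=None):
    db.save(email)                 # proxies are injected as keyword arguments
    mailer.send(email, "welcome!")

register_user("user@example.com")  # 'mailer' and 'db' are filled in by the decorator
```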
def get_conn(): if os.environ.get('DEBUG', False) or os.environ.get('travis', False): # In DEBUG mode - use the local DynamoDB # This also works for travis since we'll be running dynalite conn = DynamoDBConnection( host='localhost', port=8000, aws_access_key_id='TEST', aws_secret_access_key='TEST', is_secure=False ) else: # Regular old production conn = DynamoDBConnection() return conn
Return a connection to DynamoDB.
def map_index_val(index_val): if index_val is None: return DynamoMappings.NONE_VAL index_val = str(index_val) if not index_val: return DynamoMappings.EMPTY_STR_VAL return index_val
Xform index_val so that it can be stored/queried.
def table_schema_call(self, target, cls): index_defs = [] for name in cls.index_names() or []: index_defs.append(GlobalIncludeIndex( gsi_name(name), parts=[HashKey(name)], includes=['value'] )) return target( cls.get_table_name(), connection=get_conn(), schema=[HashKey('id')], global_indexes=index_defs or None )
Perform a table schema call. We call the callable target with the args and keywords needed for the table defined by cls. This is how we centralize the Table.create and Table ctor calls.
def ensure_table(self, cls): exists = True conn = get_conn() try: descrip = conn.describe_table(cls.get_table_name()) assert descrip is not None except ResourceNotFoundException: # Expected - this is what we get if there is no table exists = False except JSONResponseError: # Also assuming no table exists = False if not exists: table = self.table_schema_call(Table.create, cls) assert table is not None
Required functionality.
def find_one(self, cls, id): try: db_result = self.get_class_table(cls).lookup(id) except ItemNotFound: # according to docs, this shouldn't be required, but it IS db_result = None if not db_result: return None obj = cls.from_data(db_result['value']) return obj
Required functionality.
def find_all(self, cls): final_results = [] table = self.get_class_table(cls) for db_result in table.scan(): obj = cls.from_data(db_result['value']) final_results.append(obj) return final_results
Required functionality.
def find_by_index(self, cls, index_name, value): query_args = { index_name + '__eq': DynamoMappings.map_index_val(value), 'index': gsi_name(index_name) } final_results = [] for db_result in self.get_class_table(cls).query_2(**query_args): obj = cls.from_data(db_result['value']) final_results.append(obj) return final_results
Required functionality.
def save(self, obj): if not obj.id: obj.id = uuid() stored_data = { 'id': obj.id, 'value': obj.to_data() } index_vals = obj.indexes() or {} for key in obj.__class__.index_names() or []: val = index_vals.get(key, '') stored_data[key] = DynamoMappings.map_index_val(val) table = self.get_class_table(obj.__class__) item = Item(table, data=stored_data) item.save(overwrite=True)
Required functionality.
def process_event(self, name, subject, data): method_mapping = Registry.get_event(name) if not method_mapping: log.info('@{}.process_event no subscriber for event `{}`' .format(self.__class__.__name__, name)) return for event, methods in method_mapping.items(): event_instance = event(subject, data) log.info('@{}.process_event `{}` for subject `{}`'.format( self.__class__.__name__, event_instance.__class__.__name__, subject )) for method in methods: with self._context_manager: log.info('>> Calling subscriber `{}`' .format(method.__name__)) method(event_instance)
Process a single event. :param name: :param subject: :param data:
def thread(self): log.info('@{}.thread starting'.format(self.__class__.__name__)) thread = threading.Thread(target=thread_wrapper(self.consume), args=()) thread.daemon = True thread.start()
Start a thread for this consumer.
def create(parser: Parser, obj: PersistedObject = None): if obj is not None: return _InvalidParserException('Error ' + str(obj) + ' cannot be parsed using ' + str(parser) + ' since ' + ' this parser does not support ' + obj.get_pretty_file_mode()) else: return _InvalidParserException('Error this parser is neither SingleFile nor MultiFile !')
Helper method provided because we actually can't put that in the constructor, it creates a bug in Nose tests https://github.com/nose-devs/nose/issues/725 :param parser: :param obj: :return:
def _parse_multifile(self, desired_type: Type[T], obj: PersistedObject, parsing_plan_for_children: Dict[str, ParsingPlan], logger: Logger, options: Dict[str, Dict[str, Any]]) -> T: pass
First parse all children from the parsing plan, then calls _build_object_from_parsed_children :param desired_type: :param obj: :param parsing_plan_for_children: :param logger: :param options: :return: