from django.db import IntegrityError
from rest_framework import viewsets, status
from rest_framework.response import Response
from rest_framework.status import HTTP_404_NOT_FOUND, HTTP_400_BAD_REQUEST

from treeherder.model.models import JobType, Push, Repository, InvestigatedTests
from treeherder.webapp.api.serializers import InvestigatedTestsSerializers


class InvestigatedViewSet(viewsets.ModelViewSet):
    """
    Handles creating, reading and deleting investigated tests
    """

    serializer_class = InvestigatedTestsSerializers
    allowed_methods = ['GET', 'POST', 'DELETE']

    def get_queryset(self):
        revision = self.request.GET['revision']
        project = self.kwargs['project']
        try:
            repository = Repository.objects.get(name=project)
            push = Push.objects.get(revision=revision, repository=repository)
            return InvestigatedTests.objects.filter(push=push)
        except (Repository.DoesNotExist, Push.DoesNotExist):
            # Only the two .get() lookups above can raise DoesNotExist;
            # filter() never does, so no further except clause is needed.
            return Response(
                "No push with revision: {0}".format(revision), status=HTTP_404_NOT_FOUND
            )

    def create(self, request, *args, **kwargs):
        project = kwargs['project']
        revision = request.query_params.get('revision')
        test = request.data['test']
        jobName = request.data['jobName']
        jobSymbol = request.data['jobSymbol']

        try:
            repository = Repository.objects.get(name=project)
            push = Push.objects.get(revision=revision, repository=repository)
            job_type = JobType.objects.get(name=jobName, symbol=jobSymbol)
            serializer = self.get_serializer(data=request.data)
            serializer.is_valid(raise_exception=True)
            serializer.save(push=push, job_type=job_type, test=test)
            return Response(serializer.data, status=status.HTTP_201_CREATED)

        except IntegrityError:
            return Response(
                "{0} already marked investigated".format(test), status=HTTP_400_BAD_REQUEST
            )

        except Push.DoesNotExist:
            return Response(
                "No push with revision: {0}".format(revision), status=HTTP_404_NOT_FOUND
            )

        except JobType.DoesNotExist:
            return Response(
                "No JobType with job name: {0}".format(jobName), status=HTTP_404_NOT_FOUND
            )

    def destroy(self, request, project, pk=None):
        try:
            investigated_test = InvestigatedTests.objects.get(pk=pk)
            investigated_test.delete()
            return Response(status=status.HTTP_204_NO_CONTENT)
        except InvestigatedTests.DoesNotExist:
            return Response("Test already uninvestigated", status=HTTP_404_NOT_FOUND)
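# ---------------------------------------------------------------------------
# Usage sketch (not part of the original file): how a viewset like this is
# typically exposed through a DRF router. The URL prefix, basename, and
# import path below are illustrative assumptions, not taken from the source
# above.
#
# from rest_framework import routers
# from treeherder.webapp.api.investigated_test import InvestigatedViewSet
#
# router = routers.SimpleRouter()
# router.register(
#     r'project/(?P<project>[\w-]+)/investigated-tests',
#     InvestigatedViewSet,
#     basename='investigated-tests',
# )
# urlpatterns = router.urls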
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import migrations


def load_init_data(apps, schema_editor):
    ModerationStatus = apps.get_model("elections", "ModerationStatus")
    recs = [
        ModerationStatus(
            short_label="Suggested", long_label="Suggested by an anonymous user"
        ),
        ModerationStatus(short_label="Rejected", long_label="Rejected by a moderator"),
        ModerationStatus(short_label="Approved", long_label="Approved by a moderator"),
        ModerationStatus(
            short_label="Deleted", long_label="Deleted (because it was added in error)"
        ),
    ]
    ModerationStatus.objects.bulk_create(recs)


def delete_init_data(apps, schema_editor):
    ModerationStatus = apps.get_model("elections", "ModerationStatus")
    ModerationStatus.objects.all().delete()


class Migration(migrations.Migration):

    dependencies = [("elections", "0047_auto_20181005_1320")]

    operations = [migrations.RunPython(load_init_data, delete_init_data)]
from vsg import parser


class keyword(parser.keyword):
    '''
    unique_id = assertion : keyword
    '''

    def __init__(self, sString):
        parser.keyword.__init__(self, sString)


class condition(parser.condition):
    '''
    unique_id = assertion : condition
    '''

    def __init__(self, sString):
        parser.condition.__init__(self, sString)


class report_keyword(parser.keyword):
    '''
    unique_id = assertion : report_keyword
    '''

    def __init__(self, sString):
        parser.keyword.__init__(self, sString)


class report_expression(parser.expression):
    '''
    unique_id = assertion : report_expression
    '''

    def __init__(self, sString):
        parser.expression.__init__(self, sString)


class severity_keyword(parser.keyword):
    '''
    unique_id = assertion : severity_keyword
    '''

    def __init__(self, sString):
        parser.keyword.__init__(self, sString)


class severity_expression(parser.expression):
    '''
    unique_id = assertion : severity_expression
    '''

    def __init__(self, sString):
        parser.expression.__init__(self, sString)
""" This bootstrap module contains code for ensuring that the astropy_helpers package will be importable by the time the setup.py script runs. It also includes some workarounds to ensure that a recent-enough version of setuptools is being used for the installation. This module should be the first thing imported in the setup.py of distributions that make use of the utilities in astropy_helpers. If the distribution ships with its own copy of astropy_helpers, this module will first attempt to import from the shipped copy. However, it will also check PyPI to see if there are any bug-fix releases on top of the current version that may be useful to get past platform-specific bugs that have been fixed. When running setup.py, use the ``--offline`` command-line option to disable the auto-upgrade checks. When this module is imported or otherwise executed it automatically calls a main function that attempts to read the project's setup.cfg file, which it checks for a configuration section called ``[ah_bootstrap]`` the presences of that section, and options therein, determine the next step taken: If it contains an option called ``auto_use`` with a value of ``True``, it will automatically call the main function of this module called `use_astropy_helpers` (see that function's docstring for full details). Otherwise no further action is taken and by default the system-installed version of astropy-helpers will be used (however, ``ah_bootstrap.use_astropy_helpers`` may be called manually from within the setup.py script). This behavior can also be controlled using the ``--auto-use`` and ``--no-auto-use`` command-line flags. For clarity, an alias for ``--no-auto-use`` is ``--use-system-astropy-helpers``, and we recommend using the latter if needed. Additional options in the ``[ah_boostrap]`` section of setup.cfg have the same names as the arguments to `use_astropy_helpers`, and can be used to configure the bootstrap script when ``auto_use = True``. See https://github.com/astropy/astropy-helpers for more details, and for the latest version of this module. """ import contextlib import errno import imp import io import locale import os import re import subprocess as sp import sys try: from ConfigParser import ConfigParser, RawConfigParser except ImportError: from configparser import ConfigParser, RawConfigParser _str_types = (str, bytes) # What follows are several import statements meant to deal with install-time # issues with either missing or misbehaving pacakges (including making sure # setuptools itself is installed): # Some pre-setuptools checks to ensure that either distribute or setuptools >= # 0.7 is used (over pre-distribute setuptools) if it is available on the path; # otherwise the latest setuptools will be downloaded and bootstrapped with # ``ez_setup.py``. 
This used to be included in a separate file called # setuptools_bootstrap.py; but it was combined into ah_bootstrap.py try: import pkg_resources _setuptools_req = pkg_resources.Requirement.parse('setuptools>=0.7') # This may raise a DistributionNotFound in which case no version of # setuptools or distribute is properly installed _setuptools = pkg_resources.get_distribution('setuptools') if _setuptools not in _setuptools_req: # Older version of setuptools; check if we have distribute; again if # this results in DistributionNotFound we want to give up _distribute = pkg_resources.get_distribution('distribute') if _setuptools != _distribute: # It's possible on some pathological systems to have an old version # of setuptools and distribute on sys.path simultaneously; make # sure distribute is the one that's used sys.path.insert(1, _distribute.location) _distribute.activate() imp.reload(pkg_resources) except: # There are several types of exceptions that can occur here; if all else # fails bootstrap and use the bootstrapped version from ez_setup import use_setuptools use_setuptools() # typing as a dependency for 1.6.1+ Sphinx causes issues when imported after # initializing submodule with ah_boostrap.py # See discussion and references in # https://github.com/astropy/astropy-helpers/issues/302 try: import typing # noqa except ImportError: pass # Note: The following import is required as a workaround to # https://github.com/astropy/astropy-helpers/issues/89; if we don't import this # module now, it will get cleaned up after `run_setup` is called, but that will # later cause the TemporaryDirectory class defined in it to stop working when # used later on by setuptools try: import setuptools.py31compat # noqa except ImportError: pass # matplotlib can cause problems if it is imported from within a call of # run_setup(), because in some circumstances it will try to write to the user's # home directory, resulting in a SandboxViolation. See # https://github.com/matplotlib/matplotlib/pull/4165 # Making sure matplotlib, if it is available, is imported early in the setup # process can mitigate this (note importing matplotlib.pyplot has the same # issue) try: import matplotlib matplotlib.use('Agg') import matplotlib.pyplot except: # Ignore if this fails for *any* reason* pass # End compatibility imports... # In case it didn't successfully import before the ez_setup checks import pkg_resources from setuptools import Distribution from setuptools.package_index import PackageIndex from setuptools.sandbox import run_setup from distutils import log from distutils.debug import DEBUG # TODO: Maybe enable checking for a specific version of astropy_helpers? DIST_NAME = 'astropy-helpers' PACKAGE_NAME = 'astropy_helpers' UPPER_VERSION_EXCLUSIVE = None # Defaults for other options DOWNLOAD_IF_NEEDED = True INDEX_URL = 'https://pypi.python.org/simple' USE_GIT = True OFFLINE = False AUTO_UPGRADE = True # A list of all the configuration options and their required types CFG_OPTIONS = [ ('auto_use', bool), ('path', str), ('download_if_needed', bool), ('index_url', str), ('use_git', bool), ('offline', bool), ('auto_upgrade', bool) ] class _Bootstrapper(object): """ Bootstrapper implementation. See ``use_astropy_helpers`` for parameter documentation. 
""" def __init__(self, path=None, index_url=None, use_git=None, offline=None, download_if_needed=None, auto_upgrade=None): if path is None: path = PACKAGE_NAME if not (isinstance(path, _str_types) or path is False): raise TypeError('path must be a string or False') if not isinstance(path, str): fs_encoding = sys.getfilesystemencoding() path = path.decode(fs_encoding) # path to unicode self.path = path # Set other option attributes, using defaults where necessary self.index_url = index_url if index_url is not None else INDEX_URL self.offline = offline if offline is not None else OFFLINE # If offline=True, override download and auto-upgrade if self.offline: download_if_needed = False auto_upgrade = False self.download = (download_if_needed if download_if_needed is not None else DOWNLOAD_IF_NEEDED) self.auto_upgrade = (auto_upgrade if auto_upgrade is not None else AUTO_UPGRADE) # If this is a release then the .git directory will not exist so we # should not use git. git_dir_exists = os.path.exists(os.path.join(os.path.dirname(__file__), '.git')) if use_git is None and not git_dir_exists: use_git = False self.use_git = use_git if use_git is not None else USE_GIT # Declared as False by default--later we check if astropy-helpers can be # upgraded from PyPI, but only if not using a source distribution (as in # the case of import from a git submodule) self.is_submodule = False @classmethod def main(cls, argv=None): if argv is None: argv = sys.argv config = cls.parse_config() config.update(cls.parse_command_line(argv)) auto_use = config.pop('auto_use', False) bootstrapper = cls(**config) if auto_use: # Run the bootstrapper, otherwise the setup.py is using the old # use_astropy_helpers() interface, in which case it will run the # bootstrapper manually after reconfiguring it. bootstrapper.run() return bootstrapper @classmethod def parse_config(cls): if not os.path.exists('setup.cfg'): return {} cfg = ConfigParser() try: cfg.read('setup.cfg') except Exception as e: if DEBUG: raise log.error( "Error reading setup.cfg: {0!r}\n{1} will not be " "automatically bootstrapped and package installation may fail." "\n{2}".format(e, PACKAGE_NAME, _err_help_msg)) return {} if not cfg.has_section('ah_bootstrap'): return {} config = {} for option, type_ in CFG_OPTIONS: if not cfg.has_option('ah_bootstrap', option): continue if type_ is bool: value = cfg.getboolean('ah_bootstrap', option) else: value = cfg.get('ah_bootstrap', option) config[option] = value return config @classmethod def parse_command_line(cls, argv=None): if argv is None: argv = sys.argv config = {} # For now we just pop recognized ah_bootstrap options out of the # arg list. This is imperfect; in the unlikely case that a setup.py # custom command or even custom Distribution class defines an argument # of the same name then we will break that. However there's a catch22 # here that we can't just do full argument parsing right here, because # we don't yet know *how* to parse all possible command-line arguments. 
if '--no-git' in argv: config['use_git'] = False argv.remove('--no-git') if '--offline' in argv: config['offline'] = True argv.remove('--offline') if '--auto-use' in argv: config['auto_use'] = True argv.remove('--auto-use') if '--no-auto-use' in argv: config['auto_use'] = False argv.remove('--no-auto-use') if '--use-system-astropy-helpers' in argv: config['auto_use'] = False argv.remove('--use-system-astropy-helpers') return config def run(self): strategies = ['local_directory', 'local_file', 'index'] dist = None # First, remove any previously imported versions of astropy_helpers; # this is necessary for nested installs where one package's installer # is installing another package via setuptools.sandbox.run_setup, as in # the case of setup_requires for key in list(sys.modules): try: if key == PACKAGE_NAME or key.startswith(PACKAGE_NAME + '.'): del sys.modules[key] except AttributeError: # Sometimes mysterious non-string things can turn up in # sys.modules continue # Check to see if the path is a submodule self.is_submodule = self._check_submodule() for strategy in strategies: method = getattr(self, 'get_{0}_dist'.format(strategy)) dist = method() if dist is not None: break else: raise _AHBootstrapSystemExit( "No source found for the {0!r} package; {0} must be " "available and importable as a prerequisite to building " "or installing this package.".format(PACKAGE_NAME)) # This is a bit hacky, but if astropy_helpers was loaded from a # directory/submodule its Distribution object gets a "precedence" of # "DEVELOP_DIST". However, in other cases it gets a precedence of # "EGG_DIST". However, when activing the distribution it will only be # placed early on sys.path if it is treated as an EGG_DIST, so always # do that dist = dist.clone(precedence=pkg_resources.EGG_DIST) # Otherwise we found a version of astropy-helpers, so we're done # Just active the found distribution on sys.path--if we did a # download this usually happens automatically but it doesn't hurt to # do it again # Note: Adding the dist to the global working set also activates it # (makes it importable on sys.path) by default. try: pkg_resources.working_set.add(dist, replace=True) except TypeError: # Some (much) older versions of setuptools do not have the # replace=True option here. These versions are old enough that all # bets may be off anyways, but it's easy enough to work around just # in case... if dist.key in pkg_resources.working_set.by_key: del pkg_resources.working_set.by_key[dist.key] pkg_resources.working_set.add(dist) @property def config(self): """ A `dict` containing the options this `_Bootstrapper` was configured with. """ return dict((optname, getattr(self, optname)) for optname, _ in CFG_OPTIONS if hasattr(self, optname)) def get_local_directory_dist(self): """ Handle importing a vendored package from a subdirectory of the source distribution. 
""" if not os.path.isdir(self.path): return log.info('Attempting to import astropy_helpers from {0} {1!r}'.format( 'submodule' if self.is_submodule else 'directory', self.path)) dist = self._directory_import() if dist is None: log.warn( 'The requested path {0!r} for importing {1} does not ' 'exist, or does not contain a copy of the {1} ' 'package.'.format(self.path, PACKAGE_NAME)) elif self.auto_upgrade and not self.is_submodule: # A version of astropy-helpers was found on the available path, but # check to see if a bugfix release is available on PyPI upgrade = self._do_upgrade(dist) if upgrade is not None: dist = upgrade return dist def get_local_file_dist(self): """ Handle importing from a source archive; this also uses setup_requires but points easy_install directly to the source archive. """ if not os.path.isfile(self.path): return log.info('Attempting to unpack and import astropy_helpers from ' '{0!r}'.format(self.path)) try: dist = self._do_download(find_links=[self.path]) except Exception as e: if DEBUG: raise log.warn( 'Failed to import {0} from the specified archive {1!r}: ' '{2}'.format(PACKAGE_NAME, self.path, str(e))) dist = None if dist is not None and self.auto_upgrade: # A version of astropy-helpers was found on the available path, but # check to see if a bugfix release is available on PyPI upgrade = self._do_upgrade(dist) if upgrade is not None: dist = upgrade return dist def get_index_dist(self): if not self.download: log.warn('Downloading {0!r} disabled.'.format(DIST_NAME)) return None log.warn( "Downloading {0!r}; run setup.py with the --offline option to " "force offline installation.".format(DIST_NAME)) try: dist = self._do_download() except Exception as e: if DEBUG: raise log.warn( 'Failed to download and/or install {0!r} from {1!r}:\n' '{2}'.format(DIST_NAME, self.index_url, str(e))) dist = None # No need to run auto-upgrade here since we've already presumably # gotten the most up-to-date version from the package index return dist def _directory_import(self): """ Import astropy_helpers from the given path, which will be added to sys.path. Must return True if the import succeeded, and False otherwise. """ # Return True on success, False on failure but download is allowed, and # otherwise raise SystemExit path = os.path.abspath(self.path) # Use an empty WorkingSet rather than the man # pkg_resources.working_set, since on older versions of setuptools this # will invoke a VersionConflict when trying to install an upgrade ws = pkg_resources.WorkingSet([]) ws.add_entry(path) dist = ws.by_key.get(DIST_NAME) if dist is None: # We didn't find an egg-info/dist-info in the given path, but if a # setup.py exists we can generate it setup_py = os.path.join(path, 'setup.py') if os.path.isfile(setup_py): with _silence(): run_setup(os.path.join(path, 'setup.py'), ['egg_info']) for dist in pkg_resources.find_distributions(path, True): # There should be only one... 
return dist return dist def _do_download(self, version='', find_links=None): if find_links: allow_hosts = '' index_url = None else: allow_hosts = None index_url = self.index_url # Annoyingly, setuptools will not handle other arguments to # Distribution (such as options) before handling setup_requires, so it # is not straightforward to programmatically augment the arguments which # are passed to easy_install class _Distribution(Distribution): def get_option_dict(self, command_name): opts = Distribution.get_option_dict(self, command_name) if command_name == 'easy_install': if find_links is not None: opts['find_links'] = ('setup script', find_links) if index_url is not None: opts['index_url'] = ('setup script', index_url) if allow_hosts is not None: opts['allow_hosts'] = ('setup script', allow_hosts) return opts if version: req = '{0}=={1}'.format(DIST_NAME, version) else: if UPPER_VERSION_EXCLUSIVE is None: req = DIST_NAME else: req = '{0}<{1}'.format(DIST_NAME, UPPER_VERSION_EXCLUSIVE) attrs = {'setup_requires': [req]} # NOTE: we need to parse the config file (e.g. setup.cfg) to make sure # it honours the options set in the [easy_install] section, and we need # to explicitly fetch the requirement eggs as setup_requires does not # get honored in recent versions of setuptools: # https://github.com/pypa/setuptools/issues/1273 try: context = _verbose if DEBUG else _silence with context(): dist = _Distribution(attrs=attrs) try: dist.parse_config_files(ignore_option_errors=True) dist.fetch_build_eggs(req) except TypeError: # On older versions of setuptools, ignore_option_errors # doesn't exist, and the above two lines are not needed # so we can just continue pass # If the setup_requires succeeded it will have added the new dist to # the main working_set return pkg_resources.working_set.by_key.get(DIST_NAME) except Exception as e: if DEBUG: raise msg = 'Error retrieving {0} from {1}:\n{2}' if find_links: source = find_links[0] elif index_url != INDEX_URL: source = index_url else: source = 'PyPI' raise Exception(msg.format(DIST_NAME, source, repr(e))) def _do_upgrade(self, dist): # Build up a requirement for a higher bugfix release but a lower minor # release (so API compatibility is guaranteed) next_version = _next_version(dist.parsed_version) req = pkg_resources.Requirement.parse( '{0}>{1},<{2}'.format(DIST_NAME, dist.version, next_version)) package_index = PackageIndex(index_url=self.index_url) upgrade = package_index.obtain(req) if upgrade is not None: return self._do_download(version=upgrade.version) def _check_submodule(self): """ Check if the given path is a git submodule. See the docstrings for ``_check_submodule_using_git`` and ``_check_submodule_no_git`` for further details. """ if (self.path is None or (os.path.exists(self.path) and not os.path.isdir(self.path))): return False if self.use_git: return self._check_submodule_using_git() else: return self._check_submodule_no_git() def _check_submodule_using_git(self): """ Check if the given path is a git submodule. If so, attempt to initialize and/or update the submodule if needed. This function makes calls to the ``git`` command in subprocesses. The ``_check_submodule_no_git`` option uses pure Python to check if the given path looks like a git submodule, but it cannot perform updates. 
""" cmd = ['git', 'submodule', 'status', '--', self.path] try: log.info('Running `{0}`; use the --no-git option to disable git ' 'commands'.format(' '.join(cmd))) returncode, stdout, stderr = run_cmd(cmd) except _CommandNotFound: # The git command simply wasn't found; this is most likely the # case on user systems that don't have git and are simply # trying to install the package from PyPI or a source # distribution. Silently ignore this case and simply don't try # to use submodules return False stderr = stderr.strip() if returncode != 0 and stderr: # Unfortunately the return code alone cannot be relied on, as # earlier versions of git returned 0 even if the requested submodule # does not exist # This is a warning that occurs in perl (from running git submodule) # which only occurs with a malformatted locale setting which can # happen sometimes on OSX. See again # https://github.com/astropy/astropy/issues/2749 perl_warning = ('perl: warning: Falling back to the standard locale ' '("C").') if not stderr.strip().endswith(perl_warning): # Some other unknown error condition occurred log.warn('git submodule command failed ' 'unexpectedly:\n{0}'.format(stderr)) return False # Output of `git submodule status` is as follows: # # 1: Status indicator: '-' for submodule is uninitialized, '+' if # submodule is initialized but is not at the commit currently indicated # in .gitmodules (and thus needs to be updated), or 'U' if the # submodule is in an unstable state (i.e. has merge conflicts) # # 2. SHA-1 hash of the current commit of the submodule (we don't really # need this information but it's useful for checking that the output is # correct) # # 3. The output of `git describe` for the submodule's current commit # hash (this includes for example what branches the commit is on) but # only if the submodule is initialized. We ignore this information for # now _git_submodule_status_re = re.compile( '^(?P<status>[+-U ])(?P<commit>[0-9a-f]{40}) ' '(?P<submodule>\S+)( .*)?$') # The stdout should only contain one line--the status of the # requested submodule m = _git_submodule_status_re.match(stdout) if m: # Yes, the path *is* a git submodule self._update_submodule(m.group('submodule'), m.group('status')) return True else: log.warn( 'Unexpected output from `git submodule status`:\n{0}\n' 'Will attempt import from {1!r} regardless.'.format( stdout, self.path)) return False def _check_submodule_no_git(self): """ Like ``_check_submodule_using_git``, but simply parses the .gitmodules file to determine if the supplied path is a git submodule, and does not exec any subprocesses. This can only determine if a path is a submodule--it does not perform updates, etc. This function may need to be updated if the format of the .gitmodules file is changed between git versions. """ gitmodules_path = os.path.abspath('.gitmodules') if not os.path.isfile(gitmodules_path): return False # This is a minimal reader for gitconfig-style files. It handles a few of # the quirks that make gitconfig files incompatible with ConfigParser-style # files, but does not support the full gitconfig syntax (just enough # needed to read a .gitmodules file). 
gitmodules_fileobj = io.StringIO() # Must use io.open for cross-Python-compatible behavior wrt unicode with io.open(gitmodules_path) as f: for line in f: # gitconfig files are more flexible with leading whitespace; just # go ahead and remove it line = line.lstrip() # comments can start with either # or ; if line and line[0] in (':', ';'): continue gitmodules_fileobj.write(line) gitmodules_fileobj.seek(0) cfg = RawConfigParser() try: cfg.readfp(gitmodules_fileobj) except Exception as exc: log.warn('Malformatted .gitmodules file: {0}\n' '{1} cannot be assumed to be a git submodule.'.format( exc, self.path)) return False for section in cfg.sections(): if not cfg.has_option(section, 'path'): continue submodule_path = cfg.get(section, 'path').rstrip(os.sep) if submodule_path == self.path.rstrip(os.sep): return True return False def _update_submodule(self, submodule, status): if status == ' ': # The submodule is up to date; no action necessary return elif status == '-': if self.offline: raise _AHBootstrapSystemExit( "Cannot initialize the {0} submodule in --offline mode; " "this requires being able to clone the submodule from an " "online repository.".format(submodule)) cmd = ['update', '--init'] action = 'Initializing' elif status == '+': cmd = ['update'] action = 'Updating' if self.offline: cmd.append('--no-fetch') elif status == 'U': raise _AHBootstrapSystemExit( 'Error: Submodule {0} contains unresolved merge conflicts. ' 'Please complete or abandon any changes in the submodule so that ' 'it is in a usable state, then try again.'.format(submodule)) else: log.warn('Unknown status {0!r} for git submodule {1!r}. Will ' 'attempt to use the submodule as-is, but try to ensure ' 'that the submodule is in a clean state and contains no ' 'conflicts or errors.\n{2}'.format(status, submodule, _err_help_msg)) return err_msg = None cmd = ['git', 'submodule'] + cmd + ['--', submodule] log.warn('{0} {1} submodule with: `{2}`'.format( action, submodule, ' '.join(cmd))) try: log.info('Running `{0}`; use the --no-git option to disable git ' 'commands'.format(' '.join(cmd))) returncode, stdout, stderr = run_cmd(cmd) except OSError as e: err_msg = str(e) else: if returncode != 0: err_msg = stderr if err_msg is not None: log.warn('An unexpected error occurred updating the git submodule ' '{0!r}:\n{1}\n{2}'.format(submodule, err_msg, _err_help_msg)) class _CommandNotFound(OSError): """ An exception raised when a command run with run_cmd is not found on the system. """ def run_cmd(cmd): """ Run a command in a subprocess, given as a list of command-line arguments. Returns a ``(returncode, stdout, stderr)`` tuple. """ try: p = sp.Popen(cmd, stdout=sp.PIPE, stderr=sp.PIPE) # XXX: May block if either stdout or stderr fill their buffers; # however for the commands this is currently used for that is # unlikely (they should have very brief output) stdout, stderr = p.communicate() except OSError as e: if DEBUG: raise if e.errno == errno.ENOENT: msg = 'Command not found: `{0}`'.format(' '.join(cmd)) raise _CommandNotFound(msg, cmd) else: raise _AHBootstrapSystemExit( 'An unexpected error occurred when running the ' '`{0}` command:\n{1}'.format(' '.join(cmd), str(e))) # Can fail of the default locale is not configured properly. See # https://github.com/astropy/astropy/issues/2749. For the purposes under # consideration 'latin1' is an acceptable fallback. 
try: stdio_encoding = locale.getdefaultlocale()[1] or 'latin1' except ValueError: # Due to an OSX oddity locale.getdefaultlocale() can also crash # depending on the user's locale/language settings. See: # http://bugs.python.org/issue18378 stdio_encoding = 'latin1' # Unlikely to fail at this point but even then let's be flexible if not isinstance(stdout, str): stdout = stdout.decode(stdio_encoding, 'replace') if not isinstance(stderr, str): stderr = stderr.decode(stdio_encoding, 'replace') return (p.returncode, stdout, stderr) def _next_version(version): """ Given a parsed version from pkg_resources.parse_version, returns a new version string with the next minor version. Examples ======== >>> _next_version(pkg_resources.parse_version('1.2.3')) '1.3.0' """ if hasattr(version, 'base_version'): # New version parsing from setuptools >= 8.0 if version.base_version: parts = version.base_version.split('.') else: parts = [] else: parts = [] for part in version: if part.startswith('*'): break parts.append(part) parts = [int(p) for p in parts] if len(parts) < 3: parts += [0] * (3 - len(parts)) major, minor, micro = parts[:3] return '{0}.{1}.{2}'.format(major, minor + 1, 0) class _DummyFile(object): """A noop writeable object.""" errors = '' # Required for Python 3.x encoding = 'utf-8' def write(self, s): pass def flush(self): pass @contextlib.contextmanager def _verbose(): yield @contextlib.contextmanager def _silence(): """A context manager that silences sys.stdout and sys.stderr.""" old_stdout = sys.stdout old_stderr = sys.stderr sys.stdout = _DummyFile() sys.stderr = _DummyFile() exception_occurred = False try: yield except: exception_occurred = True # Go ahead and clean up so that exception handling can work normally sys.stdout = old_stdout sys.stderr = old_stderr raise if not exception_occurred: sys.stdout = old_stdout sys.stderr = old_stderr _err_help_msg = """ If the problem persists consider installing astropy_helpers manually using pip (`pip install astropy_helpers`) or by manually downloading the source archive, extracting it, and installing by running `python setup.py install` from the root of the extracted source code. """ class _AHBootstrapSystemExit(SystemExit): def __init__(self, *args): if not args: msg = 'An unknown problem occurred bootstrapping astropy_helpers.' else: msg = args[0] msg += '\n' + _err_help_msg super(_AHBootstrapSystemExit, self).__init__(msg, *args[1:]) BOOTSTRAPPER = _Bootstrapper.main() def use_astropy_helpers(**kwargs): """ Ensure that the `astropy_helpers` module is available and is importable. This supports automatic submodule initialization if astropy_helpers is included in a project as a git submodule, or will download it from PyPI if necessary. Parameters ---------- path : str or None, optional A filesystem path relative to the root of the project's source code that should be added to `sys.path` so that `astropy_helpers` can be imported from that path. If the path is a git submodule it will automatically be initialized and/or updated. The path may also be to a ``.tar.gz`` archive of the astropy_helpers source distribution. In this case the archive is automatically unpacked and made temporarily available on `sys.path` as a ``.egg`` archive. If `None` skip straight to downloading. download_if_needed : bool, optional If the provided filesystem path is not found an attempt will be made to download astropy_helpers from PyPI. It will then be made temporarily available on `sys.path` as a ``.egg`` archive (using the ``setup_requires`` feature of setuptools. 
If the ``--offline`` option is given at the command line the value of this argument is overridden to `False`. index_url : str, optional If provided, use a different URL for the Python package index than the main PyPI server. use_git : bool, optional If `False` no git commands will be used--this effectively disables support for git submodules. If the ``--no-git`` option is given at the command line the value of this argument is overridden to `False`. auto_upgrade : bool, optional By default, when installing a package from a non-development source distribution ah_boostrap will try to automatically check for patch releases to astropy-helpers on PyPI and use the patched version over any bundled versions. Setting this to `False` will disable that functionality. If the ``--offline`` option is given at the command line the value of this argument is overridden to `False`. offline : bool, optional If `False` disable all actions that require an internet connection, including downloading packages from the package index and fetching updates to any git submodule. Defaults to `True`. """ global BOOTSTRAPPER config = BOOTSTRAPPER.config config.update(**kwargs) # Create a new bootstrapper with the updated configuration and run it BOOTSTRAPPER = _Bootstrapper(**config) BOOTSTRAPPER.run()
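# ---------------------------------------------------------------------------
# Usage sketch (not part of the original module): the two ways a project's
# setup.py typically drives this bootstrap module. The package name and the
# setup.cfg snippet are illustrative assumptions, not taken from a real
# project.
#
# # setup.cfg
# # [ah_bootstrap]
# # auto_use = True
#
# # setup.py -- with auto_use set as above, the import alone bootstraps,
# # because this module runs _Bootstrapper.main() at import time:
# import ah_bootstrap  # noqa
#
# # Without auto_use, configure and run the bootstrapper explicitly:
# # ah_bootstrap.use_astropy_helpers(path='astropy_helpers', auto_upgrade=False)
#
# from setuptools import setup
# setup(name='example-package', version='0.1.dev0')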
# coding=utf-8
# Author: Nic Wolfe <[email protected]>
# URL: https://sickchill.github.io
#
# This file is part of SickChill.
#
# SickChill is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickChill is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickChill. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, print_function, unicode_literals

# Local Folder Imports
from .common import PageTemplate
from .home import Home
from .routes import Route


@Route('/IRC(/?.*)', name='irc')
class HomeIRC(Home):
    def __init__(self, *args, **kwargs):
        super(HomeIRC, self).__init__(*args, **kwargs)

    def index(self, *args_, **kwargs_):
        t = PageTemplate(rh=self, filename="IRC.mako")
        return t.render(topmenu="system", header=_("IRC"), title=_("IRC"),
                        controller="IRC", action="index")
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Implementation of a VariablePool.

The VariablePool interface allows centralized variable creation and
maintenance.
"""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import abc
from typing import Any, Text, List

import numpy as np
import six
import tensorflow.compat.v1 as tf

from structured_multihashing.smh import virtual_variable

SUPPORTED_DTYPES = [tf.bfloat16, tf.float16, tf.float32, tf.float64]
VARIABLE_POOL_TYPE = tf.float32


@six.add_metaclass(abc.ABCMeta)
class VariablePool(object):
  """A class that handles a pool of virtual-variables.

  Allocates slices of the pool in a non-overlapping way.
  """

  @property
  def core_variables(self):
    """A list of variables created by the VariablePool."""
    return self._core_variables

  @property
  def scope_name(self):
    """The name of the scope core variables are created in."""
    return self._scope_name

  @property
  def core_size(self):
    """The number of variables used by the pool."""
    return sum(var.shape.num_elements() for var in self.core_variables)

  @abc.abstractproperty
  def status(self):
    """Total number of indices allocated."""
    pass

  @abc.abstractproperty
  def pool_size(self):
    """Total number of elements supported by the VariablePool."""
    pass

  @abc.abstractmethod
  def get_slice(self, shape):
    """Allocates variables from the variable pool with given shape.

    Args:
      shape: A list of integers representing the shape of the slice. Same as
        tf.get_variable().

    Returns:
      A tf.Tensor with appropriate shape.
    """
    pass


class ProductVariablePool(VariablePool):
  """A VariablePool where the pool is a matrix product."""

  def __init__(self,
               trainable,
               pool_size,
               fraction,
               stddev=1.0,
               initializer=tf.random_normal_initializer,
               use_kronecker_product=False,
               index_store_type=virtual_variable.IndexStoreType.basic):
    """Creates an instance of `ProductVariablePool`.

    Args:
      trainable: boolean, indicates whether the created variables are
        trainable or not.
      pool_size: int, total number of virtual variables required. The actual
        number of virtual variables created can be larger than the number
        specified by this argument.
      fraction: float, the fraction of `pool_size` of variables to create.
      stddev: float, standard deviation for the variable pool. Default value
        is 1.0.
      initializer: A tf.initializer e.g. 'truncated_normal_initializer' or
        'random_normal_initializer'. Default value is
        tf.random_normal_initializer.
      use_kronecker_product: Indicates whether the product should be a
        Kronecker product or a matrix product.
      index_store_type: IndexStoreType, key of SUPPORTED_INDEX_STORES.
    """
    if fraction <= 0 or fraction > 1.0:
      raise ValueError('fraction %f must be >0 and <=1.0' % fraction)
    self._scope_name = 'ProductVariablePool'
    if use_kronecker_product:
      variable_generator = _create_kronecker_variables
    else:
      variable_generator = _create_matmul_variables
    with tf.variable_scope(self._scope_name):
      variables, size, pool = variable_generator(pool_size, fraction,
                                                 initializer, stddev,
                                                 trainable)
    self._core_variables = variables
    self._virtual_variables = tf.reshape(pool, [size], name='weight_pool')
    index_store_cls = virtual_variable.get_index_store(index_store_type)
    self._index_store = index_store_cls(size)

  @property
  def status(self):
    return self._index_store.current()

  @property
  def pool_size(self):
    return self._index_store.size

  def get_slice(self, shape):
    """Allocates variables from the variable pool with given shape."""
    tensor_num_elements = int(np.prod(shape))
    sliced_virtual_variables = self._index_store.allocate_variables_from_pool(
        self._virtual_variables, tensor_num_elements)
    weight_tensor = tf.reshape(sliced_virtual_variables, shape)
    return tf.identity(weight_tensor, 'product_slice%d' % self.status)


HASH_POOL_SEED = 412013


class HashVariablePool(VariablePool):
  """A VariablePool where the mapping into the pool is hashed."""

  def __init__(self,
               trainable,
               stddev,
               pool_size,
               fraction,
               initializer,
               seed=HASH_POOL_SEED,
               index_store_type=virtual_variable.IndexStoreType.basic):
    """Creates an instance of `HashVariablePool`.

    Args:
      trainable: boolean, indicates whether the created variables are
        trainable or not.
      stddev: float, standard deviation for the variable pool.
      pool_size: int, total number of virtual variables required. The actual
        number of virtual variables created can be larger than the number
        specified by this argument.
      fraction: float, the fraction of `pool_size` of variables to create.
      initializer: A tf.initializer e.g. 'truncated_normal_initializer' or
        'random_normal'.
      seed: Integer, seed for the random hashing.
      index_store_type: String, key of SUPPORTED_INDEX_STORES. 'padding' is
        not supported by HashVariablePool yet.
    """
    del seed  # unused
    if fraction <= 0 or fraction > 1.0:
      raise ValueError('fraction %f must be >0 and <=1.0' % fraction)
    self._scope_name = 'HashVariablePool'
    self._hash_indices = None
    hash_size = int(np.floor(fraction * pool_size))
    if not hash_size:
      raise ValueError(
          'fraction %f too low, results in 0 size hash for pool size %d.' %
          (fraction, pool_size))
    index_store_cls = virtual_variable.get_index_store(index_store_type)
    self._index_store = index_store_cls(pool_size)
    if self._index_store.type == virtual_variable.IndexStoreType.padding:
      raise ValueError('HashVariablePool does not support PaddingIndexStore '
                       'yet.')
    replicas = int(np.ceil(float(pool_size + 1) / hash_size))
    # The following is for python2/3 compatibility, as range(k) does not
    # return a list in python 3.
    base_index_list = range(hash_size)
    if not isinstance(base_index_list, list):
      base_index_list = list(base_index_list)
    indices = np.array(base_index_list * replicas)
    # len(indices) = hash_size * replicas
    #             >= hash_size * (pool_size + 1) / hash_size
    #              ~ pool_size
    assert len(indices) >= pool_size
    indices = indices[:pool_size]
    # Preserving the state is done in order to not mess up with other elements
    # that might depend on numpy seed for some reason.
    # debugging:
    # np_state = np.random.get_state()
    # np.random.seed(seed=seed)
    # random_indices = np.random.permutation(len(indices))
    # tf.logging.info('First 4 indices = %d %d', random_indices[:4], seed)
    # self._set_hash_indices(random_indices)
    # np.random.set_state(np_state)
    self._set_hash_indices(indices)
    with tf.variable_scope(self._scope_name):
      self._hash = tf.get_variable(
          'hash', [int(hash_size)],
          trainable=trainable,
          initializer=initializer(stddev=stddev))
    self._core_variables = [self._hash]

  @property
  def status(self):
    return self._index_store.current()

  @property
  def pool_size(self):
    return self._index_store.size

  def _set_hash_indices(self, indices):
    if self._hash_indices is not None and len(
        self._hash_indices) != len(indices):
      raise ValueError('Trying to set wrong length of indices %d' %
                       len(indices))
    self._hash_indices = indices

  def get_slice(self, shape):
    """Allocates variables from self._virtual_variables."""
    tensor_num_elements = int(np.prod(shape))
    sliced_hash_indices = self._index_store.allocate_variables_from_pool(
        tf.convert_to_tensor(self._hash_indices), tensor_num_elements)
    sliced_virtual_variables = tf.gather(
        self._hash, indices=sliced_hash_indices)
    weight_tensor = tf.reshape(sliced_virtual_variables, shape)
    return tf.identity(weight_tensor, 'hash_slice_%d' % self.status)


VARIABLE_POOLS_NAMES = {
    'PRODUCT_POOL': ProductVariablePool,
    'HASH_POOL': HashVariablePool
}


def has_seed_arg(class_name):
  return 'HASH_POOL' == class_name


class MetaVariablePool(VariablePool):
  """MetaVariablePool combines a specified set of other variable pools."""

  def __init__(self,
               trainable,
               stddev,
               pool_size,
               fraction,
               initializer,
               elements,
               reduce_by='SUM',
               index_store_type=virtual_variable.IndexStoreType.basic):
    """Creates an instance of `MetaVariablePool`.

    Args:
      trainable: boolean, indicates whether the created variables are
        trainable or not.
      stddev: float, standard deviation for the variable pool.
      pool_size: int, total number of virtual variables required. The actual
        number of virtual variables created can be larger than the number
        specified by this argument.
      fraction: float, the fraction of `pool_size` of variables to create.
      initializer: A tf.initializer e.g. 'truncated_normal_initializer' or
        'random_normal'.
      elements: Names of VariablePools to build, must be keys of
        VARIABLE_POOLS_NAMES.
      reduce_by: 'SUM' or 'PROD'.
      index_store_type: String, key of SUPPORTED_INDEX_STORES.
    """
    if fraction <= 0 or fraction > 1.0:
      raise ValueError('fraction %f must be >0 and <=1.0' % fraction)
    self._scope_name = 'MetaVariablePool'
    self._sub_pools = []
    self._core_variables = []
    if reduce_by == 'SUM':
      self._reduce = tf.add_n
    elif reduce_by == 'PROD':
      self._reduce = _prod_n
    else:
      raise ValueError('unsupported reduce_by %s' % reduce_by)
    num_elements = len(elements)
    element_stddev = stddev / num_elements
    element_fraction = fraction / num_elements
    with tf.variable_scope(self._scope_name):
      for i, class_name in enumerate(elements):
        if class_name not in VARIABLE_POOLS_NAMES:
          raise ValueError(
              'Unrecognized element %s, supported elements are %s' %
              (class_name, VARIABLE_POOLS_NAMES.keys()))
        kwargs = {
            'trainable': trainable,
            'stddev': element_stddev,
            'pool_size': pool_size,
            'fraction': element_fraction,
            'initializer': initializer,
            'index_store_type': index_store_type
        }
        with tf.variable_scope('subpool_%d' % i):
          if has_seed_arg(class_name):
            kwargs['seed'] = HASH_POOL_SEED + i
          pool = VARIABLE_POOLS_NAMES[class_name](**kwargs)  # type: ignore
          self._sub_pools.append(pool)
          self._core_variables.extend(pool.core_variables)

  @property
  def status(self):
    return self._sub_pools[0].status

  @property
  def pool_size(self):
    return self._sub_pools[0].pool_size

  def get_slice(self, shape):
    """Allocates variables from the variable pool with given shape."""
    weight_tensor = self._reduce(
        [pool.get_slice(shape) for pool in self._sub_pools])
    return tf.identity(weight_tensor, 'meta_slice%d' % self.status)


def _compute_correlated_stddev(target_std, n_terms):
  """Computes corrected std for Kronecker Products initialization.

  Computes std of a_i,b_i such that std(sum a_i*b_i) = target_std; assuming
  a_i, b_i ~ i.i.d with mean=0 and var = corrected_stddev^2.
  Details of computation:
  target_std^2 = var(sum(a_i*b_i)) = sum(var(a_i*b_i))
               = n_terms*var(a_i)*var(b_i) = n_terms*corrected_stddev^4
  ===> corrected_stddev = sqrt(target_std/sqrt(n_terms))

  Args:
    target_std: Target std we are looking to achieve.
    n_terms: Number of terms (a_i*b_i) in the summation.

  Returns:
    Corrected std value.
  """
  corrected_stddev = np.sqrt(target_std / np.sqrt(n_terms))
  return np.float32(corrected_stddev)


def _prod_n(tensors):
  """Returns the product of the elements in `tensors`.

  Like tf.add_n, but with a product.

  Args:
    tensors: A list of tf.Tensor to multiply.

  Returns:
    The product of the elements of `tensors`.
  """
  if not tensors:
    raise ValueError('Empty list of tensors')
  res = tensors[0]
  for tensor in tensors[1:]:
    res *= tensor
  return res


def _create_kronecker_variables(pool_size, fraction, initializer, stddev,
                                trainable):
  """Creates Kronecker-product variable pool from parameters."""
  # When using a Kronecker product we create a pair of matrices of size
  # (num_replicas, matrix_dim, matrix_dim); a reduce sum of their product is
  # of size (matrix_dim^2, matrix_dim^2) with matrix_dim^4 elements.
  matrix_dim = int(np.ceil(pool_size**0.25))
  num_elements = matrix_dim * matrix_dim
  num_replicas = int(np.floor(0.5 * fraction * pool_size / num_elements))
  if not num_replicas:
    raise ValueError(
        'fraction %f too low, results in 0 replicas for pool size %d dim %d.'
        % (fraction, pool_size, matrix_dim))
  size = matrix_dim**4
  correlated_stddev = _compute_correlated_stddev(stddev, num_replicas)
  left_matrix = tf.get_variable(
      'variable_left', [num_replicas, matrix_dim, matrix_dim],
      trainable=trainable,
      dtype=VARIABLE_POOL_TYPE,
      initializer=initializer(stddev=correlated_stddev))
  right_matrix = tf.get_variable(
      'variable_right', [num_replicas, matrix_dim, matrix_dim],
      trainable=trainable,
      dtype=VARIABLE_POOL_TYPE,
      initializer=initializer(stddev=correlated_stddev))
  left = tf.linalg.LinearOperatorFullMatrix(left_matrix, is_square=True)
  right = tf.linalg.LinearOperatorFullMatrix(right_matrix, is_square=True)
  pool = tf.reduce_sum(
      tf.linalg.LinearOperatorKronecker([left, right],
                                        is_square=True).to_dense(), 0)
  return [left_matrix, right_matrix], size, pool


def _create_matmul_variables(pool_size, fraction, initializer, stddev,
                             trainable):
  """Creates matrix-multiply variable pool from parameters."""
  # When using a matrix multiply we create a pair of matrices of size
  # (matrix_dim, num_replicas) and (num_replicas, matrix_dim); their product
  # is of size (matrix_dim, matrix_dim) with matrix_dim^2 elements.
  matrix_dim = int(np.ceil(pool_size**0.5))
  num_replicas = int(np.floor(0.5 * fraction * pool_size / matrix_dim))
  if not num_replicas:
    raise ValueError(
        'fraction %f too low, results in 0 replicas for pool size %d dim %d.'
        % (fraction, pool_size, matrix_dim))
  size = int(matrix_dim**2)
  correlated_stddev = _compute_correlated_stddev(stddev, num_replicas)
  left_matrix = tf.get_variable(
      'variable_left', [matrix_dim, num_replicas],
      trainable=trainable,
      dtype=VARIABLE_POOL_TYPE,
      initializer=initializer(stddev=correlated_stddev))
  right_matrix = tf.get_variable(
      'variable_right', [num_replicas, matrix_dim],
      trainable=trainable,
      dtype=VARIABLE_POOL_TYPE,
      initializer=initializer(stddev=correlated_stddev))
  pool = tf.matmul(left_matrix, right_matrix)
  return [left_matrix, right_matrix], size, pool
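if __name__ == '__main__':
  # Demo (not part of the original module): carve two non-overlapping weight
  # tensors out of a shared ProductVariablePool. A minimal sketch assuming
  # TF1-style graph mode and that structured_multihashing is importable; the
  # pool_size/fraction values are arbitrary illustrations.
  demo_pool = ProductVariablePool(trainable=True, pool_size=10000,
                                  fraction=0.1)
  w1 = demo_pool.get_slice([64, 32])
  w2 = demo_pool.get_slice([32, 10])
  with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print('allocated %d of %d virtual variables' %
          (demo_pool.status, demo_pool.pool_size))
    print('w1:', sess.run(w1).shape, 'w2:', sess.run(w2).shape)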
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Discrete Cosine Transform ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import math as _math

from tensorflow.python.framework import dtypes as _dtypes
from tensorflow.python.framework import ops as _ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops as _array_ops
from tensorflow.python.ops import math_ops as _math_ops
from tensorflow.python.ops.signal import fft_ops
from tensorflow.python.util.tf_export import tf_export


def _validate_dct_arguments(input_tensor, dct_type, n, axis, norm):
  """Checks that DCT/IDCT arguments are compatible and well formed."""
  if n is not None:
    raise NotImplementedError("The DCT length argument is not implemented.")
  if axis != -1:
    raise NotImplementedError("axis must be -1. Got: %s" % axis)
  if dct_type not in (1, 2, 3):
    raise ValueError("Only Types I, II and III (I)DCT are supported.")
  if dct_type == 1:
    if norm == "ortho":
      raise ValueError("Normalization is not supported for the Type-I DCT.")
    if input_tensor.shape[-1] is not None and input_tensor.shape[-1] < 2:
      raise ValueError(
          "Type-I DCT requires the dimension to be greater than one.")

  if norm not in (None, "ortho"):
    raise ValueError(
        "Unknown normalization. Expected None or 'ortho', got: %s" % norm)


# TODO(rjryan): Implement `n` and `axis` parameters.
@tf_export("signal.dct", v1=["signal.dct", "spectral.dct"])
def dct(input, type=2, n=None, axis=-1, norm=None, name=None):  # pylint: disable=redefined-builtin
  """Computes the 1D [Discrete Cosine Transform (DCT)][dct] of `input`.

  Currently only Types I, II and III are supported.
  Type I is implemented using a length `2N` padded `tf.spectral.rfft`.
  Type II is implemented using a length `2N` padded `tf.spectral.rfft`, as
  described here: https://dsp.stackexchange.com/a/10606.
  Type III is a fairly straightforward inverse of Type II
  (i.e. using a length `2N` padded `tf.spectral.irfft`).

  @compatibility(scipy)
  Equivalent to scipy.fftpack.dct for Type-I, Type-II and Type-III DCT.
  https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.fftpack.dct.html
  @end_compatibility

  Args:
    input: A `[..., samples]` `float32` `Tensor` containing the signals to
      take the DCT of.
    type: The DCT type to perform. Must be 1, 2 or 3.
    n: For future expansion. The length of the transform. Must be `None`.
    axis: For future expansion. The axis to compute the DCT along. Must be
      `-1`.
    norm: The normalization to apply. `None` for no normalization or `'ortho'`
      for orthonormal normalization.
    name: An optional name for the operation.

  Returns:
    A `[..., samples]` `float32` `Tensor` containing the DCT of `input`.

  Raises:
    ValueError: If `type` is not `1`, `2` or `3`, `n` is not `None`, `axis` is
      not `-1`, or `norm` is not `None` or `'ortho'`.
    ValueError: If `type` is `1` and `norm` is `ortho`.

  [dct]: https://en.wikipedia.org/wiki/Discrete_cosine_transform
  """
  _validate_dct_arguments(input, type, n, axis, norm)
  with _ops.name_scope(name, "dct", [input]):
    # We use the RFFT to compute the DCT and TensorFlow only supports float32
    # for FFTs at the moment.
    input = _ops.convert_to_tensor(input, dtype=_dtypes.float32)

    axis_dim = (tensor_shape.dimension_value(input.shape[-1])
                or _array_ops.shape(input)[-1])
    axis_dim_float = _math_ops.cast(axis_dim, _dtypes.float32)

    if type == 1:
      dct1_input = _array_ops.concat([input, input[..., -2:0:-1]], axis=-1)
      dct1 = _math_ops.real(fft_ops.rfft(dct1_input))
      return dct1

    if type == 2:
      scale = 2.0 * _math_ops.exp(
          _math_ops.complex(
              0.0, -_math_ops.range(axis_dim_float) * _math.pi * 0.5 /
              axis_dim_float))

      # TODO(rjryan): Benchmark performance and memory usage of the various
      # approaches to computing a DCT via the RFFT.
      dct2 = _math_ops.real(
          fft_ops.rfft(
              input, fft_length=[2 * axis_dim])[..., :axis_dim] * scale)

      if norm == "ortho":
        n1 = 0.5 * _math_ops.rsqrt(axis_dim_float)
        n2 = n1 * _math_ops.sqrt(2.0)
        # Use tf.pad to make a vector of [n1, n2, n2, n2, ...].
        weights = _array_ops.pad(
            _array_ops.expand_dims(n1, 0), [[0, axis_dim - 1]],
            constant_values=n2)
        dct2 *= weights

      return dct2

    elif type == 3:
      if norm == "ortho":
        n1 = _math_ops.sqrt(axis_dim_float)
        n2 = n1 * _math_ops.sqrt(0.5)
        # Use tf.pad to make a vector of [n1, n2, n2, n2, ...].
        weights = _array_ops.pad(
            _array_ops.expand_dims(n1, 0), [[0, axis_dim - 1]],
            constant_values=n2)
        input *= weights
      else:
        input *= axis_dim_float
      scale = 2.0 * _math_ops.exp(
          _math_ops.complex(
              0.0,
              _math_ops.range(axis_dim_float) * _math.pi * 0.5 /
              axis_dim_float))
      dct3 = _math_ops.real(
          fft_ops.irfft(
              scale * _math_ops.complex(input, 0.0),
              fft_length=[2 * axis_dim]))[..., :axis_dim]

      return dct3


# TODO(rjryan): Implement `n` and `axis` parameters.
@tf_export("signal.idct", v1=["signal.idct", "spectral.idct"])
def idct(input, type=2, n=None, axis=-1, norm=None, name=None):  # pylint: disable=redefined-builtin
  """Computes the 1D [Inverse Discrete Cosine Transform (DCT)][idct] of `input`.

  Currently only Types I, II and III are supported. Type III is the inverse of
  Type II, and vice versa.

  Note that you must re-normalize by 1/(2n) to obtain an inverse if `norm` is
  not `'ortho'`. That is:
  `signal == idct(dct(signal)) * 0.5 / signal.shape[-1]`.
  When `norm='ortho'`, we have:
  `signal == idct(dct(signal, norm='ortho'), norm='ortho')`.

  @compatibility(scipy)
  Equivalent to scipy.fftpack.idct for Type-I, Type-II and Type-III DCT.
  https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.fftpack.idct.html
  @end_compatibility

  Args:
    input: A `[..., samples]` `float32` `Tensor` containing the signals to
      take the DCT of.
    type: The IDCT type to perform. Must be 1, 2 or 3.
    n: For future expansion. The length of the transform. Must be `None`.
    axis: For future expansion. The axis to compute the DCT along. Must be
      `-1`.
    norm: The normalization to apply. `None` for no normalization or `'ortho'`
      for orthonormal normalization.
    name: An optional name for the operation.

  Returns:
    A `[..., samples]` `float32` `Tensor` containing the IDCT of `input`.

  Raises:
    ValueError: If `type` is not `1`, `2` or `3`, `n` is not `None`, `axis` is
      not `-1`, or `norm` is not `None` or `'ortho'`.

  [idct]:
  https://en.wikipedia.org/wiki/Discrete_cosine_transform#Inverse_transforms
  """
  _validate_dct_arguments(input, type, n, axis, norm)
  inverse_type = {1: 1, 2: 3, 3: 2}[type]
  return dct(input, type=inverse_type, n=n, axis=axis, norm=norm, name=name)
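# ---------------------------------------------------------------------------
# Round-trip sketch (not part of the original module), assuming a TensorFlow
# release where these ops are exported as tf.signal.dct / tf.signal.idct and
# eager execution is enabled:
#
# import tensorflow as tf
#
# signals = tf.random.normal([4, 128])
# coeffs = tf.signal.dct(signals, type=2, norm='ortho')
# recovered = tf.signal.idct(coeffs, type=2, norm='ortho')
# # With norm='ortho' the Type-II DCT is orthonormal, so its Type-III inverse
# # recovers the input up to floating-point error; without normalization,
# # re-scale by 0.5 / N as documented in `idct` above.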
from lino.django.igen import models
from django.contrib import admin


class ContactAdmin(admin.ModelAdmin):
    fieldsets = [
        ("Person data", dict(fields=['title', 'firstName', 'lastName'])),
        ("Company data", dict(fields=['companyName', 'nationalId', 'vatId'])),
        ('Postal address', dict(fields=['addr1', 'addr2', 'city', 'zipCode',
                                        'region', 'country'])),
        ('Contact', dict(fields=['email', 'phone', 'gsm'])),
        ('Invoicing', dict(fields=['paymentTerm', 'vatExempt', 'itemVat'],
                           classes=['collapse'])),
        ('Other', dict(fields=['remarks', 'language'])),
    ]
    list_display = ('__unicode__', 'companyName', 'lastName', 'firstName',
                    'as_address')
    list_filter = ['firstName', 'lastName', 'companyName']
    search_fields = ['firstName', 'lastName', 'companyName']
    ordering = ('companyName', 'lastName', 'firstName')


admin.site.register(models.Contact, ContactAdmin)
admin.site.register(models.Product)
admin.site.register(models.ProductCat)
# admin.site.register(models.Country)
# admin.site.register(models.Language)
admin.site.register(models.PaymentTerm)
admin.site.register(models.ShippingMode)
admin.site.register(models.SalesDocument)
admin.site.register(models.Invoice)
admin.site.register(models.Order)
admin.site.register(models.DocItem)
#!/usr/bin/env python #encoding: utf8 import rospy, cv2 from sensor_msgs.msg import Image from cv_bridge import CvBridge, CvBridgeError class FaceToFace(): def __init__(self): sub = rospy.Subscriber("/cv_camera/image_raw", Image, self.get_image) self.bridge = CvBridge() self.image_org = None def get_image(self,img): try: self.image_org = self.bridge.imgmsg_to_cv2(img, "bgr8") except CvBridgeError as e: rospy.logerr(e) def detect_face(self): if self.image_org is None: return None org = self.image_org gimg = cv2.cvtColor(org,cv2.COLOR_BGR2GRAY) classifier = "/usr/share/opencv/haarcascades/haarcascade_frontalface_default.xml" cascade = cv2.CascadeClassifier(classifier) face = cascade.detectMultiScale(gimg,1.1,1,cv2.CASCADE_FIND_BIGGEST_OBJECT) if len(face) == 0: return None r = face[0] cv2.rectangle(org,tuple(r[0:2]),tuple(r[0:2]+r[2:4]),(0,255,255),4) cv2.imwrite("/tmp/image.jpg",org) return "detected" if __name__ == '__main__': rospy.init_node('face_to_face') fd = FaceToFace() rate = rospy.Rate(10) while not rospy.is_shutdown(): rospy.loginfo(fd.detect_face()) rate.sleep() # Copyright 2016 Ryuichi Ueda # Released under the MIT License. # To make line numbers be identical with the book, this statement is written here. Don't move it to the header.
#!/usr/bin/python
# -*- coding: utf-8 -*-

# Copyright: (c) 2016, Ben Doherty <[email protected]>
# Sponsored by Oomph, Inc. http://www.oomphinc.com
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import absolute_import, division, print_function
__metaclass__ = type

ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}

DOCUMENTATION = '''
---
module: archive
version_added: '2.3'
short_description: Creates a compressed archive of one or more files or trees
extends_documentation_fragment: files
description:
    - Packs an archive. It is the opposite of M(unarchive). By default, it assumes the compression source exists on the target.
      It will not copy the source file from the local system to the target before archiving.
      Source files can be deleted after archival by specifying I(remove=True).
options:
  path:
    description:
      - Remote absolute path, glob, or list of paths or globs for the file or files to compress or archive.
    required: true
  format:
    description:
      - The type of compression to use.
      - Support for xz was added in version 2.5.
    choices: [ bz2, gz, tar, xz, zip ]
    default: gz
  dest:
    description:
      - The file name of the destination archive. This is required when C(path) refers to multiple files by
        either specifying a glob, a directory or multiple paths in a list.
  exclude_path:
    version_added: '2.4'
    description:
      - Remote absolute path, glob, or list of paths or globs for the file or files to exclude from the archive.
  remove:
    description:
      - Remove any added source files and trees after adding to archive.
    type: bool
    default: 'no'
author:
  - Ben Doherty (@bendoh)
notes:
    - Requires tarfile, zipfile, gzip and bzip2 packages on target host.
    - Requires lzma or backports.lzma if using xz format.
    - Can produce I(gzip), I(bzip2), I(lzma) and I(zip) compressed files or archives.
'''

EXAMPLES = '''
- name: Compress directory /path/to/foo/ into /path/to/foo.tgz
  archive:
    path: /path/to/foo
    dest: /path/to/foo.tgz

- name: Compress regular file /path/to/foo into /path/to/foo.gz and remove it
  archive:
    path: /path/to/foo
    remove: yes

- name: Create a zip archive of /path/to/foo
  archive:
    path: /path/to/foo
    format: zip

- name: Create a bz2 archive of multiple files, rooted at /path
  archive:
    path:
    - /path/to/foo
    - /path/wong/foo
    dest: /path/file.tar.bz2
    format: bz2

- name: Create a bz2 archive of a globbed path, while excluding specific dirnames
  archive:
    path:
    - /path/to/foo/*
    dest: /path/file.tar.bz2
    exclude_path:
    - /path/to/foo/bar
    - /path/to/foo/baz
    format: bz2

- name: Create a bz2 archive of a globbed path, while excluding a glob of dirnames
  archive:
    path:
    - /path/to/foo/*
    dest: /path/file.tar.bz2
    exclude_path:
    - /path/to/foo/ba*
    format: bz2
'''

RETURN = '''
state:
    description:
        The current state of the archived file.
        If 'absent', then no source files were found and the archive does not exist.
        If 'compress', then the source file is in the compressed state.
        If 'archive', then the source file or paths are currently archived.
        If 'incomplete', then an archive was created, but not all source paths were found.
    type: string
    returned: always
missing:
    description: Any files that were missing from the source.
    type: list
    returned: success
archived:
    description: Any files that were compressed or added to the archive.
    type: list
    returned: success
arcroot:
    description: The archive root.
    type: string
    returned: always
expanded_paths:
    description: The list of matching paths from paths argument.
type: list returned: always expanded_exclude_paths: description: The list of matching exclude paths from the exclude_path argument. type: list returned: always ''' import bz2 import filecmp import glob import gzip import io import os import re import shutil import tarfile import zipfile from traceback import format_exc from ansible.module_utils.basic import AnsibleModule from ansible.module_utils._text import to_native from ansible.module_utils.six import PY3 if PY3: try: import lzma HAS_LZMA = True except ImportError: HAS_LZMA = False else: try: from backports import lzma HAS_LZMA = True except ImportError: HAS_LZMA = False def main(): module = AnsibleModule( argument_spec=dict( path=dict(type='list', required=True), format=dict(type='str', default='gz', choices=['bz2', 'gz', 'tar', 'xz', 'zip']), dest=dict(type='path'), exclude_path=dict(type='list'), remove=dict(type='bool', default=False), ), add_file_common_args=True, supports_check_mode=True, ) params = module.params check_mode = module.check_mode paths = params['path'] dest = params['dest'] exclude_paths = params['exclude_path'] remove = params['remove'] expanded_paths = [] expanded_exclude_paths = [] format = params['format'] globby = False changed = False state = 'absent' # Simple or archive file compression (inapplicable with 'zip' since it's always an archive) archive = False successes = [] # Fail early if not HAS_LZMA and format == 'xz': module.fail_json(msg="lzma or backports.lzma is required when using xz format.") for path in paths: path = os.path.expanduser(os.path.expandvars(path)) # Expand any glob characters. If found, add the expanded glob to the # list of expanded_paths, which might be empty. if ('*' in path or '?' in path): expanded_paths = expanded_paths + glob.glob(path) globby = True # If there are no glob characters the path is added to the expanded paths # whether the path exists or not else: expanded_paths.append(path) # Only attempt to expand the exclude paths if it exists if exclude_paths: for exclude_path in exclude_paths: exclude_path = os.path.expanduser(os.path.expandvars(exclude_path)) # Expand any glob characters. If found, add the expanded glob to the # list of expanded_paths, which might be empty. if ('*' in exclude_path or '?' in exclude_path): expanded_exclude_paths = expanded_exclude_paths + glob.glob(exclude_path) # If there are no glob character the exclude path is added to the expanded # exclude paths whether the path exists or not. 
else: expanded_exclude_paths.append(exclude_path) if not expanded_paths: return module.fail_json(path=', '.join(paths), expanded_paths=', '.join(expanded_paths), msg='Error, no source paths were found') # If we actually matched multiple files or TRIED to, then # treat this as a multi-file archive archive = globby or os.path.isdir(expanded_paths[0]) or len(expanded_paths) > 1 # Default created file name (for single-file archives) to # <file>.<format> if not dest and not archive: dest = '%s.%s' % (expanded_paths[0], format) # Force archives to specify 'dest' if archive and not dest: module.fail_json(dest=dest, path=', '.join(paths), msg='Error, must specify "dest" when archiving multiple files or trees') archive_paths = [] missing = [] arcroot = '' for path in expanded_paths: # Use the longest common directory name among all the files # as the archive root path if arcroot == '': arcroot = os.path.dirname(path) + os.sep else: for i in range(len(arcroot)): if path[i] != arcroot[i]: break if i < len(arcroot): arcroot = os.path.dirname(arcroot[0:i + 1]) arcroot += os.sep # Don't allow archives to be created anywhere within paths to be removed if remove and os.path.isdir(path) and dest.startswith(path): module.fail_json(path=', '.join(paths), msg='Error, created archive can not be contained in source paths when remove=True') if os.path.lexists(path) and path not in expanded_exclude_paths: archive_paths.append(path) else: missing.append(path) # No source files were found but the named archive exists: are we 'compress' or 'archive' now? if len(missing) == len(expanded_paths) and dest and os.path.exists(dest): # Just check the filename to know if it's an archive or simple compressed file if re.search(r'(\.tar|\.tar\.gz|\.tgz|\.tbz2|\.tar\.bz2|\.tar\.xz|\.zip)$', os.path.basename(dest), re.IGNORECASE): state = 'archive' else: state = 'compress' # Multiple files, or globbiness elif archive: if not archive_paths: # No source files were found, but the archive is there. if os.path.lexists(dest): state = 'archive' elif missing: # SOME source files were found, but not all of them state = 'incomplete' archive = None size = 0 errors = [] if os.path.lexists(dest): size = os.path.getsize(dest) if state != 'archive': if check_mode: changed = True else: try: # Slightly more difficult (and less efficient!) compression using zipfile module if format == 'zip': arcfile = zipfile.ZipFile(dest, 'w', zipfile.ZIP_DEFLATED, True) # Easier compression using tarfile module elif format == 'gz' or format == 'bz2': arcfile = tarfile.open(dest, 'w|' + format) # python3 tarfile module allows xz format but for python2 we have to create the tarfile # in memory and then compress it with lzma. 
elif format == 'xz': arcfileIO = io.BytesIO() arcfile = tarfile.open(fileobj=arcfileIO, mode='w') # Or plain tar archiving elif format == 'tar': arcfile = tarfile.open(dest, 'w') match_root = re.compile('^%s' % re.escape(arcroot)) for path in archive_paths: if os.path.isdir(path): # Recurse into directories for dirpath, dirnames, filenames in os.walk(path, topdown=True): if not dirpath.endswith(os.sep): dirpath += os.sep for dirname in dirnames: fullpath = dirpath + dirname arcname = match_root.sub('', fullpath) try: if format == 'zip': arcfile.write(fullpath, arcname) else: arcfile.add(fullpath, arcname, recursive=False) except Exception as e: errors.append('%s: %s' % (fullpath, to_native(e))) for filename in filenames: fullpath = dirpath + filename arcname = match_root.sub('', fullpath) if not filecmp.cmp(fullpath, dest): try: if format == 'zip': arcfile.write(fullpath, arcname) else: arcfile.add(fullpath, arcname, recursive=False) successes.append(fullpath) except Exception as e: errors.append('Adding %s: %s' % (path, to_native(e))) else: if format == 'zip': arcfile.write(path, match_root.sub('', path)) else: arcfile.add(path, match_root.sub('', path), recursive=False) successes.append(path) except Exception as e: module.fail_json(msg='Error when writing %s archive at %s: %s' % (format == 'zip' and 'zip' or ('tar.' + format), dest, to_native(e)), exception=format_exc()) if arcfile: arcfile.close() state = 'archive' if format == 'xz': with lzma.open(dest, 'wb') as f: f.write(arcfileIO.getvalue()) arcfileIO.close() if errors: module.fail_json(msg='Errors when writing archive at %s: %s' % (dest, '; '.join(errors))) if state in ['archive', 'incomplete'] and remove: for path in successes: try: if os.path.isdir(path): shutil.rmtree(path) elif not check_mode: os.remove(path) except OSError as e: errors.append(path) if errors: module.fail_json(dest=dest, msg='Error deleting some source files: ' + str(e), files=errors) # Rudimentary check: If size changed then file changed. Not perfect, but easy. if not check_mode and os.path.getsize(dest) != size: changed = True if successes and state != 'incomplete': state = 'archive' # Simple, single-file compression else: path = expanded_paths[0] # No source or compressed file if not (os.path.exists(path) or os.path.lexists(dest)): state = 'absent' # if it already exists and the source file isn't there, consider this done elif not os.path.lexists(path) and os.path.lexists(dest): state = 'compress' else: if module.check_mode: if not os.path.exists(dest): changed = True else: size = 0 f_in = f_out = arcfile = None if os.path.lexists(dest): size = os.path.getsize(dest) try: if format == 'zip': arcfile = zipfile.ZipFile(dest, 'w', zipfile.ZIP_DEFLATED, True) arcfile.write(path, path[len(arcroot):]) arcfile.close() state = 'archive' # because all zip files are archives elif format == 'tar': arcfile = tarfile.open(dest, 'w') arcfile.add(path) arcfile.close() else: f_in = open(path, 'rb') if format == 'gz': f_out = gzip.open(dest, 'wb') elif format == 'bz2': f_out = bz2.BZ2File(dest, 'wb') elif format == 'xz': f_out = lzma.LZMAFile(dest, 'wb') else: raise OSError("Invalid format") shutil.copyfileobj(f_in, f_out) successes.append(path) except OSError as e: module.fail_json(path=path, dest=dest, msg='Unable to write to compressed file: %s' % to_native(e), exception=format_exc()) if arcfile: arcfile.close() if f_in: f_in.close() if f_out: f_out.close() # Rudimentary check: If size changed then file changed. Not perfect, but easy. 
if os.path.getsize(dest) != size: changed = True state = 'compress' if remove and not check_mode: try: os.remove(path) except OSError as e: module.fail_json(path=path, msg='Unable to remove source file: %s' % to_native(e), exception=format_exc()) params['path'] = dest file_args = module.load_file_common_arguments(params) if not check_mode: changed = module.set_fs_attributes_if_different(file_args, changed) module.exit_json(archived=successes, dest=dest, changed=changed, state=state, arcroot=arcroot, missing=missing, expanded_paths=expanded_paths, expanded_exclude_paths=expanded_exclude_paths) if __name__ == '__main__': main()
""" Provides authorization functions for Mojang's login and session servers """ import hashlib import json # This is for python2 compatibility try: import urllib.request as request from urllib.error import URLError except ImportError: import urllib2 as request from urllib2 import URLError import logging import os from spockbot.mcp.yggdrasil import YggdrasilCore from spockbot.plugins.base import PluginBase, pl_announce logger = logging.getLogger('spockbot') # This function courtesy of barneygale def java_hex_digest(digest): d = int(digest.hexdigest(), 16) if d >> 39 * 4 & 0x8: d = "-%x" % ((-d) & (2 ** (40 * 4) - 1)) else: d = "%x" % d return d class AuthCore(object): def __init__(self, event, online_mode, auth_timeout): self.online_mode = online_mode self.auth_timeout = auth_timeout self.__event = event self.ygg = YggdrasilCore() self._shared_secret = None self._username = None def get_username(self): return self._username def set_username(self, username): self.ygg.username = username username = property(get_username, set_username) def set_password(self, password): if password and not self.online_mode: logger.warning("PASSWORD PROVIDED WITH ONLINE_MODE == FALSE") logger.warning("YOU PROBABLY DIDN'T WANT TO DO THAT") self.ygg.password = password password = property(lambda x: bool(x.ygg.password), set_password) def set_client_token(self, client_token): if not self.online_mode: logger.warning("CLIENT TOKEN PROVIDED WITH ONLINE_MODE == FALSE") logger.warning("YOU PROBABLY DIDN'T WANT TO DO THAT") self.ygg.client_token = client_token client_token = property( lambda x: bool(x.ygg.client_token), set_client_token ) def set_auth_token(self, auth_token): if not self.online_mode: logger.warning("AUTH TOKEN PROVIDED WITH ONLINE_MODE == FALSE") logger.warning("YOU PROBABLY DIDN'T WANT TO DO THAT") self.ygg.auth_token = auth_token auth_token = property( lambda x: bool(x.ygg.auth_token), set_auth_token ) def get_shared_secret(self): self._shared_secret = self._shared_secret or os.urandom(16) return self._shared_secret shared_secret = property(get_shared_secret) def start_session(self): if not self.online_mode: self._username = self.ygg.username return True if self.ygg.login(): self._username = self.ygg.selected_profile['name'] return True self.__event.emit('auth_session_error') return False def send_session_auth(self, pubkey_raw, server_id_raw): server_id = java_hex_digest(hashlib.sha1( server_id_raw.encode('ascii') + self.shared_secret + pubkey_raw )) logger.info('Attempting to authenticate with Mojang session server') url = "https://sessionserver.mojang.com/session/minecraft/join" data = json.dumps({ 'accessToken': self.ygg.access_token, 'selectedProfile': self.ygg.selected_profile, 'serverId': server_id, }).encode('utf-8') headers = {'Content-Type': 'application/json'} req = request.Request(url, data, headers) try: rep = request.urlopen( req, timeout=self.auth_timeout ).read().decode('ascii') except URLError: rep = "Couldn't connect to sessionserver.mojang.com" if rep: logger.warning('Mojang session auth response: %s', rep) logger.info('Session authentication successful') @pl_announce('Auth') class AuthPlugin(PluginBase): requires = 'Event' defaults = { 'online_mode': True, 'auth_timeout': 3, # No idea how long this should be, 3s seems good 'auth_quit': True, 'sess_quit': True, } events = { 'auth_login_error': 'handle_auth_error', 'auth_session_error': 'handle_session_error', } def __init__(self, ploader, settings): super(AuthPlugin, self).__init__(ploader, settings) self.sess_quit = 
self.settings['sess_quit'] self.auth_quit = self.settings['auth_quit'] ploader.provides('Auth', AuthCore( self.event, self.settings['online_mode'], self.settings['auth_timeout'] )) def handle_auth_error(self, name, data): if self.auth_quit: logger.error('AUTH: Session authentication error, calling kill') self.event.kill() def handle_session_error(self, name, data): if self.sess_quit: logger.error('AUTH: Session start error, calling kill') self.event.kill()
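# Sanity check for java_hex_digest above, using the well-known test vectors
# from the Minecraft protocol documentation (Java-style signed
# two's-complement hex of a SHA-1 digest). The import path is an assumption
# about where this module lives in the spockbot tree.
import hashlib

from spockbot.plugins.core.auth import java_hex_digest  # assumed module path

assert java_hex_digest(hashlib.sha1(b'Notch')) == \
    '4ed1f46bbe04bc756bcb17c0c7ce3e4632f06a48'
assert java_hex_digest(hashlib.sha1(b'jeb_')) == \
    '-7c9d5b0044c130109a5d7b5fb5c317c02b4e28c1'
assert java_hex_digest(hashlib.sha1(b'simon')) == \
    '88e16a1019277b15d58faf0541e11910eb756f6'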
# -*- coding: utf-8 -*- """ The example shows how to string together several text objects. HISTORY ------- On the matplotlib-users list back in February 2012, Gökhan Sever asked the following question: Is there a way in matplotlib to partially specify the color of a string? Example: plt.ylabel("Today is cloudy.") How can I show "today" as red, "is" as green and "cloudy." as blue? Thanks. Paul Ivanov responded with this answer: """ import matplotlib.pyplot as plt from matplotlib import transforms def rainbow_text(x, y, strings, colors, ax=None, **kw): """ Take a list of ``strings`` and ``colors`` and place them next to each other, with text strings[i] being shown in colors[i]. This example shows how to do both vertical and horizontal text, and will pass all keyword arguments to plt.text, so you can set the font size, family, etc. The text will get added to the ``ax`` axes, if provided, otherwise the currently active axes will be used. """ if ax is None: ax = plt.gca() t = ax.transData canvas = ax.figure.canvas # horizontal version for s, c in zip(strings, colors): text = ax.text(x, y, " " + s + " ", color=c, transform=t, **kw) text.draw(canvas.get_renderer()) ex = text.get_window_extent() t = transforms.offset_copy(text._transform, x=ex.width, units='dots') # vertical version for s, c in zip(strings, colors): text = ax.text(x, y, " " + s + " ", color=c, transform=t, rotation=90, va='bottom', ha='center', **kw) text.draw(canvas.get_renderer()) ex = text.get_window_extent() t = transforms.offset_copy(text._transform, y=ex.height, units='dots') rainbow_text(0, 0, "all unicorns poop rainbows ! ! !".split(), ['red', 'cyan', 'brown', 'green', 'blue', 'purple', 'black'], size=18) plt.show()
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.

from openerp import api, fields, models, _
from openerp.exceptions import UserError


class SaleOrderLine(models.Model):
    _inherit = "sale.order.line"

    @api.multi
    def _compute_analytic(self, domain=None):
        lines = {}
        if not domain:
            domain = [('so_line', 'in', self.ids), ('amount', '<=', 0.0)]
        data = self.env['account.analytic.line'].read_group(
            domain,
            ['so_line', 'unit_amount', 'product_uom_id'],
            ['product_uom_id', 'so_line'], lazy=False)
        for d in data:
            if not d['product_uom_id']:
                continue
            line = self.browse(d['so_line'][0])
            lines.setdefault(line, 0.0)
            uom = self.env['product.uom'].browse(d['product_uom_id'][0])
            if line.product_uom.category_id == uom.category_id:
                qty = self.env['product.uom']._compute_qty_obj(uom, d['unit_amount'], line.product_uom)
            else:
                qty = d['unit_amount']
            lines[line] += qty
        for line, qty in lines.items():
            line.qty_delivered = qty
        return True


class AccountAnalyticLine(models.Model):
    _inherit = "account.analytic.line"

    so_line = fields.Many2one('sale.order.line', string='Sale Order Line')

    def _get_invoice_price(self, order):
        if self.unit_amount == 0.0:
            return 0.0
        price_unit = abs(self.amount / self.unit_amount)
        if self.currency_id and self.currency_id != order.currency_id:
            price_unit = self.currency_id.compute(price_unit, order.currency_id)
        return price_unit

    def _get_sale_order_line_vals(self):
        order = self.env['sale.order'].search([('project_id', '=', self.account_id.id)], limit=1)
        if not order:
            return False
        if order.state != 'sale':
            # interpolate after translation lookup, not before
            raise UserError(_('The Sale Order %s linked to the Analytic Account must be validated before registering expenses.') % order.name)

        last_so_line = self.env['sale.order.line'].search([('order_id', '=', order.id)], order='sequence desc', limit=1)
        last_sequence = last_so_line.sequence + 1 if last_so_line else 100

        fpos = order.fiscal_position_id or order.partner_id.property_account_position_id
        taxes = fpos.map_tax(self.product_id.taxes_id)
        price = self._get_invoice_price(order)

        return {
            'order_id': order.id,
            'name': self.name,
            'sequence': last_sequence,
            'price_unit': price,
            'tax_id': [x.id for x in taxes],
            'discount': 0.0,
            'product_id': self.product_id.id,
            'product_uom': self.product_uom_id.id,
            'product_uom_qty': 0.0,
            'qty_delivered': self.unit_amount,
        }

    def _get_sale_order_line(self, vals=None):
        result = dict(vals or {})
        sol = result.get('so_line', False) or self.so_line
        if not sol and self.account_id and self.product_id and self.product_id.invoice_policy in ('cost', 'order'):
            sol = self.env['sale.order.line'].search([
                ('order_id.project_id', '=', self.account_id.id),
                ('state', '=', 'sale'),
                ('product_id', '=', self.product_id.id)],
                limit=1)
            # Use the existing SO line only if the unit prices are the same,
            # otherwise we create a new line
            if sol.price_unit == self._get_invoice_price(sol.order_id):
                result.update({'so_line': sol.id})
            else:
                sol = self.so_line
        if not sol and self.account_id and self.product_id and self.product_id.invoice_policy == 'cost':
            order_line_vals = self._get_sale_order_line_vals()
            if order_line_vals:
                sol = self.env['sale.order.line'].create(order_line_vals)
                sol._compute_tax_id()
                result.update({'so_line': sol.id})
        return result

    @api.multi
    def write(self, values):
        if self._context.get('create', False):
            return super(AccountAnalyticLine, self).write(values)

        todo = self.mapped('so_line')
        result = super(AccountAnalyticLine, self).write(values)
        if 'so_line' in values:
            todo |= self.mapped('so_line')
        for line in self:
            # resolve the SO line per record, not from the whole recordset
            res = line._get_sale_order_line(vals=values)
            super(AccountAnalyticLine, line).write(res)
            if 'so_line' in res:
                todo |= line.mapped('so_line')
        todo._compute_analytic()
        return result

    @api.model
    def create(self, values):
        line = super(AccountAnalyticLine, self).create(values)
        res = line._get_sale_order_line(vals=values)
        line.with_context(create=True).write(res)
        line.mapped('so_line')._compute_analytic()
        return line
# -*- coding: utf-8 -*- """ plumbca.logging ~~~~~~~~~~~~~~~ Implements the logging support for Plumbca. :copyright: (c) 2015 by Jason Lai. :license: BSD, see LICENSE for more details. """ import logging.config import logging from .config import DefaultConf LOGGING = { 'version': 1, 'disable_existing_loggers': True, 'formatters': { 'simple': {'format': '[%(asctime)s] %(levelname)s in %(module)s: %(message)s'}, 'debug': { 'format': '-' * 80 + '\n' + '[%(asctime)s] %(levelname)s in %(module)s [%(pathname)s:' + '%(lineno)d]:\n%(message)s\n' + '-' * 80 } }, 'filters': { 'special': {'foo': 'bar'} }, 'handlers': { 'console': { 'level': 'DEBUG', 'class': 'logging.StreamHandler', 'formatter': 'simple' }, 'activity_rotating_file': { 'level': 'DEBUG', 'class': 'logging.handlers.RotatingFileHandler', 'formatter': 'simple', 'filename': DefaultConf['activity_log'], 'backupCount': 9, 'maxBytes': 52428800 }, 'writing_rotating_file': { 'level': 'DEBUG', 'class': 'logging.handlers.RotatingFileHandler', 'formatter': 'simple', 'filename': DefaultConf['write_log'], 'backupCount': 19, 'maxBytes': 52428800 }, 'errors_rotating_file': { 'level': 'ERROR', 'class': 'logging.handlers.RotatingFileHandler', 'formatter': 'debug', 'filename': DefaultConf['errors_log'], 'backupCount': 9, 'maxBytes': 52428800 }, }, 'loggers': { 'write-opes': { 'handlers': ['console', 'writing_rotating_file'] if DefaultConf.get('debug', '') == 'yes' else ['writing_rotating_file'], 'propagate': True, 'level': 'INFO', }, 'activity': { 'handlers': ['console', 'activity_rotating_file'] if DefaultConf.get('debug', '') == 'yes' else ['activity_rotating_file'], 'propagate': True, 'level': 'INFO', }, 'errors': { 'handlers': ['console', 'errors_rotating_file'] if DefaultConf.get('debug', '') == 'yes' else ['errors_rotating_file'], 'level': 'ERROR', 'propagate': False, } } } logging.config.dictConfig(LOGGING) # activity_logger = logging.getLogger('activity') # errors_logger = logging.getLogger('errors')
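# Consumer sketch for the config above: importing this module applies
# dictConfig, after which modules fetch loggers by the names declared under
# 'loggers'. The log messages here are placeholders.
import logging

import plumbca.logging  # noqa: F401  (applies dictConfig on import)

activity_logger = logging.getLogger('activity')
write_logger = logging.getLogger('write-opes')
errors_logger = logging.getLogger('errors')

activity_logger.info('collection accessed')
write_logger.info('write operation flushed')
errors_logger.error('unexpected failure', exc_info=True)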
#!/usr/bin/env python # Copyright (c) 2012 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Generate a C++ header from ibus_input_methods.txt. This program generates a C++ header file containing the information on available input methods. It parses input_methods.txt, and then generates a static array definition from the information extracted. The input and output file names are specified on the command line. Run it like: gen_input_methods.py input_methods.txt input_methods.h It will produce output that looks like: // This file is automatically generated by gen_input_methods.py #ifndef CHROME_BROWSER_CHROMEOS_INPUT_METHOD_INPUT_METHODS_H_ #define CHROME_BROWSER_CHROMEOS_INPUT_METHOD_INPUT_METHODS_H_ namespace chromeos { namespace input_method { struct InputMethodsInfo { const char* input_method_id; const char* language_code; const char* xkb_keyboard_id; bool is_login_keyboard; }; const InputMethodsInfo kInputMethods[] = { {"xkb:us::eng", "en-US", "us", true}, {"xkb:us:dvorak:eng", "en-US", "us(dvorak)", true}, {"xkb:be::fra", "fr", "be", true}, {"xkb:br::por", "pt-BR", "br", true}, {"xkb:ru::rus", "ru", "ru", false}, }; } // namespace input_method } // namespace chromeos #endif // CHROME_BROWSER_CHROMEOS_INPUT_METHOD_INPUT_METHODS_H_ """ import fileinput import re import sys OUTPUT_HEADER = """// Automatically generated by gen_input_methods.py #ifndef CHROME_BROWSER_CHROMEOS_INPUT_METHOD_INPUT_METHODS_H_ #define CHROME_BROWSER_CHROMEOS_INPUT_METHOD_INPUT_METHODS_H_ namespace chromeos { namespace input_method { struct InputMethodsInfo { const char* input_method_id; const char* language_code; const char* xkb_layout_id; bool is_login_keyboard; }; const InputMethodsInfo kInputMethods[] = { """ CPP_FORMAT = '#if %s\n' ENGINE_FORMAT = (' {"%(input_method_id)s", "%(language_code)s", ' + '"%(xkb_layout_id)s", %(is_login_keyboard)s},\n') OUTPUT_FOOTER = """ }; } // namespace input_method } // namespace chromeos #endif // CHROME_BROWSER_CHROMEOS_INPUT_METHOD_INPUT_METHODS_H_ """ def CreateEngineHeader(engines): """Create the header file from a list of engines. Arguments: engines: list of engine objects Returns: The text of a C++ header file containing the engine data. """ output = [] output.append(OUTPUT_HEADER) for engine in engines: if engine.has_key('if'): output.append(CPP_FORMAT % engine['if']) output.append(ENGINE_FORMAT % engine) if engine.has_key('if'): output.append('#endif\n') output.append(OUTPUT_FOOTER) return "".join(output) def main(argv): if len(argv) != 3: print 'Usage: gen_input_methods.py [whitelist] [output]' sys.exit(1) engines = [] for line in fileinput.input(sys.argv[1]): line = line.strip() if not line or re.match(r'#', line): continue columns = line.split() assert len(columns) == 3 or len(columns) == 4, "Invalid format: " + line engine = {} engine['input_method_id'] = columns[0] engine['xkb_layout_id'] = columns[1] engine['language_code'] = columns[2] is_login_keyboard = "false" if len(columns) == 4: assert columns[3] == "login", "Invalid attribute: " + columns[3] is_login_keyboard = "true" engine['is_login_keyboard'] = is_login_keyboard engines.append(engine) output = CreateEngineHeader(engines) output_file = open(sys.argv[2], 'w') output_file.write(output) if __name__ == '__main__': main(sys.argv)
""" MOST OF THIS CODE IS NOT USED ITS COPY/PASTED AND LEFT HERE FOR CONVENIENCE """ import os import sys # in case our module isn't installed (running from this folder) if not os.path.abspath('../../../') in sys.path: sys.path.append('../../../') # helps spyder get docs import swhlab import swhlab.common as cm import matplotlib.pyplot as plt import numpy as np import warnings # suppress VisibleDeprecationWarning warning warnings.filterwarnings("ignore", category=np.VisibleDeprecationWarning) def analyzeSweep(abf,label=None): Y=abf.sweepYsmartbase()[abf.pointsPerSec*.5:] #Y=abf.sweepY[abf.pointsPerSec*.5:] AV,SD=np.average(Y),np.std(Y) dev=5 # number of stdevs from the avg to set the range R1,R2=[(AV-SD)*dev,(AV+SD)*dev] nBins=1000 hist,bins=np.histogram(Y,bins=nBins,range=[R1,R2],density=True) histSmooth=abf.convolve(hist,cm.kernel_gaussian(nBins/5)) peakI=np.where(histSmooth==max(histSmooth))[0][0] # center the peak at 0 pA hist=np.roll(hist,int(nBins/2-peakI)) histSmooth=np.roll(histSmooth,int(nBins/2-peakI)) # normalize height to 1 hist,histSmooth=hist/max(histSmooth),histSmooth/max(histSmooth) plt.plot(histSmooth,label=label,lw=3,alpha=.5) if __name__=="__main__": #abfFile=R"C:\Users\scott\Documents\important\demodata\abfs\16d07022.abf" abfFile=R"X:\Data\2P01\2016\2016-09-01 PIR TGOT\16d07022.abf" abf=swhlab.ABF(abfFile) abf.kernel=abf.kernel_gaussian(sizeMS=500) # kernel for smart baseline plt.figure(figsize=(10,10)) # for sweep in range(abf.sweeps): for sweep in [175,200,375]: abf.setsweep(sweep) analyzeSweep(abf,label=str(sweep)) print("Sweep",sweep) plt.legend() plt.show() print("DONE")
import sys
import os.path
import time

import lzw


def compress(uncompressed):
    """Compress a string to a list of output symbols."""

    # Build the dictionary mapping single characters to codes 0..255.
    dict_size = 256
    dictionary = {chr(i): i for i in range(dict_size)}

    w = ""
    result = []
    for c in uncompressed:
        wc = w + c
        if wc in dictionary:
            w = wc
        else:
            result.append(dictionary[w])
            # Add wc to the dictionary.
            dictionary[wc] = dict_size
            dict_size += 1
            w = c

    # Output the code for w.
    if w:
        result.append(dictionary[w])
    return result


def decompress(compressed):
    """Decompress a list of output ks to a string."""
    from cStringIO import StringIO

    # Build the dictionary mapping codes 0..255 to single characters.
    dict_size = 256
    dictionary = dict((i, chr(i)) for i in xrange(dict_size))

    # use StringIO, otherwise this becomes O(N^2)
    # due to string concatenation in a loop
    result = StringIO()
    w = chr(compressed.pop(0))
    result.write(w)
    for k in compressed:
        if k in dictionary:
            entry = dictionary[k]
        elif k == dict_size:
            entry = w + w[0]
        else:
            raise ValueError('Bad compressed k: %s' % k)
        result.write(entry)

        # Add w+entry[0] to the dictionary.
        dictionary[dict_size] = w + entry[0]
        dict_size += 1

        w = entry
    return result.getvalue()


def printSummary(file1, file2):
    """
    printSummary() prints out the number of bytes in the original file
    and in the result file.
    @params: two files that are to be checked.
    @return: n/a.
    """
    # Checks if the files exist in the current directory.
    if (not os.path.isfile(file1)) or (not os.path.isfile(file2)):
        sys.stderr.write('error: %s and/or %s not found\n' % (file1, file2))
        sys.exit(1)

    # Finds out how many bytes in each file.
    f1_bytes = os.path.getsize(file1)
    f2_bytes = os.path.getsize(file2)

    sys.stderr.write(str(file1) + ': ' + str(f1_bytes) + ' bytes\n')
    sys.stderr.write(str(file2) + ': ' + str(f2_bytes) + ' bytes\n')


def main():
    start_c = time.time()

    file = 'MTP_Prev.txt'
    f = open(file, 'rb')
    comp = compress(f.read())
    f.close()

    n = len(bin(max(comp))) - 2  # bits needed for the largest code

    # Pack each (n+1)-bit code into a flat bit list, then into bytes.
    enviar = []
    aux = []
    for a in comp:
        aux.extend(lzw.inttobits(a, n + 1))

    for i in range(0, len(aux), 8):
        r = aux[i:i + 8]
        char = 0
        for p in r:
            char = char << 1
            char = char | p
        enviar.append(char)

    final_c = time.time()
    print("Total time compression: " + str(final_c - start_c))

    start_d = time.time()

    # Unpack the bytes back into the original (n+1)-bit codes.
    toDecompress_mid = []
    pos = 0
    for x in enviar:
        if pos != (len(enviar) - 1):
            binary = lzw.inttobits(x, 8)
        else:
            binary = lzw.inttobits(x, (len(comp) * (n + 1) - pos * 8))
        toDecompress_mid.extend(binary)
        pos += 1

    toDecompress = []
    for j in range(0, len(toDecompress_mid), n + 1):
        l = toDecompress_mid[j: j + (n + 1)]
        toDecompress.append(lzw.intfrombits(l))

    str_decompressed = decompress(toDecompress)

    outputFile = open("Deco_test3.txt", "wb")
    outputFile.write(str_decompressed)
    outputFile.close()

    final_d = time.time()
    print("Total time decompression: " + str(final_d - start_d))


if __name__ == '__main__':
    main()
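# Round-trip sanity check for the compress/decompress pair above (Python 2,
# matching the module's xrange/cStringIO usage); it assumes both functions
# are in scope. The classic LZW demo string exercises repeated-substring
# coding:
data = 'TOBEORNOTTOBEORTOBEORNOT'
assert decompress(compress(data)) == data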
import urlparse
import httplib_fork as httplib
from ws4py.client.threadedclient import WebSocketClient
import Queue
import socket
import re


class HttpResponse:
    def __init__(self, method, url, headers={}, body=None, async=False, load=True):
        headers = headers.copy()
        u = urlparse.urlparse(url)
        kwargs = {'timeout': 1.0}
        if u.scheme == 'http':
            conn = httplib.HTTPConnection(u.netloc, **kwargs)
        elif u.scheme == 'https':
            conn = httplib.HTTPSConnection(u.netloc, **kwargs)
        else:
            assert False, "Unsupported scheme " + u.scheme
        assert u.fragment == ''
        path = u.path + ('?' + u.query if u.query else '')
        self.conn = conn
        if not body:
            if method == 'POST':
                # The spec says: "Applications SHOULD use this field
                # to indicate the transfer-length of the message-body,
                # unless this is prohibited by the rules in section
                # 4.4."
                # http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.13
                # While httplib sets it only if there is body.
                headers['Content-Length'] = 0
            conn.request(method, path, headers=headers)
        else:
            if isinstance(body, unicode):
                body = body.encode('utf-8')
            conn.request(method, path, headers=headers, body=body)
        if load:
            if not async:
                self._load()
            else:
                self._async_load()

    def _get_status(self):
        return self.res.status
    status = property(_get_status)

    def __getitem__(self, key):
        return self.headers.get(key.lower())

    def _load(self):
        self.res = self.conn.getresponse()
        self.headers = dict((k.lower(), v) for k, v in self.res.getheaders())
        self.body = self.res.read()
        self.close()

    def close(self):
        if self.conn:
            self.conn.close()
            self.conn = None

    def _async_load(self):
        self.res = self.conn.getresponse()
        self.headers = dict((k.lower(), v) for k, v in self.res.getheaders())

    def read(self):
        data = self.res.read(10240)
        if data:
            return data
        else:
            self.close()
            return None


def GET(url, **kwargs):
    return HttpResponse('GET', url, **kwargs)


def GET_async(url, **kwargs):
    return HttpResponse('GET', url, async=True, **kwargs)


def POST(url, **kwargs):
    return HttpResponse('POST', url, **kwargs)


def POST_async(url, **kwargs):
    return HttpResponse('POST', url, async=True, **kwargs)


def OPTIONS(url, **kwargs):
    return HttpResponse('OPTIONS', url, **kwargs)


class WebSocket8Client(object):
    class ConnectionClosedException(Exception):
        pass

    def __init__(self, url):
        queue = Queue.Queue()
        self.queue = queue

        class IntWebSocketClient(WebSocketClient):
            def received_message(self, m):
                queue.put(unicode(str(m), 'utf-8'))

            def read_from_connection(self, amount):
                r = super(IntWebSocketClient, self).read_from_connection(amount)
                if not r:
                    queue.put(Ellipsis)
                return r

        self.client = IntWebSocketClient(url)
        self.client.connect()

    def close(self):
        if self.client:
            self.client.running = False
            self.client.close()
            self.client._th.join()
            self.client = None

    def send(self, data):
        self.client.send(data)

    def recv(self):
        try:
            r = self.queue.get(timeout=1.0)
            if r is Ellipsis:
                raise self.ConnectionClosedException()
            return r
        except:
            self.close()
            raise


def recvline(s):
    b = []
    c = None
    while c != '\n':
        c = s.recv(1)
        b.append(c)
    return ''.join(b)


class CaseInsensitiveDict(object):
    def __init__(self, *args, **kwargs):
        self.lower = {}
        self.d = dict(*args, **kwargs)
        for k in self.d:
            self[k] = self.d[k]

    def __getitem__(self, key, *args, **kwargs):
        pkey = self.lower.setdefault(key.lower(), key)
        return self.d.__getitem__(pkey, *args, **kwargs)

    def __setitem__(self, key, *args, **kwargs):
        pkey = self.lower.setdefault(key.lower(), key)
        return self.d.__setitem__(pkey, *args, **kwargs)

    def items(self):
        for k in self.lower.values():
            yield (k, self[k])

    def __repr__(self):
return repr(self.d) def __str__(self): return str(self.d) def get(self, key, *args, **kwargs): pkey = self.lower.setdefault(key.lower(), key) return self.d.get(pkey, *args, **kwargs) def __contains__(self, key): pkey = self.lower.setdefault(key.lower(), key) return pkey in self.d class Response(object): def __repr__(self): return '<Response HTTP/%s %s %r %r>' % ( self.http, self.status, self.description, self.headers) def __str__(self): return repr(self) class RawHttpConnection(object): def __init__(self, url): u = urlparse.urlparse(url) self.s = socket.create_connection((u.hostname, u.port), timeout=1) def request(self, method, url, headers={}, body=None, timeout=1, http="1.1"): headers = CaseInsensitiveDict(headers) if method == 'POST': body = body or '' u = urlparse.urlparse(url) headers['Host'] = u.hostname + ':' + str(u.port) if u.port else u.hostname if body is not None: headers['Content-Length'] = str(len(body)) req = ["%s %s HTTP/%s" % (method, u.path, http)] for k, v in headers.items(): req.append( "%s: %s" % (k, v) ) req.append('') req.append('') self.s.sendall('\r\n'.join(req)) if body: self.s.sendall(body) head = recvline(self.s) r = re.match(r'HTTP/(?P<version>\S+) (?P<status>\S+) (?P<description>.*)', head) resp = Response() resp.http = r.group('version') resp.status = int(r.group('status')) resp.description = r.group('description').rstrip('\r\n') resp.headers = CaseInsensitiveDict() while True: header = recvline(self.s) if header in ['\n', '\r\n']: break k, _, v = header.partition(':') resp.headers[k] = v.lstrip().rstrip('\r\n') return resp def read(self, size=None): if size is None: # A single packet by default return self.s.recv(999999) data = [] while size > 0: c = self.s.recv(size) if not c: raise Exception('Socket closed!') size -= len(c) data.append( c ) return ''.join(data) def closed(self): # To check if socket is being closed, we need to recv and see # if the response is empty. t = self.s.settimeout(0.1) r = self.s.recv(1) == '' if not r: raise Exception('Socket not closed!') self.s.settimeout(t) return r def read_chunk(self): line = recvline(self.s).rstrip('\r\n') bytes = int(line, 16) + 2 # Additional \r\n return self.read(bytes)[:-2] def send(self, data): self.s.sendall(data)
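# Behavior sketch for CaseInsensitiveDict above: the first spelling of a key
# is remembered for display, while lookups ignore case (assumes the class is
# in scope, e.g. imported from this test-utils module).
h = CaseInsensitiveDict()
h['Content-Type'] = 'application/json'
assert h['content-type'] == 'application/json'
assert h.get('CONTENT-TYPE') == 'application/json'
assert 'content-TYPE' in h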
from __future__ import unicode_literals, division, absolute_import from builtins import * # pylint: disable=unused-import, redefined-builtin import logging from bs4 import NavigableString from requests import RequestException from flexget import plugin from flexget.entry import Entry from flexget.event import event from flexget.utils.cached_input import cached from flexget.utils.imdb import extract_id from flexget.utils.soup import get_soup log = logging.getLogger('sceper') class InputSceper(object): """ Uses sceper.ws category url as input. Example:: sceper: http://sceper.ws/category/movies/movies-dvd-rip """ schema = {'type': 'string', 'format': 'url'} def parse_site(self, url, task): """Parse configured url and return releases array""" try: page = task.requests.get(url).content except RequestException as e: raise plugin.PluginError('Error getting input page: %s' % e) soup = get_soup(page) releases = [] for entry in soup.find_all('div', attrs={'class': 'entry'}): release = {} title = entry.find('h2') if not title: log.debug('No h2 entrytitle') continue release['title'] = title.a.contents[0].strip() log.debug('Processing title %s' % (release['title'])) for link in entry.find_all('a'): # no content in the link if not link.contents: continue link_name = link.contents[0] if link_name is None: continue if not isinstance(link_name, NavigableString): continue link_name = link_name.strip().lower() if link.has_attr('href'): link_href = link['href'] else: continue log.debug('found link %s -> %s' % (link_name, link_href)) # handle imdb link if link_name.lower() == 'imdb': log.debug('found imdb link %s' % link_href) release['imdb_id'] = extract_id(link_href) # test if entry with this url would be rewritable by known plugins (ie. downloadable) temp = {} temp['title'] = release['title'] temp['url'] = link_href urlrewriting = plugin.get_plugin_by_name('urlrewriting') if urlrewriting['instance'].url_rewritable(task, temp): release['url'] = link_href log.trace('--> accepting %s (resolvable)' % link_href) else: log.trace('<-- ignoring %s (non-resolvable)' % link_href) # reject if no torrent link if 'url' not in release: from flexget.utils.log import log_once log_once('%s skipped due to missing or unsupported (unresolvable) download link' % (release['title']), log) else: releases.append(release) return releases @cached('sceper') @plugin.internet(log) def on_task_input(self, task, config): releases = self.parse_site(config, task) return [Entry(release) for release in releases] @event('plugin.register') def register_plugin(): plugin.register(InputSceper, 'sceper', api_ver=2)
# -*- coding: utf-8 -*- # # This file is part of Zenodo. # Copyright (C) 2016 CERN. # # Zenodo is free software; you can redistribute it # and/or modify it under the terms of the GNU General Public License as # published by the Free Software Foundation; either version 2 of the # License, or (at your option) any later version. # # Zenodo is distributed in the hope that it will be # useful, but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Zenodo; if not, write to the # Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, # MA 02111-1307, USA. # # In applying this license, CERN does not # waive the privileges and immunities granted to it by virtue of its status # as an Intergovernmental Organization or submit itself to any jurisdiction. """Record modification prior to indexing.""" from __future__ import absolute_import, print_function import copy from flask import current_app from invenio_pidrelations.contrib.records import index_siblings from invenio_pidrelations.contrib.versioning import PIDVersioning from invenio_pidrelations.serializers.utils import serialize_relations from invenio_pidstore.models import PersistentIdentifier from .api import ZenodoDeposit def indexer_receiver(sender, json=None, record=None, index=None, **dummy_kwargs): """Connect to before_record_index signal to transform record for ES. In order to avoid that a record and published deposit differs (e.g. if an embargo task updates the record), every time we index a record we also index the deposit and overwrite the content with that of the record. :param sender: Sender of the signal. :param json: JSON to be passed for the elastic search. :type json: `invenio_records.api.Deposit` :param record: Indexed deposit record. :type record: `invenio_records.api.Deposit` :param index: Elasticsearch index name. :type index: str """ if not index.startswith('deposits-records-'): return if not isinstance(record, ZenodoDeposit): record = ZenodoDeposit(record, model=record.model) if record['_deposit']['status'] == 'published': schema = json['$schema'] pub_record = record.fetch_published()[1] # Temporarily set to draft mode to ensure that `clear` can be called json['_deposit']['status'] = 'draft' json.clear() json.update(copy.deepcopy(pub_record.replace_refs())) # Set back to published mode and restore schema. 
        json['_deposit']['status'] = 'published'
        json['$schema'] = schema
        json['_updated'] = pub_record.updated
    else:
        json['_updated'] = record.updated
    json['_created'] = record.created

    # Compute file count and total file size
    files = json.get('_files', [])
    json['filecount'] = len(files)
    json['size'] = sum([f.get('size', 0) for f in files])

    recid = record.get('recid')
    if recid:
        pid = PersistentIdentifier.get('recid', recid)
        pv = PIDVersioning(child=pid)
        relations = serialize_relations(pid)
        if pv.exists:
            if pv.draft_child_deposit:
                is_last = (pv.draft_child_deposit.pid_value
                           == record['_deposit']['id'])
                relations['version'][0]['is_last'] = is_last
                relations['version'][0]['count'] += 1
        else:
            relations = {'version': [{'is_last': True, 'index': 0}, ]}
        if relations:
            json['relations'] = relations


def index_versioned_record_siblings(sender, action=None, pid=None,
                                    deposit=None):
    """Send previous version of published record for indexing."""
    first_publish = (deposit.get('_deposit', {}).get('pid', {})
                     .get('revision_id')) == 0
    if action == "publish" and first_publish:
        recid_pid, _ = deposit.fetch_published()
        # stdlib logging uses %-style placeholders, not str.format braces
        current_app.logger.info(u'indexing siblings of %s', recid_pid)
        index_siblings(recid_pid, neighbors_eager=True)
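# Wiring sketch: receivers like the ones above are connected to
# invenio-indexer's before_record_index signal roughly as below. The helper
# name and the import path of this module are assumptions for illustration,
# not Zenodo's exact registration code.
from invenio_indexer.signals import before_record_index

from zenodo.modules.deposit.receivers import indexer_receiver  # assumed path


def register_indexer_receivers(app):  # hypothetical helper
    before_record_index.connect(indexer_receiver, sender=app)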
# -*- coding: utf-8 -*- # Part of Odoo. See LICENSE file for full copyright and licensing details. import logging import threading import time import psycopg2 from datetime import datetime from dateutil.relativedelta import relativedelta import pytz import openerp from openerp import SUPERUSER_ID, netsvc, api from openerp.osv import fields, osv from openerp.tools import DEFAULT_SERVER_DATETIME_FORMAT from openerp.tools.safe_eval import safe_eval as eval from openerp.tools.translate import _ from openerp.modules import load_information_from_description_file from openerp.exceptions import UserError _logger = logging.getLogger(__name__) BASE_VERSION = load_information_from_description_file('base')['version'] def str2tuple(s): return eval('tuple(%s)' % (s or '')) _intervalTypes = { 'work_days': lambda interval: relativedelta(days=interval), 'days': lambda interval: relativedelta(days=interval), 'hours': lambda interval: relativedelta(hours=interval), 'weeks': lambda interval: relativedelta(days=7*interval), 'months': lambda interval: relativedelta(months=interval), 'minutes': lambda interval: relativedelta(minutes=interval), } class ir_cron(osv.osv): """ Model describing cron jobs (also called actions or tasks). """ # TODO: perhaps in the future we could consider a flag on ir.cron jobs # that would cause database wake-up even if the database has not been # loaded yet or was already unloaded (e.g. 'force_db_wakeup' or something) # See also openerp.cron _name = "ir.cron" _order = 'name' _columns = { 'name': fields.char('Name', required=True), 'user_id': fields.many2one('res.users', 'User', required=True), 'active': fields.boolean('Active'), 'interval_number': fields.integer('Interval Number',help="Repeat every x."), 'interval_type': fields.selection( [('minutes', 'Minutes'), ('hours', 'Hours'), ('work_days','Work Days'), ('days', 'Days'),('weeks', 'Weeks'), ('months', 'Months')], 'Interval Unit'), 'numbercall': fields.integer('Number of Calls', help='How many times the method is called,\na negative number indicates no limit.'), 'doall' : fields.boolean('Repeat Missed', help="Specify if missed occurrences should be executed when the server restarts."), 'nextcall' : fields.datetime('Next Execution Date', required=True, help="Next planned execution date for this job."), 'model': fields.char('Object', help="Model name on which the method to be called is located, e.g. 'res.partner'."), 'function': fields.char('Method', help="Name of the method to be called when this job is processed."), 'args': fields.text('Arguments', help="Arguments to be passed to the method, e.g. 
(uid,)."), 'priority': fields.integer('Priority', help='The priority of the job, as an integer: 0 means higher priority, 10 means lower priority.') } _defaults = { 'nextcall' : lambda *a: time.strftime(DEFAULT_SERVER_DATETIME_FORMAT), 'priority' : 5, 'user_id' : lambda obj,cr,uid,context: uid, 'interval_number' : 1, 'interval_type' : 'months', 'numbercall' : 1, 'active' : 1, } def _check_args(self, cr, uid, ids, context=None): try: for this in self.browse(cr, uid, ids, context): str2tuple(this.args) except Exception: return False return True _constraints = [ (_check_args, 'Invalid arguments', ['args']), ] def method_direct_trigger(self, cr, uid, ids, context=None): if context is None: context={} cron_obj = self.browse(cr, uid, ids, context=context) for cron in cron_obj: self._callback(cr, uid, cron_obj.model, cron_obj.function, cron_obj.args, cron_obj.id) return True def _handle_callback_exception(self, cr, uid, model_name, method_name, args, job_id, job_exception): """ Method called when an exception is raised by a job. Simply logs the exception and rollback the transaction. :param model_name: model name on which the job method is located. :param method_name: name of the method to call when this job is processed. :param args: arguments of the method (without the usual self, cr, uid). :param job_id: job id. :param job_exception: exception raised by the job. """ cr.rollback() _logger.exception("Call of self.pool.get('%s').%s(cr, uid, *%r) failed in Job %s" % (model_name, method_name, args, job_id)) def _callback(self, cr, uid, model_name, method_name, args, job_id): """ Run the method associated to a given job It takes care of logging and exception handling. :param model_name: model name on which the job method is located. :param method_name: name of the method to call when this job is processed. :param args: arguments of the method (without the usual self, cr, uid). :param job_id: job id. """ try: args = str2tuple(args) openerp.modules.registry.RegistryManager.check_registry_signaling(cr.dbname) registry = openerp.registry(cr.dbname) if model_name in registry: model = registry[model_name] if hasattr(model, method_name): log_depth = (None if _logger.isEnabledFor(logging.DEBUG) else 1) netsvc.log(_logger, logging.DEBUG, 'cron.object.execute', (cr.dbname,uid,'*',model_name,method_name)+tuple(args), depth=log_depth) if _logger.isEnabledFor(logging.DEBUG): start_time = time.time() getattr(model, method_name)(cr, uid, *args) if _logger.isEnabledFor(logging.DEBUG): end_time = time.time() _logger.debug('%.3fs (%s, %s)' % (end_time - start_time, model_name, method_name)) openerp.modules.registry.RegistryManager.signal_caches_change(cr.dbname) else: msg = "Method `%s.%s` does not exist." % (model_name, method_name) _logger.warning(msg) else: msg = "Model `%s` does not exist." % model_name _logger.warning(msg) except Exception, e: self._handle_callback_exception(cr, uid, model_name, method_name, args, job_id, e) def _process_job(self, job_cr, job, cron_cr): """ Run a given job taking care of the repetition. :param job_cr: cursor to use to execute the job, safe to commit/rollback :param job: job to be run (as a dictionary). :param cron_cr: cursor holding lock on the cron job row, to use to update the next exec date, must not be committed/rolled back! 
""" try: with api.Environment.manage(): now = fields.datetime.context_timestamp(job_cr, job['user_id'], datetime.now()) nextcall = fields.datetime.context_timestamp(job_cr, job['user_id'], datetime.strptime(job['nextcall'], DEFAULT_SERVER_DATETIME_FORMAT)) numbercall = job['numbercall'] ok = False while nextcall < now and numbercall: if numbercall > 0: numbercall -= 1 if not ok or job['doall']: self._callback(job_cr, job['user_id'], job['model'], job['function'], job['args'], job['id']) if numbercall: nextcall += _intervalTypes[job['interval_type']](job['interval_number']) ok = True addsql = '' if not numbercall: addsql = ', active=False' cron_cr.execute("UPDATE ir_cron SET nextcall=%s, numbercall=%s"+addsql+" WHERE id=%s", (nextcall.astimezone(pytz.UTC).strftime(DEFAULT_SERVER_DATETIME_FORMAT), numbercall, job['id'])) self.invalidate_cache(job_cr, SUPERUSER_ID) finally: job_cr.commit() cron_cr.commit() @classmethod def _acquire_job(cls, db_name): # TODO remove 'check' argument from addons/base_action_rule/base_action_rule.py """ Try to process one cron job. This selects in database all the jobs that should be processed. It then tries to lock each of them and, if it succeeds, run the cron job (if it doesn't succeed, it means the job was already locked to be taken care of by another thread) and return. If a job was processed, returns True, otherwise returns False. """ db = openerp.sql_db.db_connect(db_name) threading.current_thread().dbname = db_name cr = db.cursor() jobs = [] try: # Make sure the database we poll has the same version as the code of base cr.execute("SELECT 1 FROM ir_module_module WHERE name=%s AND latest_version=%s", ('base', BASE_VERSION)) if cr.fetchone(): # Careful to compare timestamps with 'UTC' - everything is UTC as of v6.1. cr.execute("""SELECT * FROM ir_cron WHERE numbercall != 0 AND active AND nextcall <= (now() at time zone 'UTC') ORDER BY priority""") jobs = cr.dictfetchall() else: _logger.warning('Skipping database %s as its base version is not %s.', db_name, BASE_VERSION) except psycopg2.ProgrammingError, e: if e.pgcode == '42P01': # Class 42 — Syntax Error or Access Rule Violation; 42P01: undefined_table # The table ir_cron does not exist; this is probably not an OpenERP database. _logger.warning('Tried to poll an undefined table on database %s.', db_name) else: raise except Exception: _logger.warning('Exception in cron:', exc_info=True) finally: cr.close() for job in jobs: lock_cr = db.cursor() try: # Try to grab an exclusive lock on the job row from within the task transaction # Restrict to the same conditions as for the search since the job may have already # been run by an other thread when cron is running in multi thread lock_cr.execute("""SELECT * FROM ir_cron WHERE numbercall != 0 AND active AND nextcall <= (now() at time zone 'UTC') AND id=%s FOR UPDATE NOWAIT""", (job['id'],), log_exceptions=False) locked_job = lock_cr.fetchone() if not locked_job: _logger.debug("Job `%s` already executed by another process/thread. 
skipping it", job['name']) continue # Got the lock on the job row, run its code _logger.debug('Starting job `%s`.', job['name']) job_cr = db.cursor() try: registry = openerp.registry(db_name) registry[cls._name]._process_job(job_cr, job, lock_cr) except Exception: _logger.exception('Unexpected exception while processing cron job %r', job) finally: job_cr.close() except psycopg2.OperationalError, e: if e.pgcode == '55P03': # Class 55: Object not in prerequisite state; 55P03: lock_not_available _logger.debug('Another process/thread is already busy executing job `%s`, skipping it.', job['name']) continue else: # Unexpected OperationalError raise finally: # we're exiting due to an exception while acquiring the lock lock_cr.close() if hasattr(threading.current_thread(), 'dbname'): # cron job could have removed it as side-effect del threading.current_thread().dbname def _try_lock(self, cr, uid, ids, context=None): """Try to grab a dummy exclusive write-lock to the rows with the given ids, to make sure a following write() or unlink() will not block due to a process currently executing those cron tasks""" try: cr.execute("""SELECT id FROM "%s" WHERE id IN %%s FOR UPDATE NOWAIT""" % self._table, (tuple(ids),), log_exceptions=False) except psycopg2.OperationalError: cr.rollback() # early rollback to allow translations to work for the user feedback raise UserError(_("Record cannot be modified right now: " "This cron task is currently being executed and may not be modified " "Please try again in a few minutes")) def create(self, cr, uid, vals, context=None): res = super(ir_cron, self).create(cr, uid, vals, context=context) return res def write(self, cr, uid, ids, vals, context=None): self._try_lock(cr, uid, ids, context) res = super(ir_cron, self).write(cr, uid, ids, vals, context=context) return res def unlink(self, cr, uid, ids, context=None): self._try_lock(cr, uid, ids, context) res = super(ir_cron, self).unlink(cr, uid, ids, context=context) return res def try_write(self, cr, uid, ids, values, context=None): try: with cr.savepoint(): cr.execute("""SELECT id FROM "%s" WHERE id IN %%s FOR UPDATE NOWAIT""" % self._table, (tuple(ids),), log_exceptions=False) except psycopg2.OperationalError: pass else: return super(ir_cron, self).write(cr, uid, ids, values, context=context) return False def toggle(self, cr, uid, ids, model, domain, context=None): active = bool(self.pool[model].search_count(cr, uid, domain, context=context)) return self.try_write(cr, uid, ids, {'active': active}, context=context)
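# Behavior sketch for str2tuple above: the ir.cron 'args' column stores the
# argument tuple as text, parentheses included, and safe_eval turns it back
# into real values (the import path is assumed for illustration).
from openerp.addons.base.ir.ir_cron import str2tuple  # assumed path

assert str2tuple("('res.partner', 10)") == ('res.partner', 10)
assert str2tuple(False) == ()  # empty/missing args become an empty tuple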
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-05-09 13:00
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='UserInfo',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('userName', models.CharField(max_length=50)),
                ('password', models.CharField(max_length=50)),
                ('contact', models.CharField(default='11111111111', max_length=16)),
                ('email', models.CharField(default='[email protected]', max_length=255)),
                # Choice values must be strings to match the CharField column type.
                ('role', models.CharField(choices=[('1', 'Administrator'), ('2', 'Regular user')], default='2', max_length=2)),
                ('age', models.IntegerField(default=3)),
                ('gender', models.BooleanField(default=False)),
                ('memo', models.TextField(default='Memo')),
                ('createDate', models.DateTimeField(auto_now_add=True)),
                ('updateDate', models.DateTimeField(auto_now=True)),
            ],
        ),
    ]
import sqlitedict
import tempfile
from functools import wraps
import inspect


class MySqliteDict(sqlitedict.SqliteDict):
    """
    Avoids calling __del__ on an already deleted reference.
    """
    def __del__(self):
        try:
            super(MySqliteDict, self).__del__()
        except Exception:
            pass


class SQLiteCacheBackend(object):
    """
    A memoize-like cache backend for SPE recommendation calls.
    """
    def __init__(self):
        # create tempfile
        _, self.fname = tempfile.mkstemp(suffix='.db')
        # create cache
        self.cache = MySqliteDict(self.fname, autocommit=True)

    def __del__(self):
        # try to remove the cache file; import locally because module
        # globals may already be torn down at interpreter shutdown
        import os
        try:
            os.remove(self.fname)
        except Exception:
            pass

    def __setitem__(self, key, value):
        self.cache[key] = value

    def __getitem__(self, key):
        return self.cache[key]


def memoize(func):
    """
    This decorator wraps func so it checks the memoized cache before
    returning a result.

    Note func must receive self as first argument, as memoize will look for
    the cache backend in self.memoize_backend, which is a dict-like object
    supporting __getitem__ and __setitem__.

    Usage:

        class Foo(MyParent, Memoized):
            @memoize
            def squared(self, x):
                return x**2

        >>> foo = Foo(memoize_backend=MyBackend())  # default backend is SQLiteCacheBackend
        >>> foo.squared(10)    # will store result under hash considering {'x': 10}
        >>> foo.squared(10)    # will return result from cache
        >>> foo.squared(x=10)  # will also return result from cache
        >>> foo2 = Foo(memoize_backend=dict())  # it can be a dict too

    If you do use a dict as backend, remember python passes dicts and
    lists by reference!

        class Foo(MyParent, Memoized):
            def __init__(self, memoize_backend=dict(), *args, **kwargs):
                self.memoize_backend = memoize_backend  # wrong! the default dict
                                                        # is shared by all foo objects

        class Foo(MyParent, Memoized):
            def __init__(self, memoize_backend=None, *args, **kwargs):
                self.memoize_backend = memoize_backend or dict()  # correct

        >>> foo = Foo(memoize_backend=dict())
    """
    @wraps(func)
    def _inner(self, *args, **kwargs):
        # we need to build a hashed key from func name, args and kwargs;
        # note that @memoize will only work with instance methods
        d = inspect.getcallargs(func, self, *args, **kwargs)
        d['func'] = func.__name__
        hsh = ":".join([":".join([str(k), str(v)]) for k, v in sorted(d.items())])
        key = hash(hsh)

        # first try to get the result from the backend;
        # use a KeyError except clause, as the stored result could be None
        try:
            result = self.memoize_backend[key]
        except KeyError:
            result = func(self, *args, **kwargs)
            # backend should be defined in your memoized class.__init__
            self.memoize_backend[key] = result
        return result
    return _inner


class Memoized(object):
    def __new__(cls, *args, **kwargs):
        # attach the backend to the instance, not the class, so each object
        # gets its own cache unless one is shared explicitly
        obj = super(Memoized, cls).__new__(cls)
        obj.memoize_backend = kwargs.get('memoize_backend', SQLiteCacheBackend())
        return obj
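# Example usage of the helpers above with a plain dict backend
# (Recommender is an illustrative class name):
class Recommender(Memoized):
    @memoize
    def squared(self, x):
        return x ** 2


if __name__ == '__main__':
    rec = Recommender(memoize_backend=dict())
    assert rec.squared(10) == 100    # computed, then stored in the backend
    assert rec.squared(x=10) == 100  # same normalized key, served from cache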
from suds.client import Client from suds.transport.https import HttpAuthenticated from suds.xsd.doctor import ImportDoctor, Import from fixtures.pytest_store import store from utils import conf class MiqClient(Client): @staticmethod def pipeoptions(options_dict): """Convert a flat dict into pipe-separated key=value pairs Handy helper for making argument strings that the CFME soap API wants Doesn't handle pipes in keys or values, so don't put any in them. """ pair_list = list() for key, value in options_dict.items(): pair_list.append("{}={}".format(str(key), str(value))) return '|'.join(pair_list) def soap_client(): """ SoapClient to EVM based on base_url""" username = conf.credentials['default']['username'] password = conf.credentials['default']['password'] url = '{}/vmdbws/wsdl/'.format(store.base_url) transport = HttpAuthenticated(username=username, password=password) imp = Import('http://schemas.xmlsoap.org/soap/encoding/') doc = ImportDoctor(imp) client = MiqClient(url, transport=transport, doctor=doc) return client
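# Quick sanity check for pipeoptions above; key order follows dict iteration
# order, so the output may vary on older Pythons:
if __name__ == '__main__':
    print(MiqClient.pipeoptions({'vm_name': 'test_vm', 'cpus': 2}))
    # e.g. vm_name=test_vm|cpus=2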
import hashlib
import PyRSS2Gen as pyrss
from datetime import datetime, timedelta
from flask import current_app as app, render_template, abort, request
from flask.views import MethodView
from flask_simplelogin import is_logged_in
# from werkzeug.contrib.atom import AtomFeed
# The werkzeug AtomFeed escapes all html tags
from quokka.utils.atom import AtomFeed
from .models import make_model, make_paginator, Category, Tag, Author
from quokka.utils.text import (
    slugify_category, normalize_var, slugify, cdata, make_external_url
)


class BaseView(MethodView):
    def set_content_var_map(self, context, content):
        """Export variables from `content` to theme context

        example:
            CONTENT_VAR_MAP:
              author_avatar: AVATAR

        Will get the `article.author_avatar` and export it as `AVATAR`

        :param content: must be a `model` of type Content
        """
        MAP = app.theme_context.get('CONTENT_VAR_MAP', {})
        for attr, variable in MAP.items():
            value = getattr(content, attr, None)
            if value is not None:
                context[variable] = value

    def set_elements_visibility(self, context, content_type):
        """Set elements visibility according to content type

        This works with bootstrap3 and malt templates.

        Default content_types:
            index, article, page, category, tag, author, categories,
            tags, authors

        Custom content types:
            Any category, page or article can be accepted
            `blog/news` or `blog/news/my-article`
        """
        if not content_type:
            return
        CONTENT_TYPE = normalize_var(content_type).upper()
        context['CONTENT_TYPE'] = content_type
        for rule in app.theme_context.get('DYNAMIC_VARS', []):
            where = rule.get('where')
            var_list = rule.get('var')
            if not where or not var_list:
                continue
            if not isinstance(var_list, list):
                var_list = [var_list]
            if not isinstance(where, list):
                where = [where]
            WHERE = [normalize_var(item).upper() for item in where]
            if CONTENT_TYPE in WHERE:
                for var in var_list:
                    context[var] = rule.get('value', True)

        # content specific visibility items
        content = context.get('content')
        if content:
            # comments visibility control
            hide = False
            disqus_sitename = app.theme_context.get('DISQUS_SITENAME')

            if 'HIDE_COMMENTS' in app.theme_context:
                hide = app.theme_context['HIDE_COMMENTS']

            if 'HIDE_COMMENTS' in context:
                hide = context['HIDE_COMMENTS']
            else:
                context['HIDE_COMMENTS'] = hide

            if content.comments in ('closed', False):
                hide = True
            elif content.comments in ('opened', True):
                hide = False

            if hide is True:
                context['HIDE_COMMENTS'] = True
                context['DISQUS_SITENAME'] = False
            else:
                context['HIDE_COMMENTS'] = False
                context['DISQUS_SITENAME'] = disqus_sitename


class ArticleListView(BaseView):
    def get(self, category=None, tag=None, author=None, page_number=1,
            ext=None):
        context = {}
        query = {'published': True}
        home_template = app.theme_context.get('HOME_TEMPLATE')
        list_categories = app.theme_context.get('LIST_CATEGORIES', [])
        index_category = app.theme_context.get('INDEX_CATEGORY')
        content_type = 'index'
        template = custom_template = 'index.html'
        ext = ext or app.config.get('CONTENT_EXTENSION', 'html')
        FEED_ALL_ATOM = app.theme_context.get('FEED_ALL_ATOM')
        FEED_ALL_RSS = app.theme_context.get('FEED_ALL_RSS')

        if category:
            FEED_ALL_ATOM = f"{category}/index.atom"
            FEED_ALL_RSS = f"{category}/index.rss"
            content_type = 'category'
            custom_template = f'{content_type}/{normalize_var(category)}.html'
            if category != index_category:
                query['category_slug'] = {'$regex': f"^{category.rstrip('/')}"}
                if category not in list_categories:
                    template = 'category.html'
                else:
                    content_type = 'index'
            else:
                content_type = 'index'
        elif tag:
            FEED_ALL_ATOM = f"tag/{tag}/index.atom"
            FEED_ALL_RSS = \
f"tag/{tag}/index.rss" content_type = 'tag' custom_template = f'{content_type}/{normalize_var(tag)}.html' template = 'tag.html' # https://github.com/schapman1974/tinymongo/issues/42 query['tags_string'] = {'$regex': f'.*,{tag},.*'} elif author: FEED_ALL_ATOM = f"author/{author}/index.atom" FEED_ALL_RSS = f"author/{author}/index.rss" content_type = 'author' custom_template = f'{content_type}/{normalize_var(author)}.html' template = 'author.html' # https://github.com/schapman1974/tinymongo/issues/42 author_slugs = author.split('/') if len(author_slugs) > 1: query['$or'] = [ {'authors_string': {'$regex': f'.*,{author_slug},.*'}} for author_slug in author_slugs ] else: query['authors_string'] = {'$regex': f'.*,{author},.*'} elif home_template: # use custom template only when categoty is blank '/' # and INDEX_TEMPLATE is defined template = home_template custom_template = f'{content_type}/{home_template}.html' content_type = 'home' articles = [ make_model(article) for article in app.db.article_set(query) ] if content_type not in ['index', 'home', 'direct', 'author'] and not articles: # noqa # on `index`, `home` and direct templates no need for articles # but category pages should never show empty unless it is author # profile abort(404) page_name = '' if category: page_name = category elif tag: page_name = f'tag/{tag}' elif author: page_name = f'author/{author}' paginator = make_paginator(articles, name=page_name) page = paginator.page(page_number) context.update( { 'articles': articles, 'page_name': page_name, 'category': Category(category) if category else None, 'tag': Tag(tag) if tag else None, 'author': Author(author) if author else None, 'articles_paginator': paginator, 'articles_page': page, 'articles_next_page': page.next_page, 'articles_previous_page': page.previous_page, 'FEED_ALL_ATOM': FEED_ALL_ATOM, 'FEED_ALL_RSS': FEED_ALL_RSS } ) self.set_elements_visibility(context, content_type) self.set_elements_visibility(context, category) templates = [f'custom/{custom_template}', template] return self.render(ext, content_type, templates, **context) def render(self, ext, content_type, templates, **context): extension_map = app.config.get('CONTENT_EXTENSION_MAP', {}) method_name = extension_map.get(ext, 'render_template') return getattr(self, method_name)(content_type, templates, **context) def render_template(self, content_type, templates, **context): return render_template(templates, **context) def render_atom(self, content_type, templates, **context): feed_name = ( f"{app.theme_context.get('SITENAME')}" f" | {content_type.title()} | atom feed" ) if context.get('articles_page'): contents = context['articles_page'].object_list else: contents = context['articles'] feed = AtomFeed( feed_name, feed_url=request.url, url=request.url_root ) for content in contents: content = make_model(content) feed.add( content.title, cdata(content.content), content_type="html", author=content.author, url=make_external_url(content.url), updated=content.modified, published=content.date ) return feed.get_response() def render_rss(self, content_type, templates, **context): feed_name = description = ( f"{app.theme_context.get('SITENAME')}" f" | {content_type.title()} | RSS feed" ) if context.get('articles_page'): contents = context['articles_page'].object_list else: contents = context['articles'] rss = pyrss.RSS2( title=feed_name, link=request.url_root, description=description, language=app.config.get('RSS_LANGUAGE', 'en-us'), copyright=app.config.get('RSS_COPYRIGHT', 'All rights reserved.'), 
lastBuildDate=datetime.now(), categories=[str(context.get('tag') or context.get('category'))], ) # set rss.pubDate to the newest post in the collection # back 10 years in the past rss_pubdate = datetime.today() - timedelta(days=365 * 10) for content in contents: content = make_model(content) if content.date > rss_pubdate: rss_pubdate = content.date rss.items.append( pyrss.RSSItem( title=content.title, link=make_external_url(content.url), description=cdata(content.content), author=str(content.author), categories=[str(content.tags)], guid=hashlib.sha1( content.title.encode('utf-8') + content.url.encode('utf-8') ).hexdigest(), pubDate=content.date, ) ) # set the new published date after iterating the contents rss.pubDate = rss_pubdate return rss.to_xml(encoding=app.config.get('RSS_ENCODING', 'utf-8')) class CategoryListView(BaseView): def build_query(self, cat): query = {'published': True} if cat == app.theme_context.get('INDEX_CATEGORY', 'index'): return query if cat: query['category_slug'] = { '$regex': f"^{slugify_category(cat).rstrip('/')}" } else: query['category_slug'] = cat return query def get(self, ext=None): categories = [ ( Category(cat), [ make_model(article) for article in app.db.article_set(self.build_query(cat)) ] ) for cat in app.db.category_set( filter={'published': True} ) + ['index'] ] context = { 'categories': categories } self.set_elements_visibility(context, 'categories') return render_template('categories.html', **context) class TagListView(BaseView): def get(self, page_number=1, ext=None): tags = [ (Tag(tag), []) for tag in app.db.tag_set(filter={'published': True}) ] context = {'tags': tags} self.set_elements_visibility(context, 'tags') return render_template('tags.html', **context) class AuthorListView(BaseView): def get(self, ext=None): authors = [ ( Author(author), [ make_model(article) for article in app.db.article_set( {'authors_string': { '$regex': f'.*,{slugify(author)},.*'}, 'published': True} ) ] ) for author in app.db.author_set(filter={'published': True}) ] context = { 'authors': authors } self.set_elements_visibility(context, 'authors') return render_template('authors.html', **context) class DetailView(BaseView): is_preview = False def get(self, slug, ext=None): category, _, item_slug = slug.rpartition('/') content = app.db.get_with_content( slug=item_slug, category_slug=category ) if not content: abort(404) content = make_model(content) if content.status == 'draft' and not self.is_preview: abort(404) if self.is_preview and not is_logged_in(): # access denied abort(403) context = { 'category': content.category, 'author': content.author, 'content': content, content.content_type: content } self.set_elements_visibility(context, content.content_type) self.set_elements_visibility(context, slug) self.set_content_var_map(context, content) templates = [ f'custom/{content.content_type}/{normalize_var(slug)}.html', f'{content.content_type}.html' ] return render_template(templates, **context) class PreviewView(DetailView): is_preview = True
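# A hedged sketch of how these MethodViews could be wired into a Flask app.
# The rules and endpoint names below are illustrative; Quokka's real routing
# is defined elsewhere in the project:
def register_views_sketch(app):
    app.add_url_rule('/', view_func=ArticleListView.as_view('index'))
    app.add_url_rule('/tag/<tag>/', view_func=ArticleListView.as_view('tag'))
    app.add_url_rule('/categories.html', view_func=CategoryListView.as_view('categories'))
    app.add_url_rule('/<path:slug>.html', view_func=DetailView.as_view('detail'))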
# -*- coding: utf-8 -*- # Copyright 2017 OpenSynergy Indonesia # License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl). from openerp import models, fields, api class BuktiPotongPPhF113308Out(models.Model): _name = "l10n_id.bukti_potong_pph_f113308_out" _inherit = "l10n_id.bukti_potong_pph" _table = "l10n_id_bukti_potong_pph" _description = "Bukti Potong PPh f.1.1.33.08 Out" @api.model def _default_type_id(self): return self.env.ref( "l10n_id_taxform_bukti_potong_pph_f113308." "bukti_potong_pph_type_f113308_out").id type_id = fields.Many2one( default=lambda self: self._default_type_id(), ) @api.model def search(self, args, offset=0, limit=None, order=None, count=False): type_id = self.env.ref( "l10n_id_taxform_bukti_potong_pph_f113308." "bukti_potong_pph_type_f113308_out") args.append(("type_id", "=", type_id.id)) return super(BuktiPotongPPhF113308Out, self).search( args=args, offset=offset, limit=limit, order=order, count=count)
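# Note on the pattern above: the subclass pins both the default type and the
# search() domain, so a shared l10n_id.bukti_potong_pph table can be exposed
# as a type-specific model. A hedged usage sketch (requires a running Odoo
# environment; not part of this module):
#
#     env['l10n_id.bukti_potong_pph_f113308_out'].search([])
#     # behaves like search([('type_id', '=', <f113308-out type id>)])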
import os DEBUG = True TEMPLATE_DEBUG = DEBUG ADMINS = () SENTRY_ADMINS = ( # your email here ) MANAGERS = ADMINS SITE_ROOT = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) TIME_ZONE = 'America/Los_Angeles' LANGUAGE_CODE = 'en-us' SITE_ID = 1 USE_I18N = False USE_L10N = False MEDIA_ROOT = '/var/www/watchdog/media/' MEDIA_URL = '/site-media/' ADMIN_MEDIA_PREFIX = MEDIA_URL + 'admin/' TEMPLATE_LOADERS = ( 'django.template.loaders.filesystem.Loader', 'django.template.loaders.app_directories.Loader', ) MIDDLEWARE_CLASSES = ( 'django.middleware.common.CommonMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', ) ROOT_URLCONF = 'urls' TEMPLATE_DIRS = ( os.path.join(SITE_ROOT, 'templates'), ) INSTALLED_APPS = ( 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.sites', 'django.contrib.messages', 'django.contrib.admin', 'south', 'indexer', 'paging', 'sentry', 'sentry.client', 'staticfiles', )
# -*- coding: utf-8 -*- import io import json import logging import os import re from babelfish import Language, language_converters from datetime import datetime, timedelta from dogpile.cache.api import NO_VALUE from guessit import guessit import pytz import rarfile from rarfile import RarFile, is_rarfile from rebulk.loose import ensure_list from requests import Session from zipfile import ZipFile, is_zipfile from . import ParserBeautifulSoup, Provider from ..cache import SHOW_EXPIRATION_TIME, region from ..exceptions import AuthenticationError, ConfigurationError, ProviderError, ServiceUnavailable from ..matches import guess_matches from ..subtitle import SUBTITLE_EXTENSIONS, Subtitle, fix_line_ending from ..utils import sanitize from ..video import Episode, Movie logger = logging.getLogger(__name__) language_converters.register('legendastv = subliminal.converters.legendastv:LegendasTVConverter') # Configure :mod:`rarfile` to use the same path separator as :mod:`zipfile` rarfile.PATH_SEP = '/' #: Conversion map for types type_map = {'M': 'movie', 'S': 'episode', 'C': 'episode'} #: BR title season parsing regex season_re = re.compile(r' - (?P<season>\d+)(\xaa|a|st|nd|rd|th) (temporada|season)', re.IGNORECASE) #: Downloads parsing regex downloads_re = re.compile(r'(?P<downloads>\d+) downloads') #: Rating parsing regex rating_re = re.compile(r'nota (?P<rating>\d+)') #: Timestamp parsing regex timestamp_re = re.compile(r'(?P<day>\d+)/(?P<month>\d+)/(?P<year>\d+) - (?P<hour>\d+):(?P<minute>\d+)') #: Title with year/country regex title_re = re.compile(r'^(?P<series>.*?)(?: \((?:(?P<year>\d{4})|(?P<country>[A-Z]{2}))\))?$') #: Cache key for releases releases_key = __name__ + ':releases|{archive_id}|{archive_name}' class LegendasTVArchive(object): """LegendasTV Archive. :param str id: identifier. :param str name: name. :param bool pack: contains subtitles for multiple episodes. :param bool pack: featured. :param str link: link. :param int downloads: download count. :param int rating: rating (0-10). :param timestamp: timestamp. 
:type timestamp: datetime.datetime """ def __init__(self, id, name, pack, featured, link, downloads=0, rating=0, timestamp=None): #: Identifier self.id = id #: Name self.name = name #: Pack self.pack = pack #: Featured self.featured = featured #: Link self.link = link #: Download count self.downloads = downloads #: Rating (0-10) self.rating = rating #: Timestamp self.timestamp = timestamp #: Compressed content as :class:`rarfile.RarFile` or :class:`zipfile.ZipFile` self.content = None def __repr__(self): return '<%s [%s] %r>' % (self.__class__.__name__, self.id, self.name) class LegendasTVSubtitle(Subtitle): """LegendasTV Subtitle.""" provider_name = 'legendastv' def __init__(self, language, type, title, year, imdb_id, season, archive, name): super(LegendasTVSubtitle, self).__init__(language, page_link=archive.link) self.type = type self.title = title self.year = year self.imdb_id = imdb_id self.season = season self.archive = archive self.name = name @property def id(self): return '%s-%s' % (self.archive.id, self.name.lower()) @property def info(self): return self.name def get_matches(self, video, hearing_impaired=False): matches = guess_matches(video, { 'title': self.title, 'year': self.year }) # episode if isinstance(video, Episode) and self.type == 'episode': # imdb_id if video.series_imdb_id and self.imdb_id == video.series_imdb_id: matches.add('series_imdb_id') # movie elif isinstance(video, Movie) and self.type == 'movie': # imdb_id if video.imdb_id and self.imdb_id == video.imdb_id: matches.add('imdb_id') # name matches |= guess_matches(video, guessit(self.name, {'type': self.type})) return matches class LegendasTVProvider(Provider): """LegendasTV Provider. :param str username: username. :param str password: password. """ languages = {Language.fromlegendastv(l) for l in language_converters['legendastv'].codes} server_url = 'http://legendas.tv/' subtitle_class = LegendasTVSubtitle def __init__(self, username=None, password=None): # Provider needs UNRAR installed. 
        # If not available, raise ConfigurationError.
        try:
            rarfile.custom_check([rarfile.UNRAR_TOOL], True)
        except rarfile.RarExecError:
            raise ConfigurationError('UNRAR tool not available')

        if any((username, password)) and not all((username, password)):
            raise ConfigurationError('Username and password must be specified')

        self.username = username
        self.password = password
        self.logged_in = False
        self.session = None

    def initialize(self):
        self.session = Session()
        self.session.headers['User-Agent'] = self.user_agent

        # login
        if self.username and self.password:
            logger.info('Logging in')
            data = {'_method': 'POST', 'data[User][username]': self.username,
                    'data[User][password]': self.password}
            r = self.session.post(self.server_url + 'login', data, allow_redirects=False, timeout=10)
            raise_for_status(r)

            soup = ParserBeautifulSoup(r.content, ['html.parser'])
            if soup.find('div', {'class': 'alert-error'}, string=re.compile(u'Usuário ou senha inválidos')):
                raise AuthenticationError(self.username)

            logger.debug('Logged in')
            self.logged_in = True

    def terminate(self):
        # logout
        if self.logged_in:
            logger.info('Logging out')
            r = self.session.get(self.server_url + 'users/logout', allow_redirects=False, timeout=10)
            raise_for_status(r)
            logger.debug('Logged out')
            self.logged_in = False

        self.session.close()

    @staticmethod
    def is_valid_title(title, title_id, sanitized_title, season, year):
        """Check whether this is a valid title."""
        sanitized_result = sanitize(title['title'])
        if sanitized_result != sanitized_title:
            logger.debug("Mismatched title, discarding title %d (%s)",
                         title_id, sanitized_result)
            return

        # episode type
        if season:
            # discard mismatches on type
            if title['type'] != 'episode':
                logger.debug("Mismatched 'episode' type, discarding title %d (%s)",
                             title_id, sanitized_result)
                return

            # discard mismatches on season
            if 'season' not in title or title['season'] != season:
                logger.debug('Mismatched season %s, discarding title %d (%s)',
                             title.get('season'), title_id, sanitized_result)
                return
        # movie type
        else:
            # discard mismatches on type
            if title['type'] != 'movie':
                logger.debug("Mismatched 'movie' type, discarding title %d (%s)",
                             title_id, sanitized_result)
                return

            # discard mismatches on year
            if year is not None and 'year' in title and title['year'] != year:
                logger.debug("Mismatched movie year, discarding title %d (%s)",
                             title_id, sanitized_result)
                return
        return True

    @region.cache_on_arguments(expiration_time=SHOW_EXPIRATION_TIME, should_cache_fn=lambda value: value)
    def search_titles(self, title, season, title_year):
        """Search for titles matching the `title`.

        For episodes, each season has its own title.

        :param str title: the title to search for.
        :param int season: season of the title.
        :param int title_year: year of the title.
        :return: found titles.
:rtype: dict """ titles = {} sanitized_titles = [sanitize(title)] ignore_characters = {'\'', '.'} if any(c in title for c in ignore_characters): sanitized_titles.append(sanitize(title, ignore_characters=ignore_characters)) for sanitized_title in sanitized_titles: # make the query if season: logger.info('Searching episode title %r for season %r', sanitized_title, season) else: logger.info('Searching movie title %r', sanitized_title) r = self.session.get(self.server_url + 'legenda/sugestao/{}'.format(sanitized_title), timeout=10) raise_for_status(r) results = json.loads(r.text) # loop over results for result in results: source = result['_source'] # extract id title_id = int(source['id_filme']) # extract type title = {'type': type_map[source['tipo']]} # extract title, year and country name, year, country = title_re.match(source['dsc_nome']).groups() title['title'] = name # extract imdb_id if source['id_imdb'] != '0': if not source['id_imdb'].startswith('tt'): title['imdb_id'] = 'tt' + source['id_imdb'].zfill(7) else: title['imdb_id'] = source['id_imdb'] # extract season if title['type'] == 'episode': if source['temporada'] and source['temporada'].isdigit(): title['season'] = int(source['temporada']) else: match = season_re.search(source['dsc_nome_br']) if match: title['season'] = int(match.group('season')) else: logger.debug('No season detected for title %d (%s)', title_id, name) # extract year if year: title['year'] = int(year) elif source['dsc_data_lancamento'] and source['dsc_data_lancamento'].isdigit(): # year is based on season air date hence the adjustment title['year'] = int(source['dsc_data_lancamento']) - title.get('season', 1) + 1 # add title only if is valid # Check against title without ignored chars if self.is_valid_title(title, title_id, sanitized_titles[0], season, title_year): titles[title_id] = title logger.debug('Found %d titles', len(titles)) return titles @region.cache_on_arguments(expiration_time=timedelta(minutes=15).total_seconds()) def get_archives(self, title_id, language_code, title_type, season, episodes): """Get the archive list from a given `title_id`, `language_code`, `title_type`, `season` and `episode`. :param int title_id: title id. :param int language_code: language code. :param str title_type: episode or movie :param int season: season :param list episodes: episodes :return: the archives. 
:rtype: list of :class:`LegendasTVArchive` """ archives = [] page = 0 while True: # get the archive page url = self.server_url + 'legenda/busca/-/{language}/-/{page}/{title}'.format( language=language_code, page=page, title=title_id) r = self.session.get(url) raise_for_status(r) # parse the results soup = ParserBeautifulSoup(r.content, ['lxml', 'html.parser']) for archive_soup in soup.select('div.list_element > article > div > div.f_left'): # create archive archive = LegendasTVArchive(archive_soup.a['href'].split('/')[2], archive_soup.a.text, 'pack' in archive_soup.parent['class'], 'destaque' in archive_soup.parent['class'], self.server_url + archive_soup.a['href'][1:]) # clean name of path separators and pack flags clean_name = archive.name.replace('/', '-') if archive.pack and clean_name.startswith('(p)'): clean_name = clean_name[3:] # guess from name guess = guessit(clean_name, {'type': title_type}) # episode if season and episodes: # discard mismatches on episode in non-pack archives # Guessit may return int for single episode or list for multi-episode # Check if archive name has multiple episodes releases on it if not archive.pack and 'episode' in guess: wanted_episode = set(episodes) archive_episode = set(ensure_list(guess['episode'])) if not wanted_episode.intersection(archive_episode): logger.debug('Mismatched episode %s, discarding archive: %s', guess['episode'], clean_name) continue # extract text containing downloads, rating and timestamp data_text = archive_soup.find('p', class_='data').text # match downloads archive.downloads = int(downloads_re.search(data_text).group('downloads')) # match rating match = rating_re.search(data_text) if match: archive.rating = int(match.group('rating')) # match timestamp and validate it time_data = {k: int(v) for k, v in timestamp_re.search(data_text).groupdict().items()} archive.timestamp = pytz.timezone('America/Sao_Paulo').localize(datetime(**time_data)) if archive.timestamp > datetime.utcnow().replace(tzinfo=pytz.utc): raise ProviderError('Archive timestamp is in the future') # add archive logger.info('Found archive for title %d and language %d at page %s: %s', title_id, language_code, page, archive) archives.append(archive) # stop on last page if soup.find('a', attrs={'class': 'load_more'}, string='carregar mais') is None: break # increment page count page += 1 logger.debug('Found %d archives', len(archives)) return archives def download_archive(self, archive): """Download an archive's :attr:`~LegendasTVArchive.content`. :param archive: the archive to download :attr:`~LegendasTVArchive.content` of. 
:type archive: :class:`LegendasTVArchive` """ logger.info('Downloading archive %s', archive.id) r = self.session.get(self.server_url + 'downloadarquivo/{}'.format(archive.id)) raise_for_status(r) # open the archive archive_stream = io.BytesIO(r.content) if is_rarfile(archive_stream): logger.debug('Identified rar archive') archive.content = RarFile(archive_stream) elif is_zipfile(archive_stream): logger.debug('Identified zip archive') archive.content = ZipFile(archive_stream) else: raise ValueError('Not a valid archive') def query(self, language, title, season=None, episodes=None, year=None): # search for titles titles = self.search_titles(title, season, year) subtitles = [] # iterate over titles for title_id, t in titles.items(): logger.info('Getting archives for title %d and language %d', title_id, language.legendastv) archives = self.get_archives(title_id, language.legendastv, t['type'], season, episodes or []) if not archives: logger.info('No archives found for title %d and language %d', title_id, language.legendastv) # iterate over title's archives for a in archives: # compute an expiration time based on the archive timestamp expiration_time = (datetime.utcnow().replace(tzinfo=pytz.utc) - a.timestamp).total_seconds() # attempt to get the releases from the cache cache_key = releases_key.format(archive_id=a.id, archive_name=a.name) releases = region.get(cache_key, expiration_time=expiration_time) # the releases are not in cache or cache is expired if releases == NO_VALUE: logger.info('Releases not found in cache') # download archive self.download_archive(a) # extract the releases releases = [] for name in a.content.namelist(): # discard the legendastv file if name.startswith('Legendas.tv'): continue # discard hidden files if os.path.split(name)[-1].startswith('.'): continue # discard non-subtitle files if not name.lower().endswith(SUBTITLE_EXTENSIONS): continue releases.append(name) # cache the releases region.set(cache_key, releases) # iterate over releases for r in releases: subtitle = self.subtitle_class(language, t['type'], t['title'], t.get('year'), t.get('imdb_id'), t.get('season'), a, r) logger.debug('Found subtitle %r', subtitle) subtitles.append(subtitle) return subtitles def list_subtitles(self, video, languages): season = None episodes = [] if isinstance(video, Episode): titles = [video.series] + video.alternative_series season = video.season episodes = video.episodes else: titles = [video.title] + video.alternative_titles for title in titles: subtitles = [s for l in languages for s in self.query(l, title, season=season, episodes=episodes, year=video.year)] if subtitles: return subtitles return [] def download_subtitle(self, subtitle): # download archive in case we previously hit the releases cache and didn't download it if subtitle.archive.content is None: self.download_archive(subtitle.archive) # extract subtitle's content subtitle.content = fix_line_ending(subtitle.archive.content.read(subtitle.name)) def raise_for_status(r): # When site is under maintaince and http status code 200. if 'Em breve estaremos de volta' in r.text: raise ServiceUnavailable else: r.raise_for_status()
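# A hedged usage sketch for the provider above. Network access and valid
# LegendasTV credentials are assumed; the title/season values are examples:
def fetch_first_subtitle_sketch(username=None, password=None):
    provider = LegendasTVProvider(username, password)
    provider.initialize()
    try:
        subs = provider.query(Language('por', 'BR'), 'Friends',
                              season=1, episodes=[1])
        if subs:
            provider.download_subtitle(subs[0])
            return subs[0].content  # fixed line endings, ready to save
    finally:
        provider.terminate()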
# -*- coding: utf-8 -*- # Generated by Django 1.9.1 on 2016-01-18 17:05 from __future__ import unicode_literals import django.db.models.deletion from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('products', '0054_auto_20160117_1901'), ] operations = [ migrations.CreateModel( name='BandSawBlade', fields=[ ('sawblade_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='products.SawBlade')), ('type_description', models.CharField(blank=True, max_length=255, verbose_name='Typ Beschreibung')), ('type2', models.CharField(blank=True, max_length=255, verbose_name='2. Typ')), ('type2_description', models.CharField(blank=True, max_length=255, verbose_name='2. Typ Beschreibung')), ('image2', models.ImageField(blank=True, null=True, upload_to='', verbose_name='2. Produktabbildung')), ], options={ 'verbose_name_plural': 'Sägebänder', 'verbose_name': 'Sägeband', }, bases=('products.sawblade',), ), migrations.CreateModel( name='BandSawBladeIndicator', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('value', models.CharField(max_length=255, verbose_name='Kennziffer')), ('width', models.IntegerField(blank=True, default=0, verbose_name='Breite')), ('strength', models.FloatField(blank=True, default=0, verbose_name='Stärke')), ('E', models.CharField(blank=True, max_length=255, verbose_name='E')), ('G', models.CharField(blank=True, max_length=255, verbose_name='G')), ('H', models.CharField(blank=True, max_length=255, verbose_name='H')), ('I', models.CharField(blank=True, max_length=255, verbose_name='I')), ('J', models.CharField(blank=True, max_length=255, verbose_name='J')), ('L', models.CharField(blank=True, max_length=255, verbose_name='L')), ('N', models.CharField(blank=True, max_length=255, verbose_name='N')), ('O', models.CharField(blank=True, max_length=255, verbose_name='O')), ('T', models.CharField(blank=True, max_length=255, verbose_name='T')), ('U', models.CharField(blank=True, max_length=255, verbose_name='U')), ('V', models.CharField(blank=True, max_length=255, verbose_name='V')), ('W', models.CharField(blank=True, max_length=255, verbose_name='W')), ], ), migrations.AddField( model_name='bandsawblade', name='bandsaw_indicators', field=models.ManyToManyField(blank=True, to='products.BandSawBladeIndicator', verbose_name='Kenziffern'), ), ]
# -*- coding: utf-8 -*- # # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import os import time import random from sqlalchemy import event, exc, select from airflow.utils.log.logging_mixin import LoggingMixin log = LoggingMixin().log def setup_event_handlers( engine, reconnect_timeout_seconds, initial_backoff_seconds=0.2, max_backoff_seconds=120): @event.listens_for(engine, "engine_connect") def ping_connection(connection, branch): """ Pessimistic SQLAlchemy disconnect handling. Ensures that each connection returned from the pool is properly connected to the database. http://docs.sqlalchemy.org/en/rel_1_1/core/pooling.html#disconnect-handling-pessimistic """ if branch: # "branch" refers to a sub-connection of a connection, # we don't want to bother pinging on these. return start = time.time() backoff = initial_backoff_seconds # turn off "close with result". This flag is only used with # "connectionless" execution, otherwise will be False in any case save_should_close_with_result = connection.should_close_with_result while True: connection.should_close_with_result = False try: connection.scalar(select([1])) # If we made it here then the connection appears to be healty break except exc.DBAPIError as err: if time.time() - start >= reconnect_timeout_seconds: log.error( "Failed to re-establish DB connection within %s secs: %s", reconnect_timeout_seconds, err) raise if err.connection_invalidated: log.warning("DB connection invalidated. Reconnecting...") # Use a truncated binary exponential backoff. Also includes # a jitter to prevent the thundering herd problem of # simultaneous client reconnects backoff += backoff * random.random() time.sleep(min(backoff, max_backoff_seconds)) # run the same SELECT again - the connection will re-validate # itself and establish a new connection. The disconnect detection # here also causes the whole connection pool to be invalidated # so that all stale connections are discarded. continue else: log.error( "Unknown database connection error. Not retrying: %s", err) raise finally: # restore "close with result" connection.should_close_with_result = save_should_close_with_result @event.listens_for(engine, "connect") def connect(dbapi_connection, connection_record): connection_record.info['pid'] = os.getpid() @event.listens_for(engine, "checkout") def checkout(dbapi_connection, connection_record, connection_proxy): pid = os.getpid() if connection_record.info['pid'] != pid: connection_record.connection = connection_proxy.connection = None raise exc.DisconnectionError( "Connection record belongs to pid {}, " "attempting to check out in pid {}".format(connection_record.info['pid'], pid) )
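# Usage sketch (the connection URL is illustrative): attach the handlers
# right after engine creation so every pooled connection is pinged with
# SELECT 1 before it is handed out.
if __name__ == '__main__':
    from sqlalchemy import create_engine
    engine = create_engine('postgresql://airflow:airflow@localhost/airflow')
    setup_event_handlers(engine, reconnect_timeout_seconds=30)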
# -*- coding: utf-8 -*- # Generated by Django 1.10.6 on 2017-05-27 14:48 from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('member_calendar', '0001_initial'), ] operations = [ migrations.AddField( model_name='membercalendarevent', name='location_city', field=models.CharField(default='', max_length=255), preserve_default=False, ), migrations.AddField( model_name='membercalendarevent', name='location_state', field=models.CharField(default='', max_length=100), preserve_default=False, ), migrations.AddField( model_name='membercalendarevent', name='location_street_address', field=models.CharField(default='', max_length=255), preserve_default=False, ), migrations.AddField( model_name='membercalendarevent', name='location_zip_code', field=models.CharField(default='', max_length=5), preserve_default=False, ), ]
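# Note on preserve_default=False above: Django applies the default ('') only
# once, to back-fill rows that already exist when the migration runs; the
# resulting model field itself is declared without a default, e.g.:
#
#     location_city = models.CharField(max_length=255)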
from pybindgen import Module, FileCodeSink, param, retval, cppclass, typehandlers import pybindgen.settings import warnings class ErrorHandler(pybindgen.settings.ErrorHandler): def handle_error(self, wrapper, exception, traceback_): warnings.warn("exception %r in wrapper %s" % (exception, wrapper)) return True pybindgen.settings.error_handler = ErrorHandler() import sys def module_init(): root_module = Module('ns.tap_bridge', cpp_namespace='::ns3') return root_module def register_types(module): root_module = module.get_root() ## address.h (module 'network'): ns3::Address [class] module.add_class('Address', import_from_module='ns.network') ## address.h (module 'network'): ns3::Address::MaxSize_e [enumeration] module.add_enum('MaxSize_e', ['MAX_SIZE'], outer_class=root_module['ns3::Address'], import_from_module='ns.network') ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList [class] module.add_class('AttributeConstructionList', import_from_module='ns.core') ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item [struct] module.add_class('Item', import_from_module='ns.core', outer_class=root_module['ns3::AttributeConstructionList']) ## buffer.h (module 'network'): ns3::Buffer [class] module.add_class('Buffer', import_from_module='ns.network') ## buffer.h (module 'network'): ns3::Buffer::Iterator [class] module.add_class('Iterator', import_from_module='ns.network', outer_class=root_module['ns3::Buffer']) ## packet.h (module 'network'): ns3::ByteTagIterator [class] module.add_class('ByteTagIterator', import_from_module='ns.network') ## packet.h (module 'network'): ns3::ByteTagIterator::Item [class] module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagIterator']) ## byte-tag-list.h (module 'network'): ns3::ByteTagList [class] module.add_class('ByteTagList', import_from_module='ns.network') ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator [class] module.add_class('Iterator', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagList']) ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item [struct] module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagList::Iterator']) ## callback.h (module 'core'): ns3::CallbackBase [class] module.add_class('CallbackBase', import_from_module='ns.core') ## data-rate.h (module 'network'): ns3::DataRate [class] module.add_class('DataRate', import_from_module='ns.network') ## event-id.h (module 'core'): ns3::EventId [class] module.add_class('EventId', import_from_module='ns.core') ## hash.h (module 'core'): ns3::Hasher [class] module.add_class('Hasher', import_from_module='ns.core') ## ipv4-address.h (module 'network'): ns3::Ipv4Address [class] module.add_class('Ipv4Address', import_from_module='ns.network') ## ipv4-address.h (module 'network'): ns3::Ipv4Address [class] root_module['ns3::Ipv4Address'].implicitly_converts_to(root_module['ns3::Address']) ## ipv4-address.h (module 'network'): ns3::Ipv4Mask [class] module.add_class('Ipv4Mask', import_from_module='ns.network') ## ipv6-address.h (module 'network'): ns3::Ipv6Address [class] module.add_class('Ipv6Address', import_from_module='ns.network') ## ipv6-address.h (module 'network'): ns3::Ipv6Address [class] root_module['ns3::Ipv6Address'].implicitly_converts_to(root_module['ns3::Address']) ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix [class] module.add_class('Ipv6Prefix', import_from_module='ns.network') ## 
    ## mac48-address.h (module 'network'): ns3::Mac48Address [class]
    module.add_class('Mac48Address', import_from_module='ns.network')
    ## mac48-address.h (module 'network'): ns3::Mac48Address [class]
    root_module['ns3::Mac48Address'].implicitly_converts_to(root_module['ns3::Address'])
    ## net-device-container.h (module 'network'): ns3::NetDeviceContainer [class]
    module.add_class('NetDeviceContainer', import_from_module='ns.network')
    ## object-base.h (module 'core'): ns3::ObjectBase [class]
    module.add_class('ObjectBase', allow_subclassing=True, import_from_module='ns.core')
    ## object.h (module 'core'): ns3::ObjectDeleter [struct]
    module.add_class('ObjectDeleter', import_from_module='ns.core')
    ## object-factory.h (module 'core'): ns3::ObjectFactory [class]
    module.add_class('ObjectFactory', import_from_module='ns.core')
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata [class]
    module.add_class('PacketMetadata', import_from_module='ns.network')
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item [struct]
    module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::PacketMetadata'])
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item [enumeration]
    module.add_enum('', ['PAYLOAD', 'HEADER', 'TRAILER'], outer_class=root_module['ns3::PacketMetadata::Item'], import_from_module='ns.network')
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator [class]
    module.add_class('ItemIterator', import_from_module='ns.network', outer_class=root_module['ns3::PacketMetadata'])
    ## packet.h (module 'network'): ns3::PacketTagIterator [class]
    module.add_class('PacketTagIterator', import_from_module='ns.network')
    ## packet.h (module 'network'): ns3::PacketTagIterator::Item [class]
    module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::PacketTagIterator'])
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList [class]
    module.add_class('PacketTagList', import_from_module='ns.network')
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData [struct]
    module.add_class('TagData', import_from_module='ns.network', outer_class=root_module['ns3::PacketTagList'])
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::TagData_e [enumeration]
    module.add_enum('TagData_e', ['MAX_SIZE'], outer_class=root_module['ns3::PacketTagList::TagData'], import_from_module='ns.network')
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Object', 'ns3::ObjectBase', 'ns3::ObjectDeleter'], parent=root_module['ns3::ObjectBase'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    ## tag.h (module 'network'): ns3::Tag [class]
    module.add_class('Tag', import_from_module='ns.network', parent=root_module['ns3::ObjectBase'])
    ## tag-buffer.h (module 'network'): ns3::TagBuffer [class]
    module.add_class('TagBuffer', import_from_module='ns.network')
    ## tap-bridge-helper.h (module 'tap-bridge'): ns3::TapBridgeHelper [class]
    module.add_class('TapBridgeHelper')
    ## nstime.h (module 'core'): ns3::TimeWithUnit [class]
    module.add_class('TimeWithUnit', import_from_module='ns.core')
    ## type-id.h (module 'core'): ns3::TypeId [class]
    module.add_class('TypeId', import_from_module='ns.core')
    ## type-id.h (module 'core'): ns3::TypeId::AttributeFlag [enumeration]
module.add_enum('AttributeFlag', ['ATTR_GET', 'ATTR_SET', 'ATTR_CONSTRUCT', 'ATTR_SGC'], outer_class=root_module['ns3::TypeId'], import_from_module='ns.core') ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation [struct] module.add_class('AttributeInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId']) ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation [struct] module.add_class('TraceSourceInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId']) ## empty.h (module 'core'): ns3::empty [class] module.add_class('empty', import_from_module='ns.core') ## int64x64-double.h (module 'core'): ns3::int64x64_t [class] module.add_class('int64x64_t', import_from_module='ns.core') ## int64x64-double.h (module 'core'): ns3::int64x64_t::impl_type [enumeration] module.add_enum('impl_type', ['int128_impl', 'cairo_impl', 'ld_impl'], outer_class=root_module['ns3::int64x64_t'], import_from_module='ns.core') ## chunk.h (module 'network'): ns3::Chunk [class] module.add_class('Chunk', import_from_module='ns.network', parent=root_module['ns3::ObjectBase']) ## header.h (module 'network'): ns3::Header [class] module.add_class('Header', import_from_module='ns.network', parent=root_module['ns3::Chunk']) ## object.h (module 'core'): ns3::Object [class] module.add_class('Object', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >']) ## object.h (module 'core'): ns3::Object::AggregateIterator [class] module.add_class('AggregateIterator', import_from_module='ns.core', outer_class=root_module['ns3::Object']) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeChecker', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeChecker>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeValue', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeValue>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::CallbackImplBase', 'ns3::empty', 'ns3::DefaultDeleter<ns3::CallbackImplBase>'], 
parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::EventImpl', 'ns3::empty', 'ns3::DefaultDeleter<ns3::EventImpl>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::FdReader, ns3::empty, ns3::DefaultDeleter<ns3::FdReader> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::FdReader', 'ns3::empty', 'ns3::DefaultDeleter<ns3::FdReader>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Hash::Implementation', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Hash::Implementation>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::NixVector', 'ns3::empty', 'ns3::DefaultDeleter<ns3::NixVector>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Packet', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Packet>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::SystemThread, ns3::empty, ns3::DefaultDeleter<ns3::SystemThread> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::SystemThread', 'ns3::empty', 'ns3::DefaultDeleter<ns3::SystemThread>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::TraceSourceAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::TraceSourceAccessor>'], parent=root_module['ns3::empty'], 
memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## system-thread.h (module 'core'): ns3::SystemThread [class] module.add_class('SystemThread', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::SystemThread, ns3::empty, ns3::DefaultDeleter<ns3::SystemThread> >']) ## nstime.h (module 'core'): ns3::Time [class] module.add_class('Time', import_from_module='ns.core') ## nstime.h (module 'core'): ns3::Time::Unit [enumeration] module.add_enum('Unit', ['Y', 'D', 'H', 'MIN', 'S', 'MS', 'US', 'NS', 'PS', 'FS', 'LAST'], outer_class=root_module['ns3::Time'], import_from_module='ns.core') ## nstime.h (module 'core'): ns3::Time [class] root_module['ns3::Time'].implicitly_converts_to(root_module['ns3::int64x64_t']) ## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor [class] module.add_class('TraceSourceAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >']) ## trailer.h (module 'network'): ns3::Trailer [class] module.add_class('Trailer', import_from_module='ns.network', parent=root_module['ns3::Chunk']) ## attribute.h (module 'core'): ns3::AttributeAccessor [class] module.add_class('AttributeAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >']) ## attribute.h (module 'core'): ns3::AttributeChecker [class] module.add_class('AttributeChecker', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >']) ## attribute.h (module 'core'): ns3::AttributeValue [class] module.add_class('AttributeValue', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >']) ## callback.h (module 'core'): ns3::CallbackChecker [class] module.add_class('CallbackChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker']) ## callback.h (module 'core'): ns3::CallbackImplBase [class] module.add_class('CallbackImplBase', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >']) ## callback.h (module 'core'): ns3::CallbackValue [class] module.add_class('CallbackValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue']) ## data-rate.h (module 'network'): ns3::DataRateChecker [class] module.add_class('DataRateChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker']) ## data-rate.h (module 'network'): ns3::DataRateValue [class] module.add_class('DataRateValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue']) ## attribute.h (module 'core'): ns3::EmptyAttributeValue [class] module.add_class('EmptyAttributeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue']) ## event-impl.h (module 'core'): ns3::EventImpl [class] module.add_class('EventImpl', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >']) ## unix-fd-reader.h (module 'core'): ns3::FdReader [class] module.add_class('FdReader', 
import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::FdReader, ns3::empty, ns3::DefaultDeleter<ns3::FdReader> >']) ## ipv4-address.h (module 'network'): ns3::Ipv4AddressChecker [class] module.add_class('Ipv4AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker']) ## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue [class] module.add_class('Ipv4AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue']) ## ipv4-address.h (module 'network'): ns3::Ipv4MaskChecker [class] module.add_class('Ipv4MaskChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker']) ## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue [class] module.add_class('Ipv4MaskValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue']) ## ipv6-address.h (module 'network'): ns3::Ipv6AddressChecker [class] module.add_class('Ipv6AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker']) ## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue [class] module.add_class('Ipv6AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue']) ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixChecker [class] module.add_class('Ipv6PrefixChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker']) ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue [class] module.add_class('Ipv6PrefixValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue']) ## mac48-address.h (module 'network'): ns3::Mac48AddressChecker [class] module.add_class('Mac48AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker']) ## mac48-address.h (module 'network'): ns3::Mac48AddressValue [class] module.add_class('Mac48AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue']) ## net-device.h (module 'network'): ns3::NetDevice [class] module.add_class('NetDevice', import_from_module='ns.network', parent=root_module['ns3::Object']) ## net-device.h (module 'network'): ns3::NetDevice::PacketType [enumeration] module.add_enum('PacketType', ['PACKET_HOST', 'NS3_PACKET_HOST', 'PACKET_BROADCAST', 'NS3_PACKET_BROADCAST', 'PACKET_MULTICAST', 'NS3_PACKET_MULTICAST', 'PACKET_OTHERHOST', 'NS3_PACKET_OTHERHOST'], outer_class=root_module['ns3::NetDevice'], import_from_module='ns.network') ## nix-vector.h (module 'network'): ns3::NixVector [class] module.add_class('NixVector', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >']) ## node.h (module 'network'): ns3::Node [class] module.add_class('Node', import_from_module='ns.network', parent=root_module['ns3::Object']) ## object-factory.h (module 'core'): ns3::ObjectFactoryChecker [class] module.add_class('ObjectFactoryChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker']) ## object-factory.h (module 'core'): ns3::ObjectFactoryValue [class] module.add_class('ObjectFactoryValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue']) ## packet.h (module 'network'): ns3::Packet [class] module.add_class('Packet', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >']) ## tap-bridge.h (module 'tap-bridge'): ns3::TapBridge [class] module.add_class('TapBridge', 
    ## tap-bridge.h (module 'tap-bridge'): ns3::TapBridge [class]
    module.add_class('TapBridge', parent=root_module['ns3::NetDevice'])
    ## tap-bridge.h (module 'tap-bridge'): ns3::TapBridge::Mode [enumeration]
    module.add_enum('Mode', ['ILLEGAL', 'CONFIGURE_LOCAL', 'USE_LOCAL', 'USE_BRIDGE'], outer_class=root_module['ns3::TapBridge'])
    ## tap-bridge.h (module 'tap-bridge'): ns3::TapBridgeFdReader [class]
    module.add_class('TapBridgeFdReader', parent=root_module['ns3::FdReader'])
    ## nstime.h (module 'core'): ns3::TimeValue [class]
    module.add_class('TimeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    ## type-id.h (module 'core'): ns3::TypeIdChecker [class]
    module.add_class('TypeIdChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
    ## type-id.h (module 'core'): ns3::TypeIdValue [class]
    module.add_class('TypeIdValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    ## address.h (module 'network'): ns3::AddressChecker [class]
    module.add_class('AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
    ## address.h (module 'network'): ns3::AddressValue [class]
    module.add_class('AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
    ## Register a nested module for the namespace FatalImpl
    nested_module = module.add_cpp_namespace('FatalImpl')
    register_types_ns3_FatalImpl(nested_module)
    ## Register a nested module for the namespace Hash
    nested_module = module.add_cpp_namespace('Hash')
    register_types_ns3_Hash(nested_module)
    ## Register a nested module for the namespace TracedValueCallback
    nested_module = module.add_cpp_namespace('TracedValueCallback')
    register_types_ns3_TracedValueCallback(nested_module)

def register_types_ns3_FatalImpl(module):
    root_module = module.get_root()

def register_types_ns3_Hash(module):
    root_module = module.get_root()
    ## hash-function.h (module 'core'): ns3::Hash::Implementation [class]
    module.add_class('Implementation', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >'])
    typehandlers.add_type_alias(u'uint32_t ( * ) ( char const *, size_t ) *', u'ns3::Hash::Hash32Function_ptr')
    typehandlers.add_type_alias(u'uint32_t ( * ) ( char const *, size_t ) **', u'ns3::Hash::Hash32Function_ptr*')
    typehandlers.add_type_alias(u'uint32_t ( * ) ( char const *, size_t ) *&', u'ns3::Hash::Hash32Function_ptr&')
    typehandlers.add_type_alias(u'uint64_t ( * ) ( char const *, size_t ) *', u'ns3::Hash::Hash64Function_ptr')
    typehandlers.add_type_alias(u'uint64_t ( * ) ( char const *, size_t ) **', u'ns3::Hash::Hash64Function_ptr*')
    typehandlers.add_type_alias(u'uint64_t ( * ) ( char const *, size_t ) *&', u'ns3::Hash::Hash64Function_ptr&')
    ## Register a nested module for the namespace Function
    nested_module = module.add_cpp_namespace('Function')
    register_types_ns3_Hash_Function(nested_module)

def register_types_ns3_Hash_Function(module):
    root_module = module.get_root()
    ## hash-fnv.h (module 'core'): ns3::Hash::Function::Fnv1a [class]
    module.add_class('Fnv1a', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])
    ## hash-function.h (module 'core'): ns3::Hash::Function::Hash32 [class]
    module.add_class('Hash32', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])
    ## hash-function.h (module 'core'): ns3::Hash::Function::Hash64 [class]
    module.add_class('Hash64', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])
    ## hash-murmur3.h (module 'core'): ns3::Hash::Function::Murmur3 [class]
    module.add_class('Murmur3', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])

def register_types_ns3_TracedValueCallback(module):
    root_module = module.get_root()
    typehandlers.add_type_alias(u'void ( * ) ( ns3::Time, ns3::Time ) *', u'ns3::TracedValueCallback::Time')
    typehandlers.add_type_alias(u'void ( * ) ( ns3::Time, ns3::Time ) **', u'ns3::TracedValueCallback::Time*')
    typehandlers.add_type_alias(u'void ( * ) ( ns3::Time, ns3::Time ) *&', u'ns3::TracedValueCallback::Time&')

def register_methods(root_module):
    register_Ns3Address_methods(root_module, root_module['ns3::Address'])
    register_Ns3AttributeConstructionList_methods(root_module, root_module['ns3::AttributeConstructionList'])
    register_Ns3AttributeConstructionListItem_methods(root_module, root_module['ns3::AttributeConstructionList::Item'])
    register_Ns3Buffer_methods(root_module, root_module['ns3::Buffer'])
    register_Ns3BufferIterator_methods(root_module, root_module['ns3::Buffer::Iterator'])
    register_Ns3ByteTagIterator_methods(root_module, root_module['ns3::ByteTagIterator'])
    register_Ns3ByteTagIteratorItem_methods(root_module, root_module['ns3::ByteTagIterator::Item'])
    register_Ns3ByteTagList_methods(root_module, root_module['ns3::ByteTagList'])
    register_Ns3ByteTagListIterator_methods(root_module, root_module['ns3::ByteTagList::Iterator'])
    register_Ns3ByteTagListIteratorItem_methods(root_module, root_module['ns3::ByteTagList::Iterator::Item'])
    register_Ns3CallbackBase_methods(root_module, root_module['ns3::CallbackBase'])
    register_Ns3DataRate_methods(root_module, root_module['ns3::DataRate'])
    register_Ns3EventId_methods(root_module, root_module['ns3::EventId'])
    register_Ns3Hasher_methods(root_module, root_module['ns3::Hasher'])
    register_Ns3Ipv4Address_methods(root_module, root_module['ns3::Ipv4Address'])
    register_Ns3Ipv4Mask_methods(root_module, root_module['ns3::Ipv4Mask'])
    register_Ns3Ipv6Address_methods(root_module, root_module['ns3::Ipv6Address'])
    register_Ns3Ipv6Prefix_methods(root_module, root_module['ns3::Ipv6Prefix'])
    register_Ns3Mac48Address_methods(root_module, root_module['ns3::Mac48Address'])
    register_Ns3NetDeviceContainer_methods(root_module, root_module['ns3::NetDeviceContainer'])
    register_Ns3ObjectBase_methods(root_module, root_module['ns3::ObjectBase'])
    register_Ns3ObjectDeleter_methods(root_module, root_module['ns3::ObjectDeleter'])
    register_Ns3ObjectFactory_methods(root_module, root_module['ns3::ObjectFactory'])
    register_Ns3PacketMetadata_methods(root_module, root_module['ns3::PacketMetadata'])
    register_Ns3PacketMetadataItem_methods(root_module, root_module['ns3::PacketMetadata::Item'])
    register_Ns3PacketMetadataItemIterator_methods(root_module, root_module['ns3::PacketMetadata::ItemIterator'])
    register_Ns3PacketTagIterator_methods(root_module, root_module['ns3::PacketTagIterator'])
    register_Ns3PacketTagIteratorItem_methods(root_module, root_module['ns3::PacketTagIterator::Item'])
    register_Ns3PacketTagList_methods(root_module, root_module['ns3::PacketTagList'])
    register_Ns3PacketTagListTagData_methods(root_module, root_module['ns3::PacketTagList::TagData'])
    register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])
    register_Ns3Tag_methods(root_module, root_module['ns3::Tag'])
    register_Ns3TagBuffer_methods(root_module, root_module['ns3::TagBuffer'])
    register_Ns3TapBridgeHelper_methods(root_module, root_module['ns3::TapBridgeHelper'])
    register_Ns3TimeWithUnit_methods(root_module, root_module['ns3::TimeWithUnit'])
    register_Ns3TypeId_methods(root_module, root_module['ns3::TypeId'])
    register_Ns3TypeIdAttributeInformation_methods(root_module, root_module['ns3::TypeId::AttributeInformation'])
    register_Ns3TypeIdTraceSourceInformation_methods(root_module, root_module['ns3::TypeId::TraceSourceInformation'])
    register_Ns3Empty_methods(root_module, root_module['ns3::empty'])
    register_Ns3Int64x64_t_methods(root_module, root_module['ns3::int64x64_t'])
    register_Ns3Chunk_methods(root_module, root_module['ns3::Chunk'])
    register_Ns3Header_methods(root_module, root_module['ns3::Header'])
    register_Ns3Object_methods(root_module, root_module['ns3::Object'])
    register_Ns3ObjectAggregateIterator_methods(root_module, root_module['ns3::Object::AggregateIterator'])
    register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
    register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
    register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
    register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
    register_Ns3SimpleRefCount__Ns3EventImpl_Ns3Empty_Ns3DefaultDeleter__lt__ns3EventImpl__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >'])
    register_Ns3SimpleRefCount__Ns3FdReader_Ns3Empty_Ns3DefaultDeleter__lt__ns3FdReader__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::FdReader, ns3::empty, ns3::DefaultDeleter<ns3::FdReader> >'])
    register_Ns3SimpleRefCount__Ns3HashImplementation_Ns3Empty_Ns3DefaultDeleter__lt__ns3HashImplementation__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >'])
    register_Ns3SimpleRefCount__Ns3NixVector_Ns3Empty_Ns3DefaultDeleter__lt__ns3NixVector__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >'])
    register_Ns3SimpleRefCount__Ns3Packet_Ns3Empty_Ns3DefaultDeleter__lt__ns3Packet__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >'])
    register_Ns3SimpleRefCount__Ns3SystemThread_Ns3Empty_Ns3DefaultDeleter__lt__ns3SystemThread__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::SystemThread, ns3::empty, ns3::DefaultDeleter<ns3::SystemThread> >'])
    register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
    register_Ns3SystemThread_methods(root_module, root_module['ns3::SystemThread'])
    register_Ns3Time_methods(root_module, root_module['ns3::Time'])
    register_Ns3TraceSourceAccessor_methods(root_module, root_module['ns3::TraceSourceAccessor'])
    register_Ns3Trailer_methods(root_module, root_module['ns3::Trailer'])
    register_Ns3AttributeAccessor_methods(root_module, root_module['ns3::AttributeAccessor'])
    register_Ns3AttributeChecker_methods(root_module, root_module['ns3::AttributeChecker'])
    register_Ns3AttributeValue_methods(root_module, root_module['ns3::AttributeValue'])
    register_Ns3CallbackChecker_methods(root_module, root_module['ns3::CallbackChecker'])
    register_Ns3CallbackImplBase_methods(root_module, root_module['ns3::CallbackImplBase'])
    register_Ns3CallbackValue_methods(root_module, root_module['ns3::CallbackValue'])
    register_Ns3DataRateChecker_methods(root_module, root_module['ns3::DataRateChecker'])
    register_Ns3DataRateValue_methods(root_module, root_module['ns3::DataRateValue'])
    register_Ns3EmptyAttributeValue_methods(root_module, root_module['ns3::EmptyAttributeValue'])
    register_Ns3EventImpl_methods(root_module, root_module['ns3::EventImpl'])
    register_Ns3FdReader_methods(root_module, root_module['ns3::FdReader'])
    register_Ns3Ipv4AddressChecker_methods(root_module, root_module['ns3::Ipv4AddressChecker'])
    register_Ns3Ipv4AddressValue_methods(root_module, root_module['ns3::Ipv4AddressValue'])
    register_Ns3Ipv4MaskChecker_methods(root_module, root_module['ns3::Ipv4MaskChecker'])
    register_Ns3Ipv4MaskValue_methods(root_module, root_module['ns3::Ipv4MaskValue'])
    register_Ns3Ipv6AddressChecker_methods(root_module, root_module['ns3::Ipv6AddressChecker'])
    register_Ns3Ipv6AddressValue_methods(root_module, root_module['ns3::Ipv6AddressValue'])
    register_Ns3Ipv6PrefixChecker_methods(root_module, root_module['ns3::Ipv6PrefixChecker'])
    register_Ns3Ipv6PrefixValue_methods(root_module, root_module['ns3::Ipv6PrefixValue'])
    register_Ns3Mac48AddressChecker_methods(root_module, root_module['ns3::Mac48AddressChecker'])
    register_Ns3Mac48AddressValue_methods(root_module, root_module['ns3::Mac48AddressValue'])
    register_Ns3NetDevice_methods(root_module, root_module['ns3::NetDevice'])
    register_Ns3NixVector_methods(root_module, root_module['ns3::NixVector'])
    register_Ns3Node_methods(root_module, root_module['ns3::Node'])
    register_Ns3ObjectFactoryChecker_methods(root_module, root_module['ns3::ObjectFactoryChecker'])
    register_Ns3ObjectFactoryValue_methods(root_module, root_module['ns3::ObjectFactoryValue'])
    register_Ns3Packet_methods(root_module, root_module['ns3::Packet'])
    register_Ns3TapBridge_methods(root_module, root_module['ns3::TapBridge'])
    register_Ns3TapBridgeFdReader_methods(root_module, root_module['ns3::TapBridgeFdReader'])
    register_Ns3TimeValue_methods(root_module, root_module['ns3::TimeValue'])
    register_Ns3TypeIdChecker_methods(root_module, root_module['ns3::TypeIdChecker'])
    register_Ns3TypeIdValue_methods(root_module, root_module['ns3::TypeIdValue'])
    register_Ns3AddressChecker_methods(root_module, root_module['ns3::AddressChecker'])
    register_Ns3AddressValue_methods(root_module, root_module['ns3::AddressValue'])
    register_Ns3HashImplementation_methods(root_module, root_module['ns3::Hash::Implementation'])
    register_Ns3HashFunctionFnv1a_methods(root_module, root_module['ns3::Hash::Function::Fnv1a'])
    register_Ns3HashFunctionHash32_methods(root_module, root_module['ns3::Hash::Function::Hash32'])
    register_Ns3HashFunctionHash64_methods(root_module, root_module['ns3::Hash::Function::Hash64'])
    register_Ns3HashFunctionMurmur3_methods(root_module, root_module['ns3::Hash::Function::Murmur3'])
    return
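## NOTE (editor): for orientation, a minimal, hypothetical sketch of the
## pybindgen workflow this generated file participates in -- build a Module,
## register types and methods, then emit the C++ wrapper code. The function
## name and the 'demo' module are illustrative only; nothing here calls it.
def _editor_demo_pybindgen_flow():
    import sys
    from pybindgen import Module, FileCodeSink
    mod = Module('demo')                       # top-level Python extension module
    mod.add_include('"ns3/tap-bridge-module.h"')
    klass = mod.add_class('TapBridgeHelper')   # register a wrapper for a C++ class
    klass.add_constructor([])                  # expose its default constructor
    mod.generate(FileCodeSink(sys.stdout))     # write the generated C++ to a sink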
def register_Ns3Address_methods(root_module, cls):
    cls.add_binary_comparison_operator('<')
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    ## address.h (module 'network'): ns3::Address::Address() [constructor]
    cls.add_constructor([])
    ## address.h (module 'network'): ns3::Address::Address(uint8_t type, uint8_t const * buffer, uint8_t len) [constructor]
    cls.add_constructor([param('uint8_t', 'type'), param('uint8_t const *', 'buffer'), param('uint8_t', 'len')])
    ## address.h (module 'network'): ns3::Address::Address(ns3::Address const & address) [copy constructor]
    cls.add_constructor([param('ns3::Address const &', 'address')])
    ## address.h (module 'network'): bool ns3::Address::CheckCompatible(uint8_t type, uint8_t len) const [member function]
    cls.add_method('CheckCompatible', 'bool', [param('uint8_t', 'type'), param('uint8_t', 'len')], is_const=True)
    ## address.h (module 'network'): uint32_t ns3::Address::CopyAllFrom(uint8_t const * buffer, uint8_t len) [member function]
    cls.add_method('CopyAllFrom', 'uint32_t', [param('uint8_t const *', 'buffer'), param('uint8_t', 'len')])
    ## address.h (module 'network'): uint32_t ns3::Address::CopyAllTo(uint8_t * buffer, uint8_t len) const [member function]
    cls.add_method('CopyAllTo', 'uint32_t', [param('uint8_t *', 'buffer'), param('uint8_t', 'len')], is_const=True)
    ## address.h (module 'network'): uint32_t ns3::Address::CopyFrom(uint8_t const * buffer, uint8_t len) [member function]
    cls.add_method('CopyFrom', 'uint32_t', [param('uint8_t const *', 'buffer'), param('uint8_t', 'len')])
    ## address.h (module 'network'): uint32_t ns3::Address::CopyTo(uint8_t * buffer) const [member function]
    cls.add_method('CopyTo', 'uint32_t', [param('uint8_t *', 'buffer')], is_const=True)
    ## address.h (module 'network'): void ns3::Address::Deserialize(ns3::TagBuffer buffer) [member function]
    cls.add_method('Deserialize', 'void', [param('ns3::TagBuffer', 'buffer')])
    ## address.h (module 'network'): uint8_t ns3::Address::GetLength() const [member function]
    cls.add_method('GetLength', 'uint8_t', [], is_const=True)
    ## address.h (module 'network'): uint32_t ns3::Address::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True)
    ## address.h (module 'network'): bool ns3::Address::IsInvalid() const [member function]
    cls.add_method('IsInvalid', 'bool', [], is_const=True)
    ## address.h (module 'network'): bool ns3::Address::IsMatchingType(uint8_t type) const [member function]
    cls.add_method('IsMatchingType', 'bool', [param('uint8_t', 'type')], is_const=True)
    ## address.h (module 'network'): static uint8_t ns3::Address::Register() [member function]
    cls.add_method('Register', 'uint8_t', [], is_static=True)
    ## address.h (module 'network'): void ns3::Address::Serialize(ns3::TagBuffer buffer) const [member function]
    cls.add_method('Serialize', 'void', [param('ns3::TagBuffer', 'buffer')], is_const=True)
    return
def register_Ns3AttributeConstructionList_methods(root_module, cls):
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::AttributeConstructionList(ns3::AttributeConstructionList const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::AttributeConstructionList const &', 'arg0')])
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::AttributeConstructionList() [constructor]
    cls.add_constructor([])
    ## attribute-construction-list.h (module 'core'): void ns3::AttributeConstructionList::Add(std::string name, ns3::Ptr<ns3::AttributeChecker const> checker, ns3::Ptr<ns3::AttributeValue> value) [member function]
    cls.add_method('Add', 'void', [param('std::string', 'name'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker'), param('ns3::Ptr< ns3::AttributeValue >', 'value')])
    ## attribute-construction-list.h (module 'core'): std::_List_const_iterator<ns3::AttributeConstructionList::Item> ns3::AttributeConstructionList::Begin() const [member function]
    cls.add_method('Begin', 'std::_List_const_iterator< ns3::AttributeConstructionList::Item >', [], is_const=True)
    ## attribute-construction-list.h (module 'core'): std::_List_const_iterator<ns3::AttributeConstructionList::Item> ns3::AttributeConstructionList::End() const [member function]
    cls.add_method('End', 'std::_List_const_iterator< ns3::AttributeConstructionList::Item >', [], is_const=True)
    ## attribute-construction-list.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeConstructionList::Find(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('Find', 'ns3::Ptr< ns3::AttributeValue >', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True)
    return

def register_Ns3AttributeConstructionListItem_methods(root_module, cls):
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::Item() [constructor]
    cls.add_constructor([])
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::Item(ns3::AttributeConstructionList::Item const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::AttributeConstructionList::Item const &', 'arg0')])
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::checker [variable]
    cls.add_instance_attribute('checker', 'ns3::Ptr< ns3::AttributeChecker const >', is_const=False)
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::name [variable]
    cls.add_instance_attribute('name', 'std::string', is_const=False)
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::value [variable]
    cls.add_instance_attribute('value', 'ns3::Ptr< ns3::AttributeValue >', is_const=False)
    return
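## NOTE (editor): add_instance_attribute() above exposes a public C++ data
## member as a read/write Python attribute, whereas add_method() wraps a member
## function; is_const=False marks the attribute as writable from Python.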
def register_Ns3Buffer_methods(root_module, cls):
    ## buffer.h (module 'network'): ns3::Buffer::Buffer() [constructor]
    cls.add_constructor([])
    ## buffer.h (module 'network'): ns3::Buffer::Buffer(uint32_t dataSize) [constructor]
    cls.add_constructor([param('uint32_t', 'dataSize')])
    ## buffer.h (module 'network'): ns3::Buffer::Buffer(uint32_t dataSize, bool initialize) [constructor]
    cls.add_constructor([param('uint32_t', 'dataSize'), param('bool', 'initialize')])
    ## buffer.h (module 'network'): ns3::Buffer::Buffer(ns3::Buffer const & o) [copy constructor]
    cls.add_constructor([param('ns3::Buffer const &', 'o')])
    ## buffer.h (module 'network'): void ns3::Buffer::AddAtEnd(uint32_t end) [member function]
    cls.add_method('AddAtEnd', 'void', [param('uint32_t', 'end')])
    ## buffer.h (module 'network'): void ns3::Buffer::AddAtEnd(ns3::Buffer const & o) [member function]
    cls.add_method('AddAtEnd', 'void', [param('ns3::Buffer const &', 'o')])
    ## buffer.h (module 'network'): void ns3::Buffer::AddAtStart(uint32_t start) [member function]
    cls.add_method('AddAtStart', 'void', [param('uint32_t', 'start')])
    ## buffer.h (module 'network'): ns3::Buffer::Iterator ns3::Buffer::Begin() const [member function]
    cls.add_method('Begin', 'ns3::Buffer::Iterator', [], is_const=True)
    ## buffer.h (module 'network'): void ns3::Buffer::CopyData(std::ostream * os, uint32_t size) const [member function]
    cls.add_method('CopyData', 'void', [param('std::ostream *', 'os'), param('uint32_t', 'size')], is_const=True)
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::CopyData(uint8_t * buffer, uint32_t size) const [member function]
    cls.add_method('CopyData', 'uint32_t', [param('uint8_t *', 'buffer'), param('uint32_t', 'size')], is_const=True)
    ## buffer.h (module 'network'): ns3::Buffer ns3::Buffer::CreateFragment(uint32_t start, uint32_t length) const [member function]
    cls.add_method('CreateFragment', 'ns3::Buffer', [param('uint32_t', 'start'), param('uint32_t', 'length')], is_const=True)
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::Deserialize(uint8_t const * buffer, uint32_t size) [member function]
    cls.add_method('Deserialize', 'uint32_t', [param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
    ## buffer.h (module 'network'): ns3::Buffer::Iterator ns3::Buffer::End() const [member function]
    cls.add_method('End', 'ns3::Buffer::Iterator', [], is_const=True)
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True)
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::GetSize() const [member function]
    cls.add_method('GetSize', 'uint32_t', [], is_const=True)
    ## buffer.h (module 'network'): uint8_t const * ns3::Buffer::PeekData() const [member function]
    cls.add_method('PeekData', 'uint8_t const *', [], is_const=True)
    ## buffer.h (module 'network'): void ns3::Buffer::RemoveAtEnd(uint32_t end) [member function]
    cls.add_method('RemoveAtEnd', 'void', [param('uint32_t', 'end')])
    ## buffer.h (module 'network'): void ns3::Buffer::RemoveAtStart(uint32_t start) [member function]
    cls.add_method('RemoveAtStart', 'void', [param('uint32_t', 'start')])
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::Serialize(uint8_t * buffer, uint32_t maxSize) const [member function]
    cls.add_method('Serialize', 'uint32_t', [param('uint8_t *', 'buffer'), param('uint32_t', 'maxSize')], is_const=True)
    return

def register_Ns3BufferIterator_methods(root_module, cls):
    ## buffer.h (module 'network'): ns3::Buffer::Iterator::Iterator(ns3::Buffer::Iterator const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Buffer::Iterator const &', 'arg0')])
    ## buffer.h (module 'network'): ns3::Buffer::Iterator::Iterator() [constructor]
    cls.add_constructor([])
    ## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::CalculateIpChecksum(uint16_t size) [member function]
    cls.add_method('CalculateIpChecksum', 'uint16_t', [param('uint16_t', 'size')])
    ## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::CalculateIpChecksum(uint16_t size, uint32_t initialChecksum) [member function]
    cls.add_method('CalculateIpChecksum', 'uint16_t', [param('uint16_t', 'size'), param('uint32_t', 'initialChecksum')])
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::GetDistanceFrom(ns3::Buffer::Iterator const & o) const [member function]
    cls.add_method('GetDistanceFrom', 'uint32_t', [param('ns3::Buffer::Iterator const &', 'o')], is_const=True)
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::GetSize() const [member function]
    cls.add_method('GetSize', 'uint32_t', [], is_const=True)
    ## buffer.h (module 'network'): bool ns3::Buffer::Iterator::IsEnd() const [member function]
    cls.add_method('IsEnd', 'bool', [], is_const=True)
    ## buffer.h (module 'network'): bool ns3::Buffer::Iterator::IsStart() const [member function]
    cls.add_method('IsStart', 'bool', [], is_const=True)
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Next() [member function]
    cls.add_method('Next', 'void', [])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Next(uint32_t delta) [member function]
    cls.add_method('Next', 'void', [param('uint32_t', 'delta')])
    ## buffer.h (module 'network'): uint8_t ns3::Buffer::Iterator::PeekU8() [member function]
    cls.add_method('PeekU8', 'uint8_t', [])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Prev() [member function]
    cls.add_method('Prev', 'void', [])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Prev(uint32_t delta) [member function]
    cls.add_method('Prev', 'void', [param('uint32_t', 'delta')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Read(uint8_t * buffer, uint32_t size) [member function]
    cls.add_method('Read', 'void', [param('uint8_t *', 'buffer'), param('uint32_t', 'size')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Read(ns3::Buffer::Iterator start, uint32_t size) [member function]
    cls.add_method('Read', 'void', [param('ns3::Buffer::Iterator', 'start'), param('uint32_t', 'size')])
    ## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::ReadLsbtohU16() [member function]
    cls.add_method('ReadLsbtohU16', 'uint16_t', [])
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::ReadLsbtohU32() [member function]
    cls.add_method('ReadLsbtohU32', 'uint32_t', [])
    ## buffer.h (module 'network'): uint64_t ns3::Buffer::Iterator::ReadLsbtohU64() [member function]
    cls.add_method('ReadLsbtohU64', 'uint64_t', [])
    ## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::ReadNtohU16() [member function]
    cls.add_method('ReadNtohU16', 'uint16_t', [])
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::ReadNtohU32() [member function]
    cls.add_method('ReadNtohU32', 'uint32_t', [])
    ## buffer.h (module 'network'): uint64_t ns3::Buffer::Iterator::ReadNtohU64() [member function]
    cls.add_method('ReadNtohU64', 'uint64_t', [])
    ## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::ReadU16() [member function]
    cls.add_method('ReadU16', 'uint16_t', [])
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::ReadU32() [member function]
    cls.add_method('ReadU32', 'uint32_t', [])
    ## buffer.h (module 'network'): uint64_t ns3::Buffer::Iterator::ReadU64() [member function]
    cls.add_method('ReadU64', 'uint64_t', [])
    ## buffer.h (module 'network'): uint8_t ns3::Buffer::Iterator::ReadU8() [member function]
    cls.add_method('ReadU8', 'uint8_t', [])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Write(uint8_t const * buffer, uint32_t size) [member function]
    cls.add_method('Write', 'void', [param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Write(ns3::Buffer::Iterator start, ns3::Buffer::Iterator end) [member function]
    cls.add_method('Write', 'void', [param('ns3::Buffer::Iterator', 'start'), param('ns3::Buffer::Iterator', 'end')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtolsbU16(uint16_t data) [member function]
    cls.add_method('WriteHtolsbU16', 'void', [param('uint16_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtolsbU32(uint32_t data) [member function]
    cls.add_method('WriteHtolsbU32', 'void', [param('uint32_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtolsbU64(uint64_t data) [member function]
    cls.add_method('WriteHtolsbU64', 'void', [param('uint64_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtonU16(uint16_t data) [member function]
    cls.add_method('WriteHtonU16', 'void', [param('uint16_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtonU32(uint32_t data) [member function]
    cls.add_method('WriteHtonU32', 'void', [param('uint32_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtonU64(uint64_t data) [member function]
    cls.add_method('WriteHtonU64', 'void', [param('uint64_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU16(uint16_t data) [member function]
    cls.add_method('WriteU16', 'void', [param('uint16_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU32(uint32_t data) [member function]
    cls.add_method('WriteU32', 'void', [param('uint32_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU64(uint64_t data) [member function]
    cls.add_method('WriteU64', 'void', [param('uint64_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU8(uint8_t data) [member function]
    cls.add_method('WriteU8', 'void', [param('uint8_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU8(uint8_t data, uint32_t len) [member function]
    cls.add_method('WriteU8', 'void', [param('uint8_t', 'data'), param('uint32_t', 'len')])
    return

def register_Ns3ByteTagIterator_methods(root_module, cls):
    ## packet.h (module 'network'): ns3::ByteTagIterator::ByteTagIterator(ns3::ByteTagIterator const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ByteTagIterator const &', 'arg0')])
    ## packet.h (module 'network'): bool ns3::ByteTagIterator::HasNext() const [member function]
    cls.add_method('HasNext', 'bool', [], is_const=True)
    ## packet.h (module 'network'): ns3::ByteTagIterator::Item ns3::ByteTagIterator::Next() [member function]
    cls.add_method('Next', 'ns3::ByteTagIterator::Item', [])
    return

def register_Ns3ByteTagIteratorItem_methods(root_module, cls):
    ## packet.h (module 'network'): ns3::ByteTagIterator::Item::Item(ns3::ByteTagIterator::Item const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ByteTagIterator::Item const &', 'arg0')])
    ## packet.h (module 'network'): uint32_t ns3::ByteTagIterator::Item::GetEnd() const [member function]
    cls.add_method('GetEnd', 'uint32_t', [], is_const=True)
    ## packet.h (module 'network'): uint32_t ns3::ByteTagIterator::Item::GetStart() const [member function]
    cls.add_method('GetStart', 'uint32_t', [], is_const=True)
    ## packet.h (module 'network'): void ns3::ByteTagIterator::Item::GetTag(ns3::Tag & tag) const [member function]
    cls.add_method('GetTag', 'void', [param('ns3::Tag &', 'tag')], is_const=True)
    ## packet.h (module 'network'): ns3::TypeId ns3::ByteTagIterator::Item::GetTypeId() const [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_const=True)
    return
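## NOTE (editor): repeated add_method() calls with the same name (e.g. the
## WriteU8 pair above) register C++ overloads; pybindgen emits a wrapper that
## dispatches on the Python arguments at call time.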
def register_Ns3ByteTagList_methods(root_module, cls):
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::ByteTagList() [constructor]
    cls.add_constructor([])
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::ByteTagList(ns3::ByteTagList const & o) [copy constructor]
    cls.add_constructor([param('ns3::ByteTagList const &', 'o')])
    ## byte-tag-list.h (module 'network'): ns3::TagBuffer ns3::ByteTagList::Add(ns3::TypeId tid, uint32_t bufferSize, int32_t start, int32_t end) [member function]
    cls.add_method('Add', 'ns3::TagBuffer', [param('ns3::TypeId', 'tid'), param('uint32_t', 'bufferSize'), param('int32_t', 'start'), param('int32_t', 'end')])
    ## byte-tag-list.h (module 'network'): void ns3::ByteTagList::Add(ns3::ByteTagList const & o) [member function]
    cls.add_method('Add', 'void', [param('ns3::ByteTagList const &', 'o')])
    ## byte-tag-list.h (module 'network'): void ns3::ByteTagList::AddAtEnd(int32_t appendOffset) [member function]
    cls.add_method('AddAtEnd', 'void', [param('int32_t', 'appendOffset')])
    ## byte-tag-list.h (module 'network'): void ns3::ByteTagList::AddAtStart(int32_t prependOffset) [member function]
    cls.add_method('AddAtStart', 'void', [param('int32_t', 'prependOffset')])
    ## byte-tag-list.h (module 'network'): void ns3::ByteTagList::Adjust(int32_t adjustment) [member function]
    cls.add_method('Adjust', 'void', [param('int32_t', 'adjustment')])
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator ns3::ByteTagList::Begin(int32_t offsetStart, int32_t offsetEnd) const [member function]
    cls.add_method('Begin', 'ns3::ByteTagList::Iterator', [param('int32_t', 'offsetStart'), param('int32_t', 'offsetEnd')], is_const=True)
    ## byte-tag-list.h (module 'network'): void ns3::ByteTagList::RemoveAll() [member function]
    cls.add_method('RemoveAll', 'void', [])
    return

def register_Ns3ByteTagListIterator_methods(root_module, cls):
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Iterator(ns3::ByteTagList::Iterator const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ByteTagList::Iterator const &', 'arg0')])
    ## byte-tag-list.h (module 'network'): uint32_t ns3::ByteTagList::Iterator::GetOffsetStart() const [member function]
    cls.add_method('GetOffsetStart', 'uint32_t', [], is_const=True)
    ## byte-tag-list.h (module 'network'): bool ns3::ByteTagList::Iterator::HasNext() const [member function]
    cls.add_method('HasNext', 'bool', [], is_const=True)
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item ns3::ByteTagList::Iterator::Next() [member function]
    cls.add_method('Next', 'ns3::ByteTagList::Iterator::Item', [])
    return

def register_Ns3ByteTagListIteratorItem_methods(root_module, cls):
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::Item(ns3::ByteTagList::Iterator::Item const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ByteTagList::Iterator::Item const &', 'arg0')])
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::Item(ns3::TagBuffer buf) [constructor]
    cls.add_constructor([param('ns3::TagBuffer', 'buf')])
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::buf [variable]
    cls.add_instance_attribute('buf', 'ns3::TagBuffer', is_const=False)
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::end [variable]
    cls.add_instance_attribute('end', 'int32_t', is_const=False)
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::size [variable]
    cls.add_instance_attribute('size', 'uint32_t', is_const=False)
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::start [variable]
    cls.add_instance_attribute('start', 'int32_t', is_const=False)
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::tid [variable]
    cls.add_instance_attribute('tid', 'ns3::TypeId', is_const=False)
    return

def register_Ns3CallbackBase_methods(root_module, cls):
    ## callback.h (module 'core'): ns3::CallbackBase::CallbackBase(ns3::CallbackBase const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::CallbackBase const &', 'arg0')])
    ## callback.h (module 'core'): ns3::CallbackBase::CallbackBase() [constructor]
    cls.add_constructor([])
    ## callback.h (module 'core'): ns3::Ptr<ns3::CallbackImplBase> ns3::CallbackBase::GetImpl() const [member function]
    cls.add_method('GetImpl', 'ns3::Ptr< ns3::CallbackImplBase >', [], is_const=True)
    ## callback.h (module 'core'): ns3::CallbackBase::CallbackBase(ns3::Ptr<ns3::CallbackImplBase> impl) [constructor]
    cls.add_constructor([param('ns3::Ptr< ns3::CallbackImplBase >', 'impl')], visibility='protected')
    return

def register_Ns3DataRate_methods(root_module, cls):
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('!=')
    cls.add_binary_comparison_operator('<')
    cls.add_binary_comparison_operator('<=')
    cls.add_binary_comparison_operator('==')
    cls.add_binary_comparison_operator('>')
    cls.add_binary_comparison_operator('>=')
    ## data-rate.h (module 'network'): ns3::DataRate::DataRate(ns3::DataRate const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::DataRate const &', 'arg0')])
    ## data-rate.h (module 'network'): ns3::DataRate::DataRate() [constructor]
    cls.add_constructor([])
    ## data-rate.h (module 'network'): ns3::DataRate::DataRate(uint64_t bps) [constructor]
    cls.add_constructor([param('uint64_t', 'bps')])
    ## data-rate.h (module 'network'): ns3::DataRate::DataRate(std::string rate) [constructor]
    cls.add_constructor([param('std::string', 'rate')])
    ## data-rate.h (module 'network'): ns3::Time ns3::DataRate::CalculateBitsTxTime(uint32_t bits) const [member function]
    cls.add_method('CalculateBitsTxTime', 'ns3::Time', [param('uint32_t', 'bits')], is_const=True)
    ## data-rate.h (module 'network'): ns3::Time ns3::DataRate::CalculateBytesTxTime(uint32_t bytes) const [member function]
    cls.add_method('CalculateBytesTxTime', 'ns3::Time', [param('uint32_t', 'bytes')], is_const=True)
    ## data-rate.h (module 'network'): double ns3::DataRate::CalculateTxTime(uint32_t bytes) const [member function]
    cls.add_method('CalculateTxTime', 'double', [param('uint32_t', 'bytes')], deprecated=True, is_const=True)
    ## data-rate.h (module 'network'): uint64_t ns3::DataRate::GetBitRate() const [member function]
    cls.add_method('GetBitRate', 'uint64_t', [], is_const=True)
    return

def register_Ns3EventId_methods(root_module, cls):
    cls.add_binary_comparison_operator('!=')
    cls.add_binary_comparison_operator('==')
    ## event-id.h (module 'core'): ns3::EventId::EventId(ns3::EventId const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::EventId const &', 'arg0')])
    ## event-id.h (module 'core'): ns3::EventId::EventId() [constructor]
    cls.add_constructor([])
    ## event-id.h (module 'core'): ns3::EventId::EventId(ns3::Ptr<ns3::EventImpl> const & impl, uint64_t ts, uint32_t context, uint32_t uid) [constructor]
    cls.add_constructor([param('ns3::Ptr< ns3::EventImpl > const &', 'impl'), param('uint64_t', 'ts'), param('uint32_t', 'context'), param('uint32_t', 'uid')])
    ## event-id.h (module 'core'): void ns3::EventId::Cancel() [member function]
    cls.add_method('Cancel', 'void', [])
    ## event-id.h (module 'core'): uint32_t ns3::EventId::GetContext() const [member function]
    cls.add_method('GetContext', 'uint32_t', [], is_const=True)
    ## event-id.h (module 'core'): uint64_t ns3::EventId::GetTs() const [member function]
    cls.add_method('GetTs', 'uint64_t', [], is_const=True)
    ## event-id.h (module 'core'): uint32_t ns3::EventId::GetUid() const [member function]
    cls.add_method('GetUid', 'uint32_t', [], is_const=True)
    ## event-id.h (module 'core'): bool ns3::EventId::IsExpired() const [member function]
    cls.add_method('IsExpired', 'bool', [], is_const=True)
    ## event-id.h (module 'core'): bool ns3::EventId::IsRunning() const [member function]
    cls.add_method('IsRunning', 'bool', [], is_const=True)
    ## event-id.h (module 'core'): ns3::EventImpl * ns3::EventId::PeekEventImpl() const [member function]
    cls.add_method('PeekEventImpl', 'ns3::EventImpl *', [], is_const=True)
    return

def register_Ns3Hasher_methods(root_module, cls):
    ## hash.h (module 'core'): ns3::Hasher::Hasher(ns3::Hasher const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Hasher const &', 'arg0')])
    ## hash.h (module 'core'): ns3::Hasher::Hasher() [constructor]
    cls.add_constructor([])
    ## hash.h (module 'core'): ns3::Hasher::Hasher(ns3::Ptr<ns3::Hash::Implementation> hp) [constructor]
    cls.add_constructor([param('ns3::Ptr< ns3::Hash::Implementation >', 'hp')])
    ## hash.h (module 'core'): uint32_t ns3::Hasher::GetHash32(char const * buffer, size_t const size) [member function]
    cls.add_method('GetHash32', 'uint32_t', [param('char const *', 'buffer'), param('size_t const', 'size')])
    ## hash.h (module 'core'): uint32_t ns3::Hasher::GetHash32(std::string const s) [member function]
    cls.add_method('GetHash32', 'uint32_t', [param('std::string const', 's')])
    ## hash.h (module 'core'): uint64_t ns3::Hasher::GetHash64(char const * buffer, size_t const size) [member function]
    cls.add_method('GetHash64', 'uint64_t', [param('char const *', 'buffer'), param('size_t const', 'size')])
    ## hash.h (module 'core'): uint64_t ns3::Hasher::GetHash64(std::string const s) [member function]
    cls.add_method('GetHash64', 'uint64_t', [param('std::string const', 's')])
    ## hash.h (module 'core'): ns3::Hasher & ns3::Hasher::clear() [member function]
    cls.add_method('clear', 'ns3::Hasher &', [])
    return
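## NOTE (editor): flags such as is_const, is_static, is_pure_virtual,
## deprecated and visibility='protected' in these registrations mirror the
## C++ declarations; pybindgen uses them to decide which wrappers it is legal
## to emit (a protected constructor, for instance, is only reachable when
## subclassing the wrapped type).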
def register_Ns3Ipv4Address_methods(root_module, cls):
    cls.add_binary_comparison_operator('<')
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    ## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address(ns3::Ipv4Address const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Ipv4Address const &', 'arg0')])
    ## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address() [constructor]
    cls.add_constructor([])
    ## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address(uint32_t address) [constructor]
    cls.add_constructor([param('uint32_t', 'address')])
    ## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address(char const * address) [constructor]
    cls.add_constructor([param('char const *', 'address')])
    ## ipv4-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv4Address::CombineMask(ns3::Ipv4Mask const & mask) const [member function]
    cls.add_method('CombineMask', 'ns3::Ipv4Address', [param('ns3::Ipv4Mask const &', 'mask')], is_const=True)
    ## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::ConvertFrom(ns3::Address const & address) [member function]
    cls.add_method('ConvertFrom', 'ns3::Ipv4Address', [param('ns3::Address const &', 'address')], is_static=True)
    ## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::Deserialize(uint8_t const * buf) [member function]
    cls.add_method('Deserialize', 'ns3::Ipv4Address', [param('uint8_t const *', 'buf')], is_static=True)
    ## ipv4-address.h (module 'network'): uint32_t ns3::Ipv4Address::Get() const [member function]
    cls.add_method('Get', 'uint32_t', [], is_const=True)
    ## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetAny() [member function]
    cls.add_method('GetAny', 'ns3::Ipv4Address', [], is_static=True)
    ## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetBroadcast() [member function]
    cls.add_method('GetBroadcast', 'ns3::Ipv4Address', [], is_static=True)
    ## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetLoopback() [member function]
    cls.add_method('GetLoopback', 'ns3::Ipv4Address', [], is_static=True)
    ## ipv4-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv4Address::GetSubnetDirectedBroadcast(ns3::Ipv4Mask const & mask) const [member function]
    cls.add_method('GetSubnetDirectedBroadcast', 'ns3::Ipv4Address', [param('ns3::Ipv4Mask const &', 'mask')], is_const=True)
    ## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetZero() [member function]
    cls.add_method('GetZero', 'ns3::Ipv4Address', [], is_static=True)
    ## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsBroadcast() const [member function]
    cls.add_method('IsBroadcast', 'bool', [], is_const=True)
    ## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsEqual(ns3::Ipv4Address const & other) const [member function]
    cls.add_method('IsEqual', 'bool', [param('ns3::Ipv4Address const &', 'other')], is_const=True)
    ## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsLocalMulticast() const [member function]
    cls.add_method('IsLocalMulticast', 'bool', [], is_const=True)
    ## ipv4-address.h (module 'network'): static bool ns3::Ipv4Address::IsMatchingType(ns3::Address const & address) [member function]
    cls.add_method('IsMatchingType', 'bool', [param('ns3::Address const &', 'address')], is_static=True)
    ## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsMulticast() const [member function]
    cls.add_method('IsMulticast', 'bool', [], is_const=True)
    ## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsSubnetDirectedBroadcast(ns3::Ipv4Mask const & mask) const [member function]
    cls.add_method('IsSubnetDirectedBroadcast', 'bool', [param('ns3::Ipv4Mask const &', 'mask')], is_const=True)
    ## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Print(std::ostream & os) const [member function]
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True)
    ## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Serialize(uint8_t * buf) const [member function]
    cls.add_method('Serialize', 'void', [param('uint8_t *', 'buf')], is_const=True)
    ## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Set(uint32_t address) [member function]
    cls.add_method('Set', 'void', [param('uint32_t', 'address')])
    ## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Set(char const * address) [member function]
    cls.add_method('Set', 'void', [param('char const *', 'address')])
    return

def register_Ns3Ipv4Mask_methods(root_module, cls):
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    ## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask(ns3::Ipv4Mask const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Ipv4Mask const &', 'arg0')])
    ## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask() [constructor]
    cls.add_constructor([])
    ## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask(uint32_t mask) [constructor]
    cls.add_constructor([param('uint32_t', 'mask')])
    ## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask(char const * mask) [constructor]
    cls.add_constructor([param('char const *', 'mask')])
    ## ipv4-address.h (module 'network'): uint32_t ns3::Ipv4Mask::Get() const [member function]
    cls.add_method('Get', 'uint32_t', [], is_const=True)
    ## ipv4-address.h (module 'network'): uint32_t ns3::Ipv4Mask::GetInverse() const [member function]
    cls.add_method('GetInverse', 'uint32_t', [], is_const=True)
    ## ipv4-address.h (module 'network'): static ns3::Ipv4Mask ns3::Ipv4Mask::GetLoopback() [member function]
    cls.add_method('GetLoopback', 'ns3::Ipv4Mask', [], is_static=True)
    ## ipv4-address.h (module 'network'): static ns3::Ipv4Mask ns3::Ipv4Mask::GetOnes() [member function]
    cls.add_method('GetOnes', 'ns3::Ipv4Mask', [], is_static=True)
    ## ipv4-address.h (module 'network'): uint16_t ns3::Ipv4Mask::GetPrefixLength() const [member function]
    cls.add_method('GetPrefixLength', 'uint16_t', [], is_const=True)
    ## ipv4-address.h (module 'network'): static ns3::Ipv4Mask ns3::Ipv4Mask::GetZero() [member function]
    cls.add_method('GetZero', 'ns3::Ipv4Mask', [], is_static=True)
    ## ipv4-address.h (module 'network'): bool ns3::Ipv4Mask::IsEqual(ns3::Ipv4Mask other) const [member function]
    cls.add_method('IsEqual', 'bool', [param('ns3::Ipv4Mask', 'other')], is_const=True)
    ## ipv4-address.h (module 'network'): bool ns3::Ipv4Mask::IsMatch(ns3::Ipv4Address a, ns3::Ipv4Address b) const [member function]
    cls.add_method('IsMatch', 'bool', [param('ns3::Ipv4Address', 'a'), param('ns3::Ipv4Address', 'b')], is_const=True)
    ## ipv4-address.h (module 'network'): void ns3::Ipv4Mask::Print(std::ostream & os) const [member function]
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True)
    ## ipv4-address.h (module 'network'): void ns3::Ipv4Mask::Set(uint32_t mask) [member function]
    cls.add_method('Set', 'void', [param('uint32_t', 'mask')])
    return

def register_Ns3Ipv6Address_methods(root_module, cls):
    cls.add_binary_comparison_operator('<')
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    ## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address() [constructor]
    cls.add_constructor([])
    ## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(char const * address) [constructor]
    cls.add_constructor([param('char const *', 'address')])
    ## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(uint8_t * address) [constructor]
    cls.add_constructor([param('uint8_t *', 'address')])
    ## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(ns3::Ipv6Address const & addr) [copy constructor]
    cls.add_constructor([param('ns3::Ipv6Address const &', 'addr')])
    ## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(ns3::Ipv6Address const * addr) [constructor]
    cls.add_constructor([param('ns3::Ipv6Address const *', 'addr')])
    ## ipv6-address.h (module 'network'): ns3::Ipv6Address ns3::Ipv6Address::CombinePrefix(ns3::Ipv6Prefix const & prefix) [member function]
    cls.add_method('CombinePrefix', 'ns3::Ipv6Address', [param('ns3::Ipv6Prefix const &', 'prefix')])
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::ConvertFrom(ns3::Address const & address) [member function]
    cls.add_method('ConvertFrom', 'ns3::Ipv6Address', [param('ns3::Address const &', 'address')], is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::Deserialize(uint8_t const * buf) [member function]
    cls.add_method('Deserialize', 'ns3::Ipv6Address', [param('uint8_t const *', 'buf')], is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAllHostsMulticast() [member function]
    cls.add_method('GetAllHostsMulticast', 'ns3::Ipv6Address', [], is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAllNodesMulticast() [member function]
    cls.add_method('GetAllNodesMulticast', 'ns3::Ipv6Address', [], is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAllRoutersMulticast() [member function]
    cls.add_method('GetAllRoutersMulticast', 'ns3::Ipv6Address', [], is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAny() [member function]
    cls.add_method('GetAny', 'ns3::Ipv6Address', [], is_static=True)
    ## ipv6-address.h (module 'network'): void ns3::Ipv6Address::GetBytes(uint8_t * buf) const [member function]
    cls.add_method('GetBytes', 'void', [param('uint8_t *', 'buf')], is_const=True)
    ## ipv6-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv6Address::GetIpv4MappedAddress() const [member function]
    cls.add_method('GetIpv4MappedAddress', 'ns3::Ipv4Address', [], is_const=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetLoopback() [member function]
    cls.add_method('GetLoopback', 'ns3::Ipv6Address', [], is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetOnes() [member function]
    cls.add_method('GetOnes', 'ns3::Ipv6Address', [], is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetZero() [member function]
    cls.add_method('GetZero', 'ns3::Ipv6Address', [], is_static=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAllHostsMulticast() const [member function]
    cls.add_method('IsAllHostsMulticast', 'bool', [], is_const=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAllNodesMulticast() const [member function]
    cls.add_method('IsAllNodesMulticast', 'bool', [], is_const=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAllRoutersMulticast() const [member function]
    cls.add_method('IsAllRoutersMulticast', 'bool', [], is_const=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAny() const [member function]
    cls.add_method('IsAny', 'bool', [], is_const=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsDocumentation() const [member function]
    cls.add_method('IsDocumentation', 'bool', [], is_const=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsEqual(ns3::Ipv6Address const & other) const [member function]
    cls.add_method('IsEqual', 'bool', [param('ns3::Ipv6Address const &', 'other')], is_const=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsIpv4MappedAddress() const [member function]
    cls.add_method('IsIpv4MappedAddress', 'bool', [], is_const=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsLinkLocal() const [member function]
    cls.add_method('IsLinkLocal', 'bool', [], is_const=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsLinkLocalMulticast() const [member function]
    cls.add_method('IsLinkLocalMulticast', 'bool', [], is_const=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsLocalhost() const [member function]
    cls.add_method('IsLocalhost', 'bool', [], is_const=True)
    ## ipv6-address.h (module 'network'): static bool ns3::Ipv6Address::IsMatchingType(ns3::Address const & address) [member function]
    cls.add_method('IsMatchingType', 'bool', [param('ns3::Address const &', 'address')], is_static=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsMulticast() const [member function]
    cls.add_method('IsMulticast', 'bool', [], is_const=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsSolicitedMulticast() const [member function]
    cls.add_method('IsSolicitedMulticast', 'bool', [], is_const=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredAddress(ns3::Mac16Address addr, ns3::Ipv6Address prefix) [member function]
    cls.add_method('MakeAutoconfiguredAddress', 'ns3::Ipv6Address', [param('ns3::Mac16Address', 'addr'), param('ns3::Ipv6Address', 'prefix')], is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredAddress(ns3::Mac48Address addr, ns3::Ipv6Address prefix) [member function]
    cls.add_method('MakeAutoconfiguredAddress', 'ns3::Ipv6Address', [param('ns3::Mac48Address', 'addr'), param('ns3::Ipv6Address', 'prefix')], is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredAddress(ns3::Mac64Address addr, ns3::Ipv6Address prefix) [member function]
    cls.add_method('MakeAutoconfiguredAddress', 'ns3::Ipv6Address', [param('ns3::Mac64Address', 'addr'), param('ns3::Ipv6Address', 'prefix')], is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredLinkLocalAddress(ns3::Mac16Address mac) [member function]
    cls.add_method('MakeAutoconfiguredLinkLocalAddress', 'ns3::Ipv6Address', [param('ns3::Mac16Address', 'mac')], is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredLinkLocalAddress(ns3::Mac48Address mac) [member function]
    cls.add_method('MakeAutoconfiguredLinkLocalAddress', 'ns3::Ipv6Address', [param('ns3::Mac48Address', 'mac')], is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredLinkLocalAddress(ns3::Mac64Address mac) [member function]
    cls.add_method('MakeAutoconfiguredLinkLocalAddress', 'ns3::Ipv6Address', [param('ns3::Mac64Address', 'mac')], is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeIpv4MappedAddress(ns3::Ipv4Address addr) [member function]
    cls.add_method('MakeIpv4MappedAddress', 'ns3::Ipv6Address', [param('ns3::Ipv4Address', 'addr')], is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeSolicitedAddress(ns3::Ipv6Address addr) [member function]
    cls.add_method('MakeSolicitedAddress', 'ns3::Ipv6Address', [param('ns3::Ipv6Address', 'addr')], is_static=True)
    ## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Print(std::ostream & os) const [member function]
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True)
    ## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Serialize(uint8_t * buf) const [member function]
    cls.add_method('Serialize', 'void', [param('uint8_t *', 'buf')], is_const=True)
    ## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Set(char const * address) [member function]
    cls.add_method('Set', 'void', [param('char const *', 'address')])
    ## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Set(uint8_t * address) [member function]
    cls.add_method('Set', 'void', [param('uint8_t *', 'address')])
    return

def register_Ns3Ipv6Prefix_methods(root_module, cls):
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix() [constructor]
    cls.add_constructor([])
    ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(uint8_t * prefix) [constructor]
    cls.add_constructor([param('uint8_t *', 'prefix')])
    ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(char const * prefix) [constructor]
    cls.add_constructor([param('char const *', 'prefix')])
    ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(uint8_t prefix) [constructor]
    cls.add_constructor([param('uint8_t', 'prefix')])
    ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(ns3::Ipv6Prefix const & prefix) [copy constructor]
    cls.add_constructor([param('ns3::Ipv6Prefix const &', 'prefix')])
    ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(ns3::Ipv6Prefix const * prefix) [constructor]
    cls.add_constructor([param('ns3::Ipv6Prefix const *', 'prefix')])
    ## ipv6-address.h (module 'network'): void ns3::Ipv6Prefix::GetBytes(uint8_t * buf) const [member function]
    cls.add_method('GetBytes', 'void', [param('uint8_t *', 'buf')], is_const=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Prefix ns3::Ipv6Prefix::GetLoopback() [member function]
    cls.add_method('GetLoopback', 'ns3::Ipv6Prefix', [], is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Prefix ns3::Ipv6Prefix::GetOnes() [member function]
    cls.add_method('GetOnes', 'ns3::Ipv6Prefix', [], is_static=True)
    ## ipv6-address.h (module 'network'): uint8_t ns3::Ipv6Prefix::GetPrefixLength() const [member function]
    cls.add_method('GetPrefixLength', 'uint8_t', [], is_const=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Prefix ns3::Ipv6Prefix::GetZero() [member function]
    cls.add_method('GetZero', 'ns3::Ipv6Prefix', [], is_static=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Prefix::IsEqual(ns3::Ipv6Prefix const & other) const [member function]
    cls.add_method('IsEqual', 'bool', [param('ns3::Ipv6Prefix const &', 'other')], is_const=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Prefix::IsMatch(ns3::Ipv6Address a, ns3::Ipv6Address b) const [member function]
    cls.add_method('IsMatch', 'bool', [param('ns3::Ipv6Address', 'a'), param('ns3::Ipv6Address', 'b')], is_const=True)
    ## ipv6-address.h (module 'network'): void ns3::Ipv6Prefix::Print(std::ostream & os) const [member function]
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True)
    return

def register_Ns3Mac48Address_methods(root_module, cls):
    cls.add_binary_comparison_operator('<')
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    ## mac48-address.h (module 'network'): ns3::Mac48Address::Mac48Address(ns3::Mac48Address const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Mac48Address const &', 'arg0')])
    ## mac48-address.h (module 'network'): ns3::Mac48Address::Mac48Address() [constructor]
    cls.add_constructor([])
    ## mac48-address.h (module 'network'): ns3::Mac48Address::Mac48Address(char const * str) [constructor]
    cls.add_constructor([param('char const *', 'str')])
    ## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::Allocate() [member function]
    cls.add_method('Allocate', 'ns3::Mac48Address', [], is_static=True)
    ## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::ConvertFrom(ns3::Address const & address) [member function]
    cls.add_method('ConvertFrom', 'ns3::Mac48Address', [param('ns3::Address const &', 'address')], is_static=True)
def register_Ns3Mac48Address_methods(root_module, cls):
    cls.add_binary_comparison_operator('<')
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    ## mac48-address.h (module 'network'): ns3::Mac48Address::Mac48Address(ns3::Mac48Address const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Mac48Address const &', 'arg0')])
    ## mac48-address.h (module 'network'): ns3::Mac48Address::Mac48Address() [constructor]
    cls.add_constructor([])
    ## mac48-address.h (module 'network'): ns3::Mac48Address::Mac48Address(char const * str) [constructor]
    cls.add_constructor([param('char const *', 'str')])
    ## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::Allocate() [member function]
    cls.add_method('Allocate', 'ns3::Mac48Address', [], is_static=True)
    ## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::ConvertFrom(ns3::Address const & address) [member function]
    cls.add_method('ConvertFrom', 'ns3::Mac48Address', [param('ns3::Address const &', 'address')], is_static=True)
    ## mac48-address.h (module 'network'): void ns3::Mac48Address::CopyFrom(uint8_t const * buffer) [member function]
    cls.add_method('CopyFrom', 'void', [param('uint8_t const *', 'buffer')])
    ## mac48-address.h (module 'network'): void ns3::Mac48Address::CopyTo(uint8_t * buffer) const [member function]
    cls.add_method('CopyTo', 'void', [param('uint8_t *', 'buffer')], is_const=True)
    ## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::GetBroadcast() [member function]
    cls.add_method('GetBroadcast', 'ns3::Mac48Address', [], is_static=True)
    ## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::GetMulticast(ns3::Ipv4Address address) [member function]
    cls.add_method('GetMulticast', 'ns3::Mac48Address', [param('ns3::Ipv4Address', 'address')], is_static=True)
    ## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::GetMulticast(ns3::Ipv6Address address) [member function]
    cls.add_method('GetMulticast', 'ns3::Mac48Address', [param('ns3::Ipv6Address', 'address')], is_static=True)
    ## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::GetMulticast6Prefix() [member function]
    cls.add_method('GetMulticast6Prefix', 'ns3::Mac48Address', [], is_static=True)
    ## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::GetMulticastPrefix() [member function]
    cls.add_method('GetMulticastPrefix', 'ns3::Mac48Address', [], is_static=True)
    ## mac48-address.h (module 'network'): bool ns3::Mac48Address::IsBroadcast() const [member function]
    cls.add_method('IsBroadcast', 'bool', [], is_const=True)
    ## mac48-address.h (module 'network'): bool ns3::Mac48Address::IsGroup() const [member function]
    cls.add_method('IsGroup', 'bool', [], is_const=True)
    ## mac48-address.h (module 'network'): static bool ns3::Mac48Address::IsMatchingType(ns3::Address const & address) [member function]
    cls.add_method('IsMatchingType', 'bool', [param('ns3::Address const &', 'address')], is_static=True)
    return
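# Usage sketch (assumption: bindings importable as ns.network): Allocate()
# hands out distinct addresses, and GetMulticast() maps an IPv4 group to the
# 01:00:5e:xx:xx:xx multicast MAC range.
#
#   import ns.network
#   m1 = ns.network.Mac48Address.Allocate()
#   m2 = ns.network.Mac48Address.Allocate()
#   assert m1 != m2
#   mcast = ns.network.Mac48Address.GetMulticast(ns.network.Ipv4Address("224.0.0.1"))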
def register_Ns3NetDeviceContainer_methods(root_module, cls):
    ## net-device-container.h (module 'network'): ns3::NetDeviceContainer::NetDeviceContainer(ns3::NetDeviceContainer const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::NetDeviceContainer const &', 'arg0')])
    ## net-device-container.h (module 'network'): ns3::NetDeviceContainer::NetDeviceContainer() [constructor]
    cls.add_constructor([])
    ## net-device-container.h (module 'network'): ns3::NetDeviceContainer::NetDeviceContainer(ns3::Ptr<ns3::NetDevice> dev) [constructor]
    cls.add_constructor([param('ns3::Ptr< ns3::NetDevice >', 'dev')])
    ## net-device-container.h (module 'network'): ns3::NetDeviceContainer::NetDeviceContainer(std::string devName) [constructor]
    cls.add_constructor([param('std::string', 'devName')])
    ## net-device-container.h (module 'network'): ns3::NetDeviceContainer::NetDeviceContainer(ns3::NetDeviceContainer const & a, ns3::NetDeviceContainer const & b) [constructor]
    cls.add_constructor([param('ns3::NetDeviceContainer const &', 'a'), param('ns3::NetDeviceContainer const &', 'b')])
    ## net-device-container.h (module 'network'): void ns3::NetDeviceContainer::Add(ns3::NetDeviceContainer other) [member function]
    cls.add_method('Add', 'void', [param('ns3::NetDeviceContainer', 'other')])
    ## net-device-container.h (module 'network'): void ns3::NetDeviceContainer::Add(ns3::Ptr<ns3::NetDevice> device) [member function]
    cls.add_method('Add', 'void', [param('ns3::Ptr< ns3::NetDevice >', 'device')])
    ## net-device-container.h (module 'network'): void ns3::NetDeviceContainer::Add(std::string deviceName) [member function]
    cls.add_method('Add', 'void', [param('std::string', 'deviceName')])
    ## net-device-container.h (module 'network'): __gnu_cxx::__normal_iterator<const ns3::Ptr<ns3::NetDevice>*,std::vector<ns3::Ptr<ns3::NetDevice>, std::allocator<ns3::Ptr<ns3::NetDevice> > > > ns3::NetDeviceContainer::Begin() const [member function]
    cls.add_method('Begin', '__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::NetDevice > const, std::vector< ns3::Ptr< ns3::NetDevice > > >', [], is_const=True)
    ## net-device-container.h (module 'network'): __gnu_cxx::__normal_iterator<const ns3::Ptr<ns3::NetDevice>*,std::vector<ns3::Ptr<ns3::NetDevice>, std::allocator<ns3::Ptr<ns3::NetDevice> > > > ns3::NetDeviceContainer::End() const [member function]
    cls.add_method('End', '__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::NetDevice > const, std::vector< ns3::Ptr< ns3::NetDevice > > >', [], is_const=True)
    ## net-device-container.h (module 'network'): ns3::Ptr<ns3::NetDevice> ns3::NetDeviceContainer::Get(uint32_t i) const [member function]
    cls.add_method('Get', 'ns3::Ptr< ns3::NetDevice >', [param('uint32_t', 'i')], is_const=True)
    ## net-device-container.h (module 'network'): uint32_t ns3::NetDeviceContainer::GetN() const [member function]
    cls.add_method('GetN', 'uint32_t', [], is_const=True)
    return
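# Usage sketch (assumption: a device helper such as CsmaHelper produced the
# container named 'devices'): GetN()/Get(i), registered above, are the usual
# way to walk a container from Python, since Begin()/End() return C++ iterators.
#
#   for i in range(devices.GetN()):
#       dev = devices.Get(i)
#       print(dev.GetInstanceTypeId().GetName())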
def register_Ns3ObjectBase_methods(root_module, cls):
    ## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase() [constructor]
    cls.add_constructor([])
    ## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase(ns3::ObjectBase const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ObjectBase const &', 'arg0')])
    ## object-base.h (module 'core'): void ns3::ObjectBase::GetAttribute(std::string name, ns3::AttributeValue & value) const [member function]
    cls.add_method('GetAttribute', 'void', [param('std::string', 'name'), param('ns3::AttributeValue &', 'value')], is_const=True)
    ## object-base.h (module 'core'): bool ns3::ObjectBase::GetAttributeFailSafe(std::string name, ns3::AttributeValue & value) const [member function]
    cls.add_method('GetAttributeFailSafe', 'bool', [param('std::string', 'name'), param('ns3::AttributeValue &', 'value')], is_const=True)
    ## object-base.h (module 'core'): ns3::TypeId ns3::ObjectBase::GetInstanceTypeId() const [member function]
    cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## object-base.h (module 'core'): static ns3::TypeId ns3::ObjectBase::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## object-base.h (module 'core'): void ns3::ObjectBase::SetAttribute(std::string name, ns3::AttributeValue const & value) [member function]
    cls.add_method('SetAttribute', 'void', [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
    ## object-base.h (module 'core'): bool ns3::ObjectBase::SetAttributeFailSafe(std::string name, ns3::AttributeValue const & value) [member function]
    cls.add_method('SetAttributeFailSafe', 'bool', [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
    ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function]
    cls.add_method('TraceConnect', 'bool', [param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')])
    ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function]
    cls.add_method('TraceConnectWithoutContext', 'bool', [param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
    ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceDisconnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function]
    cls.add_method('TraceDisconnect', 'bool', [param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')])
    ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceDisconnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function]
    cls.add_method('TraceDisconnectWithoutContext', 'bool', [param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
    ## object-base.h (module 'core'): void ns3::ObjectBase::ConstructSelf(ns3::AttributeConstructionList const & attributes) [member function]
    cls.add_method('ConstructSelf', 'void', [param('ns3::AttributeConstructionList const &', 'attributes')], visibility='protected')
    ## object-base.h (module 'core'): void ns3::ObjectBase::NotifyConstructionCompleted() [member function]
    cls.add_method('NotifyConstructionCompleted', 'void', [], visibility='protected', is_virtual=True)
    return
def register_Ns3ObjectDeleter_methods(root_module, cls):
    ## object.h (module 'core'): ns3::ObjectDeleter::ObjectDeleter() [constructor]
    cls.add_constructor([])
    ## object.h (module 'core'): ns3::ObjectDeleter::ObjectDeleter(ns3::ObjectDeleter const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ObjectDeleter const &', 'arg0')])
    ## object.h (module 'core'): static void ns3::ObjectDeleter::Delete(ns3::Object * object) [member function]
    cls.add_method('Delete', 'void', [param('ns3::Object *', 'object')], is_static=True)
    return
def register_Ns3ObjectFactory_methods(root_module, cls):
    cls.add_output_stream_operator()
    ## object-factory.h (module 'core'): ns3::ObjectFactory::ObjectFactory(ns3::ObjectFactory const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ObjectFactory const &', 'arg0')])
    ## object-factory.h (module 'core'): ns3::ObjectFactory::ObjectFactory() [constructor]
    cls.add_constructor([])
    ## object-factory.h (module 'core'): ns3::ObjectFactory::ObjectFactory(std::string typeId) [constructor]
    cls.add_constructor([param('std::string', 'typeId')])
    ## object-factory.h (module 'core'): ns3::Ptr<ns3::Object> ns3::ObjectFactory::Create() const [member function]
    cls.add_method('Create', 'ns3::Ptr< ns3::Object >', [], is_const=True)
    ## object-factory.h (module 'core'): ns3::TypeId ns3::ObjectFactory::GetTypeId() const [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_const=True)
    ## object-factory.h (module 'core'): void ns3::ObjectFactory::Set(std::string name, ns3::AttributeValue const & value) [member function]
    cls.add_method('Set', 'void', [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
    ## object-factory.h (module 'core'): void ns3::ObjectFactory::SetTypeId(ns3::TypeId tid) [member function]
    cls.add_method('SetTypeId', 'void', [param('ns3::TypeId', 'tid')])
    ## object-factory.h (module 'core'): void ns3::ObjectFactory::SetTypeId(char const * tid) [member function]
    cls.add_method('SetTypeId', 'void', [param('char const *', 'tid')])
    ## object-factory.h (module 'core'): void ns3::ObjectFactory::SetTypeId(std::string tid) [member function]
    cls.add_method('SetTypeId', 'void', [param('std::string', 'tid')])
    return
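# Usage sketch (assumption: bindings importable as ns.core; the TypeId and
# attribute names below are illustrative): ObjectFactory pairs a TypeId with
# attribute overrides, and ObjectBase::SetAttribute tweaks a live object.
#
#   import ns.core
#   factory = ns.core.ObjectFactory()
#   factory.SetTypeId("ns3::DropTailQueue")
#   factory.Set("MaxPackets", ns.core.UintegerValue(50))
#   obj = factory.Create()
#   obj.SetAttribute("MaxPackets", ns.core.UintegerValue(100))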
def register_Ns3PacketMetadata_methods(root_module, cls):
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::PacketMetadata(uint64_t uid, uint32_t size) [constructor]
    cls.add_constructor([param('uint64_t', 'uid'), param('uint32_t', 'size')])
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::PacketMetadata(ns3::PacketMetadata const & o) [copy constructor]
    cls.add_constructor([param('ns3::PacketMetadata const &', 'o')])
    ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddAtEnd(ns3::PacketMetadata const & o) [member function]
    cls.add_method('AddAtEnd', 'void', [param('ns3::PacketMetadata const &', 'o')])
    ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddHeader(ns3::Header const & header, uint32_t size) [member function]
    cls.add_method('AddHeader', 'void', [param('ns3::Header const &', 'header'), param('uint32_t', 'size')])
    ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddPaddingAtEnd(uint32_t end) [member function]
    cls.add_method('AddPaddingAtEnd', 'void', [param('uint32_t', 'end')])
    ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddTrailer(ns3::Trailer const & trailer, uint32_t size) [member function]
    cls.add_method('AddTrailer', 'void', [param('ns3::Trailer const &', 'trailer'), param('uint32_t', 'size')])
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator ns3::PacketMetadata::BeginItem(ns3::Buffer buffer) const [member function]
    cls.add_method('BeginItem', 'ns3::PacketMetadata::ItemIterator', [param('ns3::Buffer', 'buffer')], is_const=True)
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata ns3::PacketMetadata::CreateFragment(uint32_t start, uint32_t end) const [member function]
    cls.add_method('CreateFragment', 'ns3::PacketMetadata', [param('uint32_t', 'start'), param('uint32_t', 'end')], is_const=True)
    ## packet-metadata.h (module 'network'): uint32_t ns3::PacketMetadata::Deserialize(uint8_t const * buffer, uint32_t size) [member function]
    cls.add_method('Deserialize', 'uint32_t', [param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
    ## packet-metadata.h (module 'network'): static void ns3::PacketMetadata::Enable() [member function]
    cls.add_method('Enable', 'void', [], is_static=True)
    ## packet-metadata.h (module 'network'): static void ns3::PacketMetadata::EnableChecking() [member function]
    cls.add_method('EnableChecking', 'void', [], is_static=True)
    ## packet-metadata.h (module 'network'): uint32_t ns3::PacketMetadata::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True)
    ## packet-metadata.h (module 'network'): uint64_t ns3::PacketMetadata::GetUid() const [member function]
    cls.add_method('GetUid', 'uint64_t', [], is_const=True)
    ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveAtEnd(uint32_t end) [member function]
    cls.add_method('RemoveAtEnd', 'void', [param('uint32_t', 'end')])
    ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveAtStart(uint32_t start) [member function]
    cls.add_method('RemoveAtStart', 'void', [param('uint32_t', 'start')])
    ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveHeader(ns3::Header const & header, uint32_t size) [member function]
    cls.add_method('RemoveHeader', 'void', [param('ns3::Header const &', 'header'), param('uint32_t', 'size')])
    ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveTrailer(ns3::Trailer const & trailer, uint32_t size) [member function]
    cls.add_method('RemoveTrailer', 'void', [param('ns3::Trailer const &', 'trailer'), param('uint32_t', 'size')])
    ## packet-metadata.h (module 'network'): uint32_t ns3::PacketMetadata::Serialize(uint8_t * buffer, uint32_t maxSize) const [member function]
    cls.add_method('Serialize', 'uint32_t', [param('uint8_t *', 'buffer'), param('uint32_t', 'maxSize')], is_const=True)
    return
def register_Ns3PacketMetadataItem_methods(root_module, cls):
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::Item() [constructor]
    cls.add_constructor([])
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::Item(ns3::PacketMetadata::Item const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::PacketMetadata::Item const &', 'arg0')])
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::current [variable]
    cls.add_instance_attribute('current', 'ns3::Buffer::Iterator', is_const=False)
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::currentSize [variable]
    cls.add_instance_attribute('currentSize', 'uint32_t', is_const=False)
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::currentTrimedFromEnd [variable]
    cls.add_instance_attribute('currentTrimedFromEnd', 'uint32_t', is_const=False)
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::currentTrimedFromStart [variable]
    cls.add_instance_attribute('currentTrimedFromStart', 'uint32_t', is_const=False)
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::isFragment [variable]
    cls.add_instance_attribute('isFragment', 'bool', is_const=False)
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::tid [variable]
    cls.add_instance_attribute('tid', 'ns3::TypeId', is_const=False)
    return
def register_Ns3PacketMetadataItemIterator_methods(root_module, cls):
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator::ItemIterator(ns3::PacketMetadata::ItemIterator const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::PacketMetadata::ItemIterator const &', 'arg0')])
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator::ItemIterator(ns3::PacketMetadata const * metadata, ns3::Buffer buffer) [constructor]
    cls.add_constructor([param('ns3::PacketMetadata const *', 'metadata'), param('ns3::Buffer', 'buffer')])
    ## packet-metadata.h (module 'network'): bool ns3::PacketMetadata::ItemIterator::HasNext() const [member function]
    cls.add_method('HasNext', 'bool', [], is_const=True)
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item ns3::PacketMetadata::ItemIterator::Next() [member function]
    cls.add_method('Next', 'ns3::PacketMetadata::Item', [])
    return
def register_Ns3PacketTagIterator_methods(root_module, cls):
    ## packet.h (module 'network'): ns3::PacketTagIterator::PacketTagIterator(ns3::PacketTagIterator const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::PacketTagIterator const &', 'arg0')])
    ## packet.h (module 'network'): bool ns3::PacketTagIterator::HasNext() const [member function]
    cls.add_method('HasNext', 'bool', [], is_const=True)
    ## packet.h (module 'network'): ns3::PacketTagIterator::Item ns3::PacketTagIterator::Next() [member function]
    cls.add_method('Next', 'ns3::PacketTagIterator::Item', [])
    return
def register_Ns3PacketTagIteratorItem_methods(root_module, cls):
    ## packet.h (module 'network'): ns3::PacketTagIterator::Item::Item(ns3::PacketTagIterator::Item const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::PacketTagIterator::Item const &', 'arg0')])
    ## packet.h (module 'network'): void ns3::PacketTagIterator::Item::GetTag(ns3::Tag & tag) const [member function]
    cls.add_method('GetTag', 'void', [param('ns3::Tag &', 'tag')], is_const=True)
    ## packet.h (module 'network'): ns3::TypeId ns3::PacketTagIterator::Item::GetTypeId() const [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_const=True)
    return
def register_Ns3PacketTagList_methods(root_module, cls):
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::PacketTagList() [constructor]
    cls.add_constructor([])
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::PacketTagList(ns3::PacketTagList const & o) [copy constructor]
    cls.add_constructor([param('ns3::PacketTagList const &', 'o')])
    ## packet-tag-list.h (module 'network'): void ns3::PacketTagList::Add(ns3::Tag const & tag) const [member function]
    cls.add_method('Add', 'void', [param('ns3::Tag const &', 'tag')], is_const=True)
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData const * ns3::PacketTagList::Head() const [member function]
    cls.add_method('Head', 'ns3::PacketTagList::TagData const *', [], is_const=True)
    ## packet-tag-list.h (module 'network'): bool ns3::PacketTagList::Peek(ns3::Tag & tag) const [member function]
    cls.add_method('Peek', 'bool', [param('ns3::Tag &', 'tag')], is_const=True)
    ## packet-tag-list.h (module 'network'): bool ns3::PacketTagList::Remove(ns3::Tag & tag) [member function]
    cls.add_method('Remove', 'bool', [param('ns3::Tag &', 'tag')])
    ## packet-tag-list.h (module 'network'): void ns3::PacketTagList::RemoveAll() [member function]
    cls.add_method('RemoveAll', 'void', [])
    ## packet-tag-list.h (module 'network'): bool ns3::PacketTagList::Replace(ns3::Tag & tag) [member function]
    cls.add_method('Replace', 'bool', [param('ns3::Tag &', 'tag')])
    return
def register_Ns3PacketTagListTagData_methods(root_module, cls):
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::TagData() [constructor]
    cls.add_constructor([])
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::TagData(ns3::PacketTagList::TagData const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::PacketTagList::TagData const &', 'arg0')])
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::count [variable]
    cls.add_instance_attribute('count', 'uint32_t', is_const=False)
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::data [variable]
    cls.add_instance_attribute('data', 'uint8_t [ 21 ]', is_const=False)
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::next [variable]
    cls.add_instance_attribute('next', 'ns3::PacketTagList::TagData *', is_const=False)
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::tid [variable]
    cls.add_instance_attribute('tid', 'ns3::TypeId', is_const=False)
    return
def register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, cls):
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::SimpleRefCount(ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return
def register_Ns3Tag_methods(root_module, cls):
    ## tag.h (module 'network'): ns3::Tag::Tag() [constructor]
    cls.add_constructor([])
    ## tag.h (module 'network'): ns3::Tag::Tag(ns3::Tag const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Tag const &', 'arg0')])
    ## tag.h (module 'network'): void ns3::Tag::Deserialize(ns3::TagBuffer i) [member function]
    cls.add_method('Deserialize', 'void', [param('ns3::TagBuffer', 'i')], is_pure_virtual=True, is_virtual=True)
    ## tag.h (module 'network'): uint32_t ns3::Tag::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## tag.h (module 'network'): static ns3::TypeId ns3::Tag::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## tag.h (module 'network'): void ns3::Tag::Print(std::ostream & os) const [member function]
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## tag.h (module 'network'): void ns3::Tag::Serialize(ns3::TagBuffer i) const [member function]
    cls.add_method('Serialize', 'void', [param('ns3::TagBuffer', 'i')], is_pure_virtual=True, is_const=True, is_virtual=True)
    return
def register_Ns3TagBuffer_methods(root_module, cls):
    ## tag-buffer.h (module 'network'): ns3::TagBuffer::TagBuffer(ns3::TagBuffer const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::TagBuffer const &', 'arg0')])
    ## tag-buffer.h (module 'network'): ns3::TagBuffer::TagBuffer(uint8_t * start, uint8_t * end) [constructor]
    cls.add_constructor([param('uint8_t *', 'start'), param('uint8_t *', 'end')])
    ## tag-buffer.h (module 'network'): void ns3::TagBuffer::CopyFrom(ns3::TagBuffer o) [member function]
    cls.add_method('CopyFrom', 'void', [param('ns3::TagBuffer', 'o')])
    ## tag-buffer.h (module 'network'): void ns3::TagBuffer::Read(uint8_t * buffer, uint32_t size) [member function]
    cls.add_method('Read', 'void', [param('uint8_t *', 'buffer'), param('uint32_t', 'size')])
    ## tag-buffer.h (module 'network'): double ns3::TagBuffer::ReadDouble() [member function]
    cls.add_method('ReadDouble', 'double', [])
    ## tag-buffer.h (module 'network'): uint16_t ns3::TagBuffer::ReadU16() [member function]
    cls.add_method('ReadU16', 'uint16_t', [])
    ## tag-buffer.h (module 'network'): uint32_t ns3::TagBuffer::ReadU32() [member function]
    cls.add_method('ReadU32', 'uint32_t', [])
    ## tag-buffer.h (module 'network'): uint64_t ns3::TagBuffer::ReadU64() [member function]
    cls.add_method('ReadU64', 'uint64_t', [])
    ## tag-buffer.h (module 'network'): uint8_t ns3::TagBuffer::ReadU8() [member function]
    cls.add_method('ReadU8', 'uint8_t', [])
    ## tag-buffer.h (module 'network'): void ns3::TagBuffer::TrimAtEnd(uint32_t trim) [member function]
    cls.add_method('TrimAtEnd', 'void', [param('uint32_t', 'trim')])
    ## tag-buffer.h (module 'network'): void ns3::TagBuffer::Write(uint8_t const * buffer, uint32_t size) [member function]
    cls.add_method('Write', 'void', [param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
    ## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteDouble(double v) [member function]
    cls.add_method('WriteDouble', 'void', [param('double', 'v')])
    ## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU16(uint16_t data) [member function]
    cls.add_method('WriteU16', 'void', [param('uint16_t', 'data')])
    ## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU32(uint32_t data) [member function]
    cls.add_method('WriteU32', 'void', [param('uint32_t', 'data')])
    ## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU64(uint64_t v) [member function]
    cls.add_method('WriteU64', 'void', [param('uint64_t', 'v')])
    ## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU8(uint8_t v) [member function]
    cls.add_method('WriteU8', 'void', [param('uint8_t', 'v')])
    return
def register_Ns3TapBridgeHelper_methods(root_module, cls):
    ## tap-bridge-helper.h (module 'tap-bridge'): ns3::TapBridgeHelper::TapBridgeHelper(ns3::TapBridgeHelper const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::TapBridgeHelper const &', 'arg0')])
    ## tap-bridge-helper.h (module 'tap-bridge'): ns3::TapBridgeHelper::TapBridgeHelper() [constructor]
    cls.add_constructor([])
    ## tap-bridge-helper.h (module 'tap-bridge'): ns3::TapBridgeHelper::TapBridgeHelper(ns3::Ipv4Address gateway) [constructor]
    cls.add_constructor([param('ns3::Ipv4Address', 'gateway')])
    ## tap-bridge-helper.h (module 'tap-bridge'): ns3::Ptr<ns3::NetDevice> ns3::TapBridgeHelper::Install(ns3::Ptr<ns3::Node> node, ns3::Ptr<ns3::NetDevice> nd) [member function]
    cls.add_method('Install', 'ns3::Ptr< ns3::NetDevice >', [param('ns3::Ptr< ns3::Node >', 'node'), param('ns3::Ptr< ns3::NetDevice >', 'nd')])
    ## tap-bridge-helper.h (module 'tap-bridge'): ns3::Ptr<ns3::NetDevice> ns3::TapBridgeHelper::Install(std::string nodeName, ns3::Ptr<ns3::NetDevice> nd) [member function]
    cls.add_method('Install', 'ns3::Ptr< ns3::NetDevice >', [param('std::string', 'nodeName'), param('ns3::Ptr< ns3::NetDevice >', 'nd')])
    ## tap-bridge-helper.h (module 'tap-bridge'): ns3::Ptr<ns3::NetDevice> ns3::TapBridgeHelper::Install(ns3::Ptr<ns3::Node> node, std::string ndName) [member function]
    cls.add_method('Install', 'ns3::Ptr< ns3::NetDevice >', [param('ns3::Ptr< ns3::Node >', 'node'), param('std::string', 'ndName')])
    ## tap-bridge-helper.h (module 'tap-bridge'): ns3::Ptr<ns3::NetDevice> ns3::TapBridgeHelper::Install(std::string nodeName, std::string ndName) [member function]
    cls.add_method('Install', 'ns3::Ptr< ns3::NetDevice >', [param('std::string', 'nodeName'), param('std::string', 'ndName')])
    ## tap-bridge-helper.h (module 'tap-bridge'): ns3::Ptr<ns3::NetDevice> ns3::TapBridgeHelper::Install(ns3::Ptr<ns3::Node> node, ns3::Ptr<ns3::NetDevice> nd, ns3::AttributeValue const & bridgeType) [member function]
    cls.add_method('Install', 'ns3::Ptr< ns3::NetDevice >', [param('ns3::Ptr< ns3::Node >', 'node'), param('ns3::Ptr< ns3::NetDevice >', 'nd'), param('ns3::AttributeValue const &', 'bridgeType')])
    ## tap-bridge-helper.h (module 'tap-bridge'): void ns3::TapBridgeHelper::SetAttribute(std::string n1, ns3::AttributeValue const & v1) [member function]
    cls.add_method('SetAttribute', 'void', [param('std::string', 'n1'), param('ns3::AttributeValue const &', 'v1')])
    return
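# Usage sketch (not part of the generated bindings): TapBridgeHelper is the
# public entry point of this module. "Mode" and "DeviceName" are documented
# ns3::TapBridge attributes; 'node' and 'device' are assumed to come from
# earlier topology setup in the caller's script.
#
#   import ns.core
#   import ns.tap_bridge
#   tap = ns.tap_bridge.TapBridgeHelper()
#   tap.SetAttribute("Mode", ns.core.StringValue("UseLocal"))
#   tap.SetAttribute("DeviceName", ns.core.StringValue("tap0"))
#   bridge_dev = tap.Install(node, device)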
def register_Ns3TimeWithUnit_methods(root_module, cls):
    cls.add_output_stream_operator()
    ## nstime.h (module 'core'): ns3::TimeWithUnit::TimeWithUnit(ns3::TimeWithUnit const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::TimeWithUnit const &', 'arg0')])
    ## nstime.h (module 'core'): ns3::TimeWithUnit::TimeWithUnit(ns3::Time const time, ns3::Time::Unit const unit) [constructor]
    cls.add_constructor([param('ns3::Time const', 'time'), param('ns3::Time::Unit const', 'unit')])
    return
def register_Ns3TypeId_methods(root_module, cls):
    cls.add_binary_comparison_operator('<')
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    ## type-id.h (module 'core'): ns3::TypeId::TypeId(char const * name) [constructor]
    cls.add_constructor([param('char const *', 'name')])
    ## type-id.h (module 'core'): ns3::TypeId::TypeId() [constructor]
    cls.add_constructor([])
    ## type-id.h (module 'core'): ns3::TypeId::TypeId(ns3::TypeId const & o) [copy constructor]
    cls.add_constructor([param('ns3::TypeId const &', 'o')])
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, ns3::AttributeValue const & initialValue, ns3::Ptr<ns3::AttributeAccessor const> accessor, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('AddAttribute', 'ns3::TypeId', [param('std::string', 'name'), param('std::string', 'help'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')])
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, uint32_t flags, ns3::AttributeValue const & initialValue, ns3::Ptr<ns3::AttributeAccessor const> accessor, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('AddAttribute', 'ns3::TypeId', [param('std::string', 'name'), param('std::string', 'help'), param('uint32_t', 'flags'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')])
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddTraceSource(std::string name, std::string help, ns3::Ptr<ns3::TraceSourceAccessor const> accessor) [member function]
    cls.add_method('AddTraceSource', 'ns3::TypeId', [param('std::string', 'name'), param('std::string', 'help'), param('ns3::Ptr< ns3::TraceSourceAccessor const >', 'accessor')], deprecated=True)
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddTraceSource(std::string name, std::string help, ns3::Ptr<ns3::TraceSourceAccessor const> accessor, std::string callback) [member function]
    cls.add_method('AddTraceSource', 'ns3::TypeId', [param('std::string', 'name'), param('std::string', 'help'), param('ns3::Ptr< ns3::TraceSourceAccessor const >', 'accessor'), param('std::string', 'callback')])
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation ns3::TypeId::GetAttribute(uint32_t i) const [member function]
    cls.add_method('GetAttribute', 'ns3::TypeId::AttributeInformation', [param('uint32_t', 'i')], is_const=True)
    ## type-id.h (module 'core'): std::string ns3::TypeId::GetAttributeFullName(uint32_t i) const [member function]
    cls.add_method('GetAttributeFullName', 'std::string', [param('uint32_t', 'i')], is_const=True)
    ## type-id.h (module 'core'): uint32_t ns3::TypeId::GetAttributeN() const [member function]
    cls.add_method('GetAttributeN', 'uint32_t', [], is_const=True)
    ## type-id.h (module 'core'): ns3::Callback<ns3::ObjectBase*,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> ns3::TypeId::GetConstructor() const [member function]
    cls.add_method('GetConstructor', 'ns3::Callback< ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', [], is_const=True)
    ## type-id.h (module 'core'): std::string ns3::TypeId::GetGroupName() const [member function]
    cls.add_method('GetGroupName', 'std::string', [], is_const=True)
    ## type-id.h (module 'core'): uint32_t ns3::TypeId::GetHash() const [member function]
    cls.add_method('GetHash', 'uint32_t', [], is_const=True)
    ## type-id.h (module 'core'): std::string ns3::TypeId::GetName() const [member function]
    cls.add_method('GetName', 'std::string', [], is_const=True)
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::GetParent() const [member function]
    cls.add_method('GetParent', 'ns3::TypeId', [], is_const=True)
    ## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::GetRegistered(uint32_t i) [member function]
    cls.add_method('GetRegistered', 'ns3::TypeId', [param('uint32_t', 'i')], is_static=True)
    ## type-id.h (module 'core'): static uint32_t ns3::TypeId::GetRegisteredN() [member function]
    cls.add_method('GetRegisteredN', 'uint32_t', [], is_static=True)
    ## type-id.h (module 'core'): std::size_t ns3::TypeId::GetSize() const [member function]
    cls.add_method('GetSize', 'std::size_t', [], is_const=True)
    ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation ns3::TypeId::GetTraceSource(uint32_t i) const [member function]
    cls.add_method('GetTraceSource', 'ns3::TypeId::TraceSourceInformation', [param('uint32_t', 'i')], is_const=True)
    ## type-id.h (module 'core'): uint32_t ns3::TypeId::GetTraceSourceN() const [member function]
    cls.add_method('GetTraceSourceN', 'uint32_t', [], is_const=True)
    ## type-id.h (module 'core'): uint16_t ns3::TypeId::GetUid() const [member function]
    cls.add_method('GetUid', 'uint16_t', [], is_const=True)
    ## type-id.h (module 'core'): bool ns3::TypeId::HasConstructor() const [member function]
    cls.add_method('HasConstructor', 'bool', [], is_const=True)
    ## type-id.h (module 'core'): bool ns3::TypeId::HasParent() const [member function]
    cls.add_method('HasParent', 'bool', [], is_const=True)
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::HideFromDocumentation() [member function]
    cls.add_method('HideFromDocumentation', 'ns3::TypeId', [])
    ## type-id.h (module 'core'): bool ns3::TypeId::IsChildOf(ns3::TypeId other) const [member function]
    cls.add_method('IsChildOf', 'bool', [param('ns3::TypeId', 'other')], is_const=True)
    ## type-id.h (module 'core'): bool ns3::TypeId::LookupAttributeByName(std::string name, ns3::TypeId::AttributeInformation * info) const [member function]
    cls.add_method('LookupAttributeByName', 'bool', [param('std::string', 'name'), param('ns3::TypeId::AttributeInformation *', 'info', transfer_ownership=False)], is_const=True)
    ## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::LookupByHash(uint32_t hash) [member function]
    cls.add_method('LookupByHash', 'ns3::TypeId', [param('uint32_t', 'hash')], is_static=True)
    ## type-id.h (module 'core'): static bool ns3::TypeId::LookupByHashFailSafe(uint32_t hash, ns3::TypeId * tid) [member function]
    cls.add_method('LookupByHashFailSafe', 'bool', [param('uint32_t', 'hash'), param('ns3::TypeId *', 'tid')], is_static=True)
    ## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::LookupByName(std::string name) [member function]
    cls.add_method('LookupByName', 'ns3::TypeId', [param('std::string', 'name')], is_static=True)
    ## type-id.h (module 'core'): ns3::Ptr<ns3::TraceSourceAccessor const> ns3::TypeId::LookupTraceSourceByName(std::string name) const [member function]
    cls.add_method('LookupTraceSourceByName', 'ns3::Ptr< ns3::TraceSourceAccessor const >', [param('std::string', 'name')], is_const=True)
    ## type-id.h (module 'core'): bool ns3::TypeId::MustHideFromDocumentation() const [member function]
    cls.add_method('MustHideFromDocumentation', 'bool', [], is_const=True)
    ## type-id.h (module 'core'): bool ns3::TypeId::SetAttributeInitialValue(uint32_t i, ns3::Ptr<ns3::AttributeValue const> initialValue) [member function]
    cls.add_method('SetAttributeInitialValue', 'bool', [param('uint32_t', 'i'), param('ns3::Ptr< ns3::AttributeValue const >', 'initialValue')])
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetGroupName(std::string groupName) [member function]
    cls.add_method('SetGroupName', 'ns3::TypeId', [param('std::string', 'groupName')])
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetParent(ns3::TypeId tid) [member function]
    cls.add_method('SetParent', 'ns3::TypeId', [param('ns3::TypeId', 'tid')])
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetSize(std::size_t size) [member function]
    cls.add_method('SetSize', 'ns3::TypeId', [param('std::size_t', 'size')])
    ## type-id.h (module 'core'): void ns3::TypeId::SetUid(uint16_t tid) [member function]
    cls.add_method('SetUid', 'void', [param('uint16_t', 'tid')])
    return
def register_Ns3TypeIdAttributeInformation_methods(root_module, cls):
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::AttributeInformation() [constructor]
    cls.add_constructor([])
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::AttributeInformation(ns3::TypeId::AttributeInformation const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::TypeId::AttributeInformation const &', 'arg0')])
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::accessor [variable]
    cls.add_instance_attribute('accessor', 'ns3::Ptr< ns3::AttributeAccessor const >', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::checker [variable]
    cls.add_instance_attribute('checker', 'ns3::Ptr< ns3::AttributeChecker const >', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::flags [variable]
    cls.add_instance_attribute('flags', 'uint32_t', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::help [variable]
    cls.add_instance_attribute('help', 'std::string', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::initialValue [variable]
    cls.add_instance_attribute('initialValue', 'ns3::Ptr< ns3::AttributeValue const >', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::name [variable]
    cls.add_instance_attribute('name', 'std::string', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::originalInitialValue [variable]
    cls.add_instance_attribute('originalInitialValue', 'ns3::Ptr< ns3::AttributeValue const >', is_const=False)
    return
def register_Ns3TypeIdTraceSourceInformation_methods(root_module, cls):
    ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::TraceSourceInformation() [constructor]
    cls.add_constructor([])
    ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::TraceSourceInformation(ns3::TypeId::TraceSourceInformation const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::TypeId::TraceSourceInformation const &', 'arg0')])
    ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::accessor [variable]
    cls.add_instance_attribute('accessor', 'ns3::Ptr< ns3::TraceSourceAccessor const >', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::callback [variable]
    cls.add_instance_attribute('callback', 'std::string', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::help [variable]
    cls.add_instance_attribute('help', 'std::string', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::name [variable]
    cls.add_instance_attribute('name', 'std::string', is_const=False)
    return
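# Usage sketch (assumption: bindings importable as ns.core): the introspection
# methods registered above make TypeId self-describing, e.g. listing the
# attributes of a type by name together with their help strings.
#
#   import ns.core
#   tid = ns.core.TypeId.LookupByName("ns3::TapBridge")
#   for i in range(tid.GetAttributeN()):
#       info = tid.GetAttribute(i)
#       print(tid.GetAttributeFullName(i), "-", info.help)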
def register_Ns3Empty_methods(root_module, cls):
    ## empty.h (module 'core'): ns3::empty::empty() [constructor]
    cls.add_constructor([])
    ## empty.h (module 'core'): ns3::empty::empty(ns3::empty const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::empty const &', 'arg0')])
    return
def register_Ns3Int64x64_t_methods(root_module, cls):
    cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', u'right'))
    cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', u'right'))
    cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', u'right'))
    cls.add_unary_numeric_operator('-')
    cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', u'right'))
    cls.add_binary_comparison_operator('<')
    cls.add_binary_comparison_operator('>')
    cls.add_binary_comparison_operator('!=')
    cls.add_inplace_numeric_operator('*=', param('ns3::int64x64_t const &', u'right'))
    cls.add_inplace_numeric_operator('+=', param('ns3::int64x64_t const &', u'right'))
    cls.add_inplace_numeric_operator('-=', param('ns3::int64x64_t const &', u'right'))
    cls.add_inplace_numeric_operator('/=', param('ns3::int64x64_t const &', u'right'))
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('<=')
    cls.add_binary_comparison_operator('==')
    cls.add_binary_comparison_operator('>=')
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t() [constructor]
    cls.add_constructor([])
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(double v) [constructor]
    cls.add_constructor([param('double', 'v')])
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long double v) [constructor]
    cls.add_constructor([param('long double', 'v')])
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(int v) [constructor]
    cls.add_constructor([param('int', 'v')])
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long int v) [constructor]
    cls.add_constructor([param('long int', 'v')])
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long long int v) [constructor]
    cls.add_constructor([param('long long int', 'v')])
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(unsigned int v) [constructor]
    cls.add_constructor([param('unsigned int', 'v')])
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long unsigned int v) [constructor]
    cls.add_constructor([param('long unsigned int', 'v')])
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long long unsigned int v) [constructor]
    cls.add_constructor([param('long long unsigned int', 'v')])
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(int64_t hi, uint64_t lo) [constructor]
    cls.add_constructor([param('int64_t', 'hi'), param('uint64_t', 'lo')])
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(ns3::int64x64_t const & o) [copy constructor]
    cls.add_constructor([param('ns3::int64x64_t const &', 'o')])
    ## int64x64-double.h (module 'core'): double ns3::int64x64_t::GetDouble() const [member function]
    cls.add_method('GetDouble', 'double', [], is_const=True)
    ## int64x64-double.h (module 'core'): int64_t ns3::int64x64_t::GetHigh() const [member function]
    cls.add_method('GetHigh', 'int64_t', [], is_const=True)
    ## int64x64-double.h (module 'core'): uint64_t ns3::int64x64_t::GetLow() const [member function]
    cls.add_method('GetLow', 'uint64_t', [], is_const=True)
    ## int64x64-double.h (module 'core'): static ns3::int64x64_t ns3::int64x64_t::Invert(uint64_t v) [member function]
    cls.add_method('Invert', 'ns3::int64x64_t', [param('uint64_t', 'v')], is_static=True)
    ## int64x64-double.h (module 'core'): void ns3::int64x64_t::MulByInvert(ns3::int64x64_t const & o) [member function]
    cls.add_method('MulByInvert', 'void', [param('ns3::int64x64_t const &', 'o')])
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::implementation [variable]
    cls.add_static_attribute('implementation', 'ns3::int64x64_t::impl_type const', is_const=True)
    return
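# Usage sketch (assumption: the int64x64_t wrapper is constructible from
# Python floats and ints as registered above; this scan wraps the
# double-backed implementation from int64x64-double.h):
#
#   import ns.core
#   q = ns.core.int64x64_t(1.5) * ns.core.int64x64_t(3)
#   print(q.GetDouble())   # -> 4.5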
def register_Ns3Chunk_methods(root_module, cls):
    ## chunk.h (module 'network'): ns3::Chunk::Chunk() [constructor]
    cls.add_constructor([])
    ## chunk.h (module 'network'): ns3::Chunk::Chunk(ns3::Chunk const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Chunk const &', 'arg0')])
    ## chunk.h (module 'network'): uint32_t ns3::Chunk::Deserialize(ns3::Buffer::Iterator start) [member function]
    cls.add_method('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'start')], is_pure_virtual=True, is_virtual=True)
    ## chunk.h (module 'network'): static ns3::TypeId ns3::Chunk::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## chunk.h (module 'network'): void ns3::Chunk::Print(std::ostream & os) const [member function]
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_pure_virtual=True, is_const=True, is_virtual=True)
    return
def register_Ns3Header_methods(root_module, cls):
    cls.add_output_stream_operator()
    ## header.h (module 'network'): ns3::Header::Header() [constructor]
    cls.add_constructor([])
    ## header.h (module 'network'): ns3::Header::Header(ns3::Header const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Header const &', 'arg0')])
    ## header.h (module 'network'): uint32_t ns3::Header::Deserialize(ns3::Buffer::Iterator start) [member function]
    cls.add_method('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'start')], is_pure_virtual=True, is_virtual=True)
    ## header.h (module 'network'): uint32_t ns3::Header::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## header.h (module 'network'): static ns3::TypeId ns3::Header::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## header.h (module 'network'): void ns3::Header::Print(std::ostream & os) const [member function]
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## header.h (module 'network'): void ns3::Header::Serialize(ns3::Buffer::Iterator start) const [member function]
    cls.add_method('Serialize', 'void', [param('ns3::Buffer::Iterator', 'start')], is_pure_virtual=True, is_const=True, is_virtual=True)
    return
def register_Ns3Object_methods(root_module, cls):
    ## object.h (module 'core'): ns3::Object::Object() [constructor]
    cls.add_constructor([])
    ## object.h (module 'core'): void ns3::Object::AggregateObject(ns3::Ptr<ns3::Object> other) [member function]
    cls.add_method('AggregateObject', 'void', [param('ns3::Ptr< ns3::Object >', 'other')])
    ## object.h (module 'core'): void ns3::Object::Dispose() [member function]
    cls.add_method('Dispose', 'void', [])
    ## object.h (module 'core'): ns3::Object::AggregateIterator ns3::Object::GetAggregateIterator() const [member function]
    cls.add_method('GetAggregateIterator', 'ns3::Object::AggregateIterator', [], is_const=True)
    ## object.h (module 'core'): ns3::TypeId ns3::Object::GetInstanceTypeId() const [member function]
    cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_const=True, is_virtual=True)
    ## object.h (module 'core'): static ns3::TypeId ns3::Object::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## object.h (module 'core'): void ns3::Object::Initialize() [member function]
    cls.add_method('Initialize', 'void', [])
    ## object.h (module 'core'): ns3::Object::Object(ns3::Object const & o) [copy constructor]
    cls.add_constructor([param('ns3::Object const &', 'o')], visibility='protected')
    ## object.h (module 'core'): void ns3::Object::DoDispose() [member function]
    cls.add_method('DoDispose', 'void', [], visibility='protected', is_virtual=True)
    ## object.h (module 'core'): void ns3::Object::DoInitialize() [member function]
    cls.add_method('DoInitialize', 'void', [], visibility='protected', is_virtual=True)
    ## object.h (module 'core'): void ns3::Object::NotifyNewAggregate() [member function]
    cls.add_method('NotifyNewAggregate', 'void', [], visibility='protected', is_virtual=True)
    return
def register_Ns3ObjectAggregateIterator_methods(root_module, cls):
    ## object.h (module 'core'): ns3::Object::AggregateIterator::AggregateIterator(ns3::Object::AggregateIterator const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Object::AggregateIterator const &', 'arg0')])
    ## object.h (module 'core'): ns3::Object::AggregateIterator::AggregateIterator() [constructor]
    cls.add_constructor([])
    ## object.h (module 'core'): bool ns3::Object::AggregateIterator::HasNext() const [member function]
    cls.add_method('HasNext', 'bool', [], is_const=True)
    ## object.h (module 'core'): ns3::Ptr<ns3::Object const> ns3::Object::AggregateIterator::Next() [member function]
    cls.add_method('Next', 'ns3::Ptr< ns3::Object const >', [])
    return
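# Usage sketch (assumption: bindings importable as ns.network):
# AggregateObject() glues ns3::Object instances together, and the iterator
# registered above walks everything aggregated to an object.
#
#   import ns.network
#   node = ns.network.Node()
#   it = node.GetAggregateIterator()
#   while it.HasNext():
#       print(it.Next().GetInstanceTypeId().GetName())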
def register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, cls):
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter< ns3::AttributeAccessor > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return
def register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, cls):
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter< ns3::AttributeChecker > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return
def register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, cls):
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter< ns3::AttributeValue > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return
def register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, cls):
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::SimpleRefCount(ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter< ns3::CallbackImplBase > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return
def register_Ns3SimpleRefCount__Ns3EventImpl_Ns3Empty_Ns3DefaultDeleter__lt__ns3EventImpl__gt___methods(root_module, cls):
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >::SimpleRefCount(ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter< ns3::EventImpl > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >::Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return
def register_Ns3SimpleRefCount__Ns3FdReader_Ns3Empty_Ns3DefaultDeleter__lt__ns3FdReader__gt___methods(root_module, cls):
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::FdReader, ns3::empty, ns3::DefaultDeleter<ns3::FdReader> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::FdReader, ns3::empty, ns3::DefaultDeleter<ns3::FdReader> >::SimpleRefCount(ns3::SimpleRefCount<ns3::FdReader, ns3::empty, ns3::DefaultDeleter<ns3::FdReader> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::FdReader, ns3::empty, ns3::DefaultDeleter< ns3::FdReader > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::FdReader, ns3::empty, ns3::DefaultDeleter<ns3::FdReader> >::Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return
def register_Ns3SimpleRefCount__Ns3HashImplementation_Ns3Empty_Ns3DefaultDeleter__lt__ns3HashImplementation__gt___methods(root_module, cls):
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >::SimpleRefCount(ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter< ns3::Hash::Implementation > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >::Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return
def register_Ns3SimpleRefCount__Ns3NixVector_Ns3Empty_Ns3DefaultDeleter__lt__ns3NixVector__gt___methods(root_module, cls):
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >::SimpleRefCount(ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter< ns3::NixVector > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >::Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return
def register_Ns3SimpleRefCount__Ns3Packet_Ns3Empty_Ns3DefaultDeleter__lt__ns3Packet__gt___methods(root_module, cls):
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >::SimpleRefCount(ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter< ns3::Packet > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >::Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return
def register_Ns3SimpleRefCount__Ns3SystemThread_Ns3Empty_Ns3DefaultDeleter__lt__ns3SystemThread__gt___methods(root_module, cls):
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::SystemThread, ns3::empty, ns3::DefaultDeleter<ns3::SystemThread> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::SystemThread, ns3::empty, ns3::DefaultDeleter<ns3::SystemThread> >::SimpleRefCount(ns3::SimpleRefCount<ns3::SystemThread, ns3::empty, ns3::DefaultDeleter<ns3::SystemThread> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::SystemThread, ns3::empty, ns3::DefaultDeleter< ns3::SystemThread > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::SystemThread, ns3::empty, ns3::DefaultDeleter<ns3::SystemThread> >::Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return
def register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, cls):
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::SimpleRefCount(ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter< ns3::TraceSourceAccessor > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return
def register_Ns3SystemThread_methods(root_module, cls):
    ## system-thread.h (module 'core'): ns3::SystemThread::SystemThread(ns3::SystemThread const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::SystemThread const &', 'arg0')])
    ## system-thread.h (module 'core'): ns3::SystemThread::SystemThread(ns3::Callback<void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> callback) [constructor]
    cls.add_constructor([param('ns3::Callback< void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'callback')])
    ## system-thread.h (module 'core'): static bool ns3::SystemThread::Equals(pthread_t id) [member function]
    cls.add_method('Equals', 'bool', [param('pthread_t', 'id')], is_static=True)
    ## system-thread.h (module 'core'): void ns3::SystemThread::Join() [member function]
    cls.add_method('Join', 'void', [])
    ## system-thread.h (module 'core'): static pthread_t ns3::SystemThread::Self() [member function]
    cls.add_method('Self', 'pthread_t', [], is_static=True)
    ## system-thread.h (module 'core'): void ns3::SystemThread::Start() [member function]
    cls.add_method('Start', 'void', [])
    return
root_module['ns3::Time'], root_module['ns3::Time'], param('ns3::Time const &', u'right')) cls.add_binary_numeric_operator('/', root_module['ns3::Time'], root_module['ns3::Time'], param('int64_t const &', u'right')) cls.add_binary_comparison_operator('<') cls.add_binary_comparison_operator('>') cls.add_binary_comparison_operator('!=') cls.add_inplace_numeric_operator('+=', param('ns3::Time const &', u'right')) cls.add_inplace_numeric_operator('-=', param('ns3::Time const &', u'right')) cls.add_output_stream_operator() cls.add_binary_comparison_operator('<=') cls.add_binary_comparison_operator('==') cls.add_binary_comparison_operator('>=') ## nstime.h (module 'core'): ns3::Time::Time() [constructor] cls.add_constructor([]) ## nstime.h (module 'core'): ns3::Time::Time(ns3::Time const & o) [copy constructor] cls.add_constructor([param('ns3::Time const &', 'o')]) ## nstime.h (module 'core'): ns3::Time::Time(double v) [constructor] cls.add_constructor([param('double', 'v')]) ## nstime.h (module 'core'): ns3::Time::Time(int v) [constructor] cls.add_constructor([param('int', 'v')]) ## nstime.h (module 'core'): ns3::Time::Time(long int v) [constructor] cls.add_constructor([param('long int', 'v')]) ## nstime.h (module 'core'): ns3::Time::Time(long long int v) [constructor] cls.add_constructor([param('long long int', 'v')]) ## nstime.h (module 'core'): ns3::Time::Time(unsigned int v) [constructor] cls.add_constructor([param('unsigned int', 'v')]) ## nstime.h (module 'core'): ns3::Time::Time(long unsigned int v) [constructor] cls.add_constructor([param('long unsigned int', 'v')]) ## nstime.h (module 'core'): ns3::Time::Time(long long unsigned int v) [constructor] cls.add_constructor([param('long long unsigned int', 'v')]) ## nstime.h (module 'core'): ns3::Time::Time(ns3::int64x64_t const & v) [constructor] cls.add_constructor([param('ns3::int64x64_t const &', 'v')]) ## nstime.h (module 'core'): ns3::Time::Time(std::string const & s) [constructor] cls.add_constructor([param('std::string const &', 's')]) ## nstime.h (module 'core'): ns3::TimeWithUnit ns3::Time::As(ns3::Time::Unit const unit) const [member function] cls.add_method('As', 'ns3::TimeWithUnit', [param('ns3::Time::Unit const', 'unit')], is_const=True) ## nstime.h (module 'core'): int ns3::Time::Compare(ns3::Time const & o) const [member function] cls.add_method('Compare', 'int', [param('ns3::Time const &', 'o')], is_const=True) ## nstime.h (module 'core'): static ns3::Time ns3::Time::From(ns3::int64x64_t const & value) [member function] cls.add_method('From', 'ns3::Time', [param('ns3::int64x64_t const &', 'value')], is_static=True) ## nstime.h (module 'core'): static ns3::Time ns3::Time::From(ns3::int64x64_t const & value, ns3::Time::Unit unit) [member function] cls.add_method('From', 'ns3::Time', [param('ns3::int64x64_t const &', 'value'), param('ns3::Time::Unit', 'unit')], is_static=True) ## nstime.h (module 'core'): static ns3::Time ns3::Time::FromDouble(double value, ns3::Time::Unit unit) [member function] cls.add_method('FromDouble', 'ns3::Time', [param('double', 'value'), param('ns3::Time::Unit', 'unit')], is_static=True) ## nstime.h (module 'core'): static ns3::Time ns3::Time::FromInteger(uint64_t value, ns3::Time::Unit unit) [member function] cls.add_method('FromInteger', 'ns3::Time', [param('uint64_t', 'value'), param('ns3::Time::Unit', 'unit')], is_static=True) ## nstime.h (module 'core'): double ns3::Time::GetDays() const [member function] cls.add_method('GetDays', 'double', [], is_const=True) ## nstime.h (module 'core'): double 
ns3::Time::GetDouble() const [member function] cls.add_method('GetDouble', 'double', [], is_const=True) ## nstime.h (module 'core'): int64_t ns3::Time::GetFemtoSeconds() const [member function] cls.add_method('GetFemtoSeconds', 'int64_t', [], is_const=True) ## nstime.h (module 'core'): double ns3::Time::GetHours() const [member function] cls.add_method('GetHours', 'double', [], is_const=True) ## nstime.h (module 'core'): int64_t ns3::Time::GetInteger() const [member function] cls.add_method('GetInteger', 'int64_t', [], is_const=True) ## nstime.h (module 'core'): int64_t ns3::Time::GetMicroSeconds() const [member function] cls.add_method('GetMicroSeconds', 'int64_t', [], is_const=True) ## nstime.h (module 'core'): int64_t ns3::Time::GetMilliSeconds() const [member function] cls.add_method('GetMilliSeconds', 'int64_t', [], is_const=True) ## nstime.h (module 'core'): double ns3::Time::GetMinutes() const [member function] cls.add_method('GetMinutes', 'double', [], is_const=True) ## nstime.h (module 'core'): int64_t ns3::Time::GetNanoSeconds() const [member function] cls.add_method('GetNanoSeconds', 'int64_t', [], is_const=True) ## nstime.h (module 'core'): int64_t ns3::Time::GetPicoSeconds() const [member function] cls.add_method('GetPicoSeconds', 'int64_t', [], is_const=True) ## nstime.h (module 'core'): static ns3::Time::Unit ns3::Time::GetResolution() [member function] cls.add_method('GetResolution', 'ns3::Time::Unit', [], is_static=True) ## nstime.h (module 'core'): double ns3::Time::GetSeconds() const [member function] cls.add_method('GetSeconds', 'double', [], is_const=True) ## nstime.h (module 'core'): int64_t ns3::Time::GetTimeStep() const [member function] cls.add_method('GetTimeStep', 'int64_t', [], is_const=True) ## nstime.h (module 'core'): double ns3::Time::GetYears() const [member function] cls.add_method('GetYears', 'double', [], is_const=True) ## nstime.h (module 'core'): bool ns3::Time::IsNegative() const [member function] cls.add_method('IsNegative', 'bool', [], is_const=True) ## nstime.h (module 'core'): bool ns3::Time::IsPositive() const [member function] cls.add_method('IsPositive', 'bool', [], is_const=True) ## nstime.h (module 'core'): bool ns3::Time::IsStrictlyNegative() const [member function] cls.add_method('IsStrictlyNegative', 'bool', [], is_const=True) ## nstime.h (module 'core'): bool ns3::Time::IsStrictlyPositive() const [member function] cls.add_method('IsStrictlyPositive', 'bool', [], is_const=True) ## nstime.h (module 'core'): bool ns3::Time::IsZero() const [member function] cls.add_method('IsZero', 'bool', [], is_const=True) ## nstime.h (module 'core'): static ns3::Time ns3::Time::Max() [member function] cls.add_method('Max', 'ns3::Time', [], is_static=True) ## nstime.h (module 'core'): static ns3::Time ns3::Time::Min() [member function] cls.add_method('Min', 'ns3::Time', [], is_static=True) ## nstime.h (module 'core'): static void ns3::Time::SetResolution(ns3::Time::Unit resolution) [member function] cls.add_method('SetResolution', 'void', [param('ns3::Time::Unit', 'resolution')], is_static=True) ## nstime.h (module 'core'): static bool ns3::Time::StaticInit() [member function] cls.add_method('StaticInit', 'bool', [], is_static=True) ## nstime.h (module 'core'): ns3::int64x64_t ns3::Time::To(ns3::Time::Unit unit) const [member function] cls.add_method('To', 'ns3::int64x64_t', [param('ns3::Time::Unit', 'unit')], is_const=True) ## nstime.h (module 'core'): double ns3::Time::ToDouble(ns3::Time::Unit unit) const [member function] cls.add_method('ToDouble', 
'double', [param('ns3::Time::Unit', 'unit')], is_const=True) ## nstime.h (module 'core'): int64_t ns3::Time::ToInteger(ns3::Time::Unit unit) const [member function] cls.add_method('ToInteger', 'int64_t', [param('ns3::Time::Unit', 'unit')], is_const=True) return def register_Ns3TraceSourceAccessor_methods(root_module, cls): ## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor::TraceSourceAccessor(ns3::TraceSourceAccessor const & arg0) [copy constructor] cls.add_constructor([param('ns3::TraceSourceAccessor const &', 'arg0')]) ## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor::TraceSourceAccessor() [constructor] cls.add_constructor([]) ## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::Connect(ns3::ObjectBase * obj, std::string context, ns3::CallbackBase const & cb) const [member function] cls.add_method('Connect', 'bool', [param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')], is_pure_virtual=True, is_const=True, is_virtual=True) ## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::ConnectWithoutContext(ns3::ObjectBase * obj, ns3::CallbackBase const & cb) const [member function] cls.add_method('ConnectWithoutContext', 'bool', [param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('ns3::CallbackBase const &', 'cb')], is_pure_virtual=True, is_const=True, is_virtual=True) ## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::Disconnect(ns3::ObjectBase * obj, std::string context, ns3::CallbackBase const & cb) const [member function] cls.add_method('Disconnect', 'bool', [param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')], is_pure_virtual=True, is_const=True, is_virtual=True) ## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::DisconnectWithoutContext(ns3::ObjectBase * obj, ns3::CallbackBase const & cb) const [member function] cls.add_method('DisconnectWithoutContext', 'bool', [param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('ns3::CallbackBase const &', 'cb')], is_pure_virtual=True, is_const=True, is_virtual=True) return def register_Ns3Trailer_methods(root_module, cls): cls.add_output_stream_operator() ## trailer.h (module 'network'): ns3::Trailer::Trailer() [constructor] cls.add_constructor([]) ## trailer.h (module 'network'): ns3::Trailer::Trailer(ns3::Trailer const & arg0) [copy constructor] cls.add_constructor([param('ns3::Trailer const &', 'arg0')]) ## trailer.h (module 'network'): uint32_t ns3::Trailer::Deserialize(ns3::Buffer::Iterator end) [member function] cls.add_method('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'end')], is_pure_virtual=True, is_virtual=True) ## trailer.h (module 'network'): uint32_t ns3::Trailer::GetSerializedSize() const [member function] cls.add_method('GetSerializedSize', 'uint32_t', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## trailer.h (module 'network'): static ns3::TypeId ns3::Trailer::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## trailer.h (module 'network'): void ns3::Trailer::Print(std::ostream & os) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_pure_virtual=True, is_const=True, is_virtual=True) ## trailer.h (module 'network'): void ns3::Trailer::Serialize(ns3::Buffer::Iterator start) const [member function] 
cls.add_method('Serialize', 'void', [param('ns3::Buffer::Iterator', 'start')], is_pure_virtual=True, is_const=True, is_virtual=True) return def register_Ns3AttributeAccessor_methods(root_module, cls): ## attribute.h (module 'core'): ns3::AttributeAccessor::AttributeAccessor(ns3::AttributeAccessor const & arg0) [copy constructor] cls.add_constructor([param('ns3::AttributeAccessor const &', 'arg0')]) ## attribute.h (module 'core'): ns3::AttributeAccessor::AttributeAccessor() [constructor] cls.add_constructor([]) ## attribute.h (module 'core'): bool ns3::AttributeAccessor::Get(ns3::ObjectBase const * object, ns3::AttributeValue & attribute) const [member function] cls.add_method('Get', 'bool', [param('ns3::ObjectBase const *', 'object'), param('ns3::AttributeValue &', 'attribute')], is_pure_virtual=True, is_const=True, is_virtual=True) ## attribute.h (module 'core'): bool ns3::AttributeAccessor::HasGetter() const [member function] cls.add_method('HasGetter', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## attribute.h (module 'core'): bool ns3::AttributeAccessor::HasSetter() const [member function] cls.add_method('HasSetter', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## attribute.h (module 'core'): bool ns3::AttributeAccessor::Set(ns3::ObjectBase * object, ns3::AttributeValue const & value) const [member function] cls.add_method('Set', 'bool', [param('ns3::ObjectBase *', 'object', transfer_ownership=False), param('ns3::AttributeValue const &', 'value')], is_pure_virtual=True, is_const=True, is_virtual=True) return def register_Ns3AttributeChecker_methods(root_module, cls): ## attribute.h (module 'core'): ns3::AttributeChecker::AttributeChecker(ns3::AttributeChecker const & arg0) [copy constructor] cls.add_constructor([param('ns3::AttributeChecker const &', 'arg0')]) ## attribute.h (module 'core'): ns3::AttributeChecker::AttributeChecker() [constructor] cls.add_constructor([]) ## attribute.h (module 'core'): bool ns3::AttributeChecker::Check(ns3::AttributeValue const & value) const [member function] cls.add_method('Check', 'bool', [param('ns3::AttributeValue const &', 'value')], is_pure_virtual=True, is_const=True, is_virtual=True) ## attribute.h (module 'core'): bool ns3::AttributeChecker::Copy(ns3::AttributeValue const & source, ns3::AttributeValue & destination) const [member function] cls.add_method('Copy', 'bool', [param('ns3::AttributeValue const &', 'source'), param('ns3::AttributeValue &', 'destination')], is_pure_virtual=True, is_const=True, is_virtual=True) ## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeChecker::Create() const [member function] cls.add_method('Create', 'ns3::Ptr< ns3::AttributeValue >', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeChecker::CreateValidValue(ns3::AttributeValue const & value) const [member function] cls.add_method('CreateValidValue', 'ns3::Ptr< ns3::AttributeValue >', [param('ns3::AttributeValue const &', 'value')], is_const=True) ## attribute.h (module 'core'): std::string ns3::AttributeChecker::GetUnderlyingTypeInformation() const [member function] cls.add_method('GetUnderlyingTypeInformation', 'std::string', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## attribute.h (module 'core'): std::string ns3::AttributeChecker::GetValueTypeName() const [member function] cls.add_method('GetValueTypeName', 'std::string', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## 
attribute.h (module 'core'): bool ns3::AttributeChecker::HasUnderlyingTypeInformation() const [member function] cls.add_method('HasUnderlyingTypeInformation', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True) return def register_Ns3AttributeValue_methods(root_module, cls): ## attribute.h (module 'core'): ns3::AttributeValue::AttributeValue(ns3::AttributeValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::AttributeValue const &', 'arg0')]) ## attribute.h (module 'core'): ns3::AttributeValue::AttributeValue() [constructor] cls.add_constructor([]) ## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## attribute.h (module 'core'): bool ns3::AttributeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_pure_virtual=True, is_virtual=True) ## attribute.h (module 'core'): std::string ns3::AttributeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_pure_virtual=True, is_const=True, is_virtual=True) return def register_Ns3CallbackChecker_methods(root_module, cls): ## callback.h (module 'core'): ns3::CallbackChecker::CallbackChecker() [constructor] cls.add_constructor([]) ## callback.h (module 'core'): ns3::CallbackChecker::CallbackChecker(ns3::CallbackChecker const & arg0) [copy constructor] cls.add_constructor([param('ns3::CallbackChecker const &', 'arg0')]) return def register_Ns3CallbackImplBase_methods(root_module, cls): ## callback.h (module 'core'): ns3::CallbackImplBase::CallbackImplBase() [constructor] cls.add_constructor([]) ## callback.h (module 'core'): ns3::CallbackImplBase::CallbackImplBase(ns3::CallbackImplBase const & arg0) [copy constructor] cls.add_constructor([param('ns3::CallbackImplBase const &', 'arg0')]) ## callback.h (module 'core'): std::string ns3::CallbackImplBase::GetTypeid() const [member function] cls.add_method('GetTypeid', 'std::string', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## callback.h (module 'core'): bool ns3::CallbackImplBase::IsEqual(ns3::Ptr<ns3::CallbackImplBase const> other) const [member function] cls.add_method('IsEqual', 'bool', [param('ns3::Ptr< ns3::CallbackImplBase const >', 'other')], is_pure_virtual=True, is_const=True, is_virtual=True) ## callback.h (module 'core'): static std::string ns3::CallbackImplBase::Demangle(std::string const & mangled) [member function] cls.add_method('Demangle', 'std::string', [param('std::string const &', 'mangled')], is_static=True, visibility='protected') return def register_Ns3CallbackValue_methods(root_module, cls): ## callback.h (module 'core'): ns3::CallbackValue::CallbackValue(ns3::CallbackValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::CallbackValue const &', 'arg0')]) ## callback.h (module 'core'): ns3::CallbackValue::CallbackValue() [constructor] cls.add_constructor([]) ## callback.h (module 'core'): ns3::CallbackValue::CallbackValue(ns3::CallbackBase const & base) [constructor] cls.add_constructor([param('ns3::CallbackBase const &', 'base')]) ## callback.h (module 'core'): 
ns3::Ptr<ns3::AttributeValue> ns3::CallbackValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True) ## callback.h (module 'core'): bool ns3::CallbackValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True) ## callback.h (module 'core'): std::string ns3::CallbackValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True) ## callback.h (module 'core'): void ns3::CallbackValue::Set(ns3::CallbackBase base) [member function] cls.add_method('Set', 'void', [param('ns3::CallbackBase', 'base')]) return def register_Ns3DataRateChecker_methods(root_module, cls): ## data-rate.h (module 'network'): ns3::DataRateChecker::DataRateChecker() [constructor] cls.add_constructor([]) ## data-rate.h (module 'network'): ns3::DataRateChecker::DataRateChecker(ns3::DataRateChecker const & arg0) [copy constructor] cls.add_constructor([param('ns3::DataRateChecker const &', 'arg0')]) return def register_Ns3DataRateValue_methods(root_module, cls): ## data-rate.h (module 'network'): ns3::DataRateValue::DataRateValue() [constructor] cls.add_constructor([]) ## data-rate.h (module 'network'): ns3::DataRateValue::DataRateValue(ns3::DataRateValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::DataRateValue const &', 'arg0')]) ## data-rate.h (module 'network'): ns3::DataRateValue::DataRateValue(ns3::DataRate const & value) [constructor] cls.add_constructor([param('ns3::DataRate const &', 'value')]) ## data-rate.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::DataRateValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True) ## data-rate.h (module 'network'): bool ns3::DataRateValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True) ## data-rate.h (module 'network'): ns3::DataRate ns3::DataRateValue::Get() const [member function] cls.add_method('Get', 'ns3::DataRate', [], is_const=True) ## data-rate.h (module 'network'): std::string ns3::DataRateValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True) ## data-rate.h (module 'network'): void ns3::DataRateValue::Set(ns3::DataRate const & value) [member function] cls.add_method('Set', 'void', [param('ns3::DataRate const &', 'value')]) return def register_Ns3EmptyAttributeValue_methods(root_module, cls): ## attribute.h (module 'core'): ns3::EmptyAttributeValue::EmptyAttributeValue(ns3::EmptyAttributeValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::EmptyAttributeValue const &', 'arg0')]) ## attribute.h (module 'core'): ns3::EmptyAttributeValue::EmptyAttributeValue() [constructor] cls.add_constructor([]) ## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::EmptyAttributeValue::Copy() const 
[member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, visibility='private', is_virtual=True) ## attribute.h (module 'core'): bool ns3::EmptyAttributeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], visibility='private', is_virtual=True) ## attribute.h (module 'core'): std::string ns3::EmptyAttributeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, visibility='private', is_virtual=True) return def register_Ns3EventImpl_methods(root_module, cls): ## event-impl.h (module 'core'): ns3::EventImpl::EventImpl(ns3::EventImpl const & arg0) [copy constructor] cls.add_constructor([param('ns3::EventImpl const &', 'arg0')]) ## event-impl.h (module 'core'): ns3::EventImpl::EventImpl() [constructor] cls.add_constructor([]) ## event-impl.h (module 'core'): void ns3::EventImpl::Cancel() [member function] cls.add_method('Cancel', 'void', []) ## event-impl.h (module 'core'): void ns3::EventImpl::Invoke() [member function] cls.add_method('Invoke', 'void', []) ## event-impl.h (module 'core'): bool ns3::EventImpl::IsCancelled() [member function] cls.add_method('IsCancelled', 'bool', []) ## event-impl.h (module 'core'): void ns3::EventImpl::Notify() [member function] cls.add_method('Notify', 'void', [], is_pure_virtual=True, visibility='protected', is_virtual=True) return def register_Ns3FdReader_methods(root_module, cls): ## unix-fd-reader.h (module 'core'): ns3::FdReader::FdReader(ns3::FdReader const & arg0) [copy constructor] cls.add_constructor([param('ns3::FdReader const &', 'arg0')]) ## unix-fd-reader.h (module 'core'): ns3::FdReader::FdReader() [constructor] cls.add_constructor([]) ## unix-fd-reader.h (module 'core'): void ns3::FdReader::Start(int fd, ns3::Callback<void, unsigned char*, int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> readCallback) [member function] cls.add_method('Start', 'void', [param('int', 'fd'), param('ns3::Callback< void, unsigned char *, int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'readCallback')]) ## unix-fd-reader.h (module 'core'): void ns3::FdReader::Stop() [member function] cls.add_method('Stop', 'void', []) ## unix-fd-reader.h (module 'core'): ns3::FdReader::Data ns3::FdReader::DoRead() [member function] cls.add_method('DoRead', 'ns3::FdReader::Data', [], is_pure_virtual=True, visibility='protected', is_virtual=True) return def register_Ns3Ipv4AddressChecker_methods(root_module, cls): ## ipv4-address.h (module 'network'): ns3::Ipv4AddressChecker::Ipv4AddressChecker() [constructor] cls.add_constructor([]) ## ipv4-address.h (module 'network'): ns3::Ipv4AddressChecker::Ipv4AddressChecker(ns3::Ipv4AddressChecker const & arg0) [copy constructor] cls.add_constructor([param('ns3::Ipv4AddressChecker const &', 'arg0')]) return def register_Ns3Ipv4AddressValue_methods(root_module, cls): ## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue::Ipv4AddressValue() [constructor] cls.add_constructor([]) ## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue::Ipv4AddressValue(ns3::Ipv4AddressValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::Ipv4AddressValue 
const &', 'arg0')]) ## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue::Ipv4AddressValue(ns3::Ipv4Address const & value) [constructor] cls.add_constructor([param('ns3::Ipv4Address const &', 'value')]) ## ipv4-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv4AddressValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True) ## ipv4-address.h (module 'network'): bool ns3::Ipv4AddressValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True) ## ipv4-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv4AddressValue::Get() const [member function] cls.add_method('Get', 'ns3::Ipv4Address', [], is_const=True) ## ipv4-address.h (module 'network'): std::string ns3::Ipv4AddressValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True) ## ipv4-address.h (module 'network'): void ns3::Ipv4AddressValue::Set(ns3::Ipv4Address const & value) [member function] cls.add_method('Set', 'void', [param('ns3::Ipv4Address const &', 'value')]) return def register_Ns3Ipv4MaskChecker_methods(root_module, cls): ## ipv4-address.h (module 'network'): ns3::Ipv4MaskChecker::Ipv4MaskChecker() [constructor] cls.add_constructor([]) ## ipv4-address.h (module 'network'): ns3::Ipv4MaskChecker::Ipv4MaskChecker(ns3::Ipv4MaskChecker const & arg0) [copy constructor] cls.add_constructor([param('ns3::Ipv4MaskChecker const &', 'arg0')]) return def register_Ns3Ipv4MaskValue_methods(root_module, cls): ## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue::Ipv4MaskValue() [constructor] cls.add_constructor([]) ## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue::Ipv4MaskValue(ns3::Ipv4MaskValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::Ipv4MaskValue const &', 'arg0')]) ## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue::Ipv4MaskValue(ns3::Ipv4Mask const & value) [constructor] cls.add_constructor([param('ns3::Ipv4Mask const &', 'value')]) ## ipv4-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv4MaskValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True) ## ipv4-address.h (module 'network'): bool ns3::Ipv4MaskValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True) ## ipv4-address.h (module 'network'): ns3::Ipv4Mask ns3::Ipv4MaskValue::Get() const [member function] cls.add_method('Get', 'ns3::Ipv4Mask', [], is_const=True) ## ipv4-address.h (module 'network'): std::string ns3::Ipv4MaskValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True) ## ipv4-address.h (module 'network'): void ns3::Ipv4MaskValue::Set(ns3::Ipv4Mask const & value) [member function] cls.add_method('Set', 'void', [param('ns3::Ipv4Mask const &', 'value')]) 
return def register_Ns3Ipv6AddressChecker_methods(root_module, cls): ## ipv6-address.h (module 'network'): ns3::Ipv6AddressChecker::Ipv6AddressChecker() [constructor] cls.add_constructor([]) ## ipv6-address.h (module 'network'): ns3::Ipv6AddressChecker::Ipv6AddressChecker(ns3::Ipv6AddressChecker const & arg0) [copy constructor] cls.add_constructor([param('ns3::Ipv6AddressChecker const &', 'arg0')]) return def register_Ns3Ipv6AddressValue_methods(root_module, cls): ## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue::Ipv6AddressValue() [constructor] cls.add_constructor([]) ## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue::Ipv6AddressValue(ns3::Ipv6AddressValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::Ipv6AddressValue const &', 'arg0')]) ## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue::Ipv6AddressValue(ns3::Ipv6Address const & value) [constructor] cls.add_constructor([param('ns3::Ipv6Address const &', 'value')]) ## ipv6-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv6AddressValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6AddressValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True) ## ipv6-address.h (module 'network'): ns3::Ipv6Address ns3::Ipv6AddressValue::Get() const [member function] cls.add_method('Get', 'ns3::Ipv6Address', [], is_const=True) ## ipv6-address.h (module 'network'): std::string ns3::Ipv6AddressValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True) ## ipv6-address.h (module 'network'): void ns3::Ipv6AddressValue::Set(ns3::Ipv6Address const & value) [member function] cls.add_method('Set', 'void', [param('ns3::Ipv6Address const &', 'value')]) return def register_Ns3Ipv6PrefixChecker_methods(root_module, cls): ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixChecker::Ipv6PrefixChecker() [constructor] cls.add_constructor([]) ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixChecker::Ipv6PrefixChecker(ns3::Ipv6PrefixChecker const & arg0) [copy constructor] cls.add_constructor([param('ns3::Ipv6PrefixChecker const &', 'arg0')]) return def register_Ns3Ipv6PrefixValue_methods(root_module, cls): ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue::Ipv6PrefixValue() [constructor] cls.add_constructor([]) ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue::Ipv6PrefixValue(ns3::Ipv6PrefixValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::Ipv6PrefixValue const &', 'arg0')]) ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue::Ipv6PrefixValue(ns3::Ipv6Prefix const & value) [constructor] cls.add_constructor([param('ns3::Ipv6Prefix const &', 'value')]) ## ipv6-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv6PrefixValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6PrefixValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] 
cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True) ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix ns3::Ipv6PrefixValue::Get() const [member function] cls.add_method('Get', 'ns3::Ipv6Prefix', [], is_const=True) ## ipv6-address.h (module 'network'): std::string ns3::Ipv6PrefixValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True) ## ipv6-address.h (module 'network'): void ns3::Ipv6PrefixValue::Set(ns3::Ipv6Prefix const & value) [member function] cls.add_method('Set', 'void', [param('ns3::Ipv6Prefix const &', 'value')]) return def register_Ns3Mac48AddressChecker_methods(root_module, cls): ## mac48-address.h (module 'network'): ns3::Mac48AddressChecker::Mac48AddressChecker() [constructor] cls.add_constructor([]) ## mac48-address.h (module 'network'): ns3::Mac48AddressChecker::Mac48AddressChecker(ns3::Mac48AddressChecker const & arg0) [copy constructor] cls.add_constructor([param('ns3::Mac48AddressChecker const &', 'arg0')]) return def register_Ns3Mac48AddressValue_methods(root_module, cls): ## mac48-address.h (module 'network'): ns3::Mac48AddressValue::Mac48AddressValue() [constructor] cls.add_constructor([]) ## mac48-address.h (module 'network'): ns3::Mac48AddressValue::Mac48AddressValue(ns3::Mac48AddressValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::Mac48AddressValue const &', 'arg0')]) ## mac48-address.h (module 'network'): ns3::Mac48AddressValue::Mac48AddressValue(ns3::Mac48Address const & value) [constructor] cls.add_constructor([param('ns3::Mac48Address const &', 'value')]) ## mac48-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Mac48AddressValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True) ## mac48-address.h (module 'network'): bool ns3::Mac48AddressValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True) ## mac48-address.h (module 'network'): ns3::Mac48Address ns3::Mac48AddressValue::Get() const [member function] cls.add_method('Get', 'ns3::Mac48Address', [], is_const=True) ## mac48-address.h (module 'network'): std::string ns3::Mac48AddressValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True) ## mac48-address.h (module 'network'): void ns3::Mac48AddressValue::Set(ns3::Mac48Address const & value) [member function] cls.add_method('Set', 'void', [param('ns3::Mac48Address const &', 'value')]) return def register_Ns3NetDevice_methods(root_module, cls): ## net-device.h (module 'network'): ns3::NetDevice::NetDevice() [constructor] cls.add_constructor([]) ## net-device.h (module 'network'): ns3::NetDevice::NetDevice(ns3::NetDevice const & arg0) [copy constructor] cls.add_constructor([param('ns3::NetDevice const &', 'arg0')]) ## net-device.h (module 'network'): void ns3::NetDevice::AddLinkChangeCallback(ns3::Callback<void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, 
ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> callback) [member function] cls.add_method('AddLinkChangeCallback', 'void', [param('ns3::Callback< void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'callback')], is_pure_virtual=True, is_virtual=True) ## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetAddress() const [member function] cls.add_method('GetAddress', 'ns3::Address', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetBroadcast() const [member function] cls.add_method('GetBroadcast', 'ns3::Address', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## net-device.h (module 'network'): ns3::Ptr<ns3::Channel> ns3::NetDevice::GetChannel() const [member function] cls.add_method('GetChannel', 'ns3::Ptr< ns3::Channel >', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## net-device.h (module 'network'): uint32_t ns3::NetDevice::GetIfIndex() const [member function] cls.add_method('GetIfIndex', 'uint32_t', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## net-device.h (module 'network'): uint16_t ns3::NetDevice::GetMtu() const [member function] cls.add_method('GetMtu', 'uint16_t', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetMulticast(ns3::Ipv4Address multicastGroup) const [member function] cls.add_method('GetMulticast', 'ns3::Address', [param('ns3::Ipv4Address', 'multicastGroup')], is_pure_virtual=True, is_const=True, is_virtual=True) ## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetMulticast(ns3::Ipv6Address addr) const [member function] cls.add_method('GetMulticast', 'ns3::Address', [param('ns3::Ipv6Address', 'addr')], is_pure_virtual=True, is_const=True, is_virtual=True) ## net-device.h (module 'network'): ns3::Ptr<ns3::Node> ns3::NetDevice::GetNode() const [member function] cls.add_method('GetNode', 'ns3::Ptr< ns3::Node >', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## net-device.h (module 'network'): static ns3::TypeId ns3::NetDevice::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## net-device.h (module 'network'): bool ns3::NetDevice::IsBridge() const [member function] cls.add_method('IsBridge', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## net-device.h (module 'network'): bool ns3::NetDevice::IsBroadcast() const [member function] cls.add_method('IsBroadcast', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## net-device.h (module 'network'): bool ns3::NetDevice::IsLinkUp() const [member function] cls.add_method('IsLinkUp', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## net-device.h (module 'network'): bool ns3::NetDevice::IsMulticast() const [member function] cls.add_method('IsMulticast', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## net-device.h (module 'network'): bool ns3::NetDevice::IsPointToPoint() const [member function] cls.add_method('IsPointToPoint', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## net-device.h (module 'network'): bool ns3::NetDevice::NeedsArp() const [member function] cls.add_method('NeedsArp', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## net-device.h (module 'network'): bool ns3::NetDevice::Send(ns3::Ptr<ns3::Packet> packet, ns3::Address const & dest, uint16_t 
protocolNumber) [member function] cls.add_method('Send', 'bool', [param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')], is_pure_virtual=True, is_virtual=True) ## net-device.h (module 'network'): bool ns3::NetDevice::SendFrom(ns3::Ptr<ns3::Packet> packet, ns3::Address const & source, ns3::Address const & dest, uint16_t protocolNumber) [member function] cls.add_method('SendFrom', 'bool', [param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'source'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')], is_pure_virtual=True, is_virtual=True) ## net-device.h (module 'network'): void ns3::NetDevice::SetAddress(ns3::Address address) [member function] cls.add_method('SetAddress', 'void', [param('ns3::Address', 'address')], is_pure_virtual=True, is_virtual=True) ## net-device.h (module 'network'): void ns3::NetDevice::SetIfIndex(uint32_t const index) [member function] cls.add_method('SetIfIndex', 'void', [param('uint32_t const', 'index')], is_pure_virtual=True, is_virtual=True) ## net-device.h (module 'network'): bool ns3::NetDevice::SetMtu(uint16_t const mtu) [member function] cls.add_method('SetMtu', 'bool', [param('uint16_t const', 'mtu')], is_pure_virtual=True, is_virtual=True) ## net-device.h (module 'network'): void ns3::NetDevice::SetNode(ns3::Ptr<ns3::Node> node) [member function] cls.add_method('SetNode', 'void', [param('ns3::Ptr< ns3::Node >', 'node')], is_pure_virtual=True, is_virtual=True) ## net-device.h (module 'network'): void ns3::NetDevice::SetPromiscReceiveCallback(ns3::Callback<bool, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::Address const&, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty> cb) [member function] cls.add_method('SetPromiscReceiveCallback', 'void', [param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'cb')], is_pure_virtual=True, is_virtual=True) ## net-device.h (module 'network'): void ns3::NetDevice::SetReceiveCallback(ns3::Callback<bool, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> cb) [member function] cls.add_method('SetReceiveCallback', 'void', [param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')], is_pure_virtual=True, is_virtual=True) ## net-device.h (module 'network'): bool ns3::NetDevice::SupportsSendFrom() const [member function] cls.add_method('SupportsSendFrom', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True) return def register_Ns3NixVector_methods(root_module, cls): cls.add_output_stream_operator() ## nix-vector.h (module 'network'): ns3::NixVector::NixVector() [constructor] cls.add_constructor([]) ## nix-vector.h (module 'network'): ns3::NixVector::NixVector(ns3::NixVector const & o) [copy constructor] cls.add_constructor([param('ns3::NixVector const &', 'o')]) ## nix-vector.h (module 'network'): void ns3::NixVector::AddNeighborIndex(uint32_t newBits, uint32_t numberOfBits) [member function] cls.add_method('AddNeighborIndex', 'void', [param('uint32_t', 'newBits'), param('uint32_t', 'numberOfBits')]) ## nix-vector.h (module 'network'): uint32_t 
ns3::NixVector::BitCount(uint32_t numberOfNeighbors) const [member function] cls.add_method('BitCount', 'uint32_t', [param('uint32_t', 'numberOfNeighbors')], is_const=True) ## nix-vector.h (module 'network'): ns3::Ptr<ns3::NixVector> ns3::NixVector::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::NixVector >', [], is_const=True) ## nix-vector.h (module 'network'): uint32_t ns3::NixVector::Deserialize(uint32_t const * buffer, uint32_t size) [member function] cls.add_method('Deserialize', 'uint32_t', [param('uint32_t const *', 'buffer'), param('uint32_t', 'size')]) ## nix-vector.h (module 'network'): uint32_t ns3::NixVector::ExtractNeighborIndex(uint32_t numberOfBits) [member function] cls.add_method('ExtractNeighborIndex', 'uint32_t', [param('uint32_t', 'numberOfBits')]) ## nix-vector.h (module 'network'): uint32_t ns3::NixVector::GetRemainingBits() [member function] cls.add_method('GetRemainingBits', 'uint32_t', []) ## nix-vector.h (module 'network'): uint32_t ns3::NixVector::GetSerializedSize() const [member function] cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True) ## nix-vector.h (module 'network'): uint32_t ns3::NixVector::Serialize(uint32_t * buffer, uint32_t maxSize) const [member function] cls.add_method('Serialize', 'uint32_t', [param('uint32_t *', 'buffer'), param('uint32_t', 'maxSize')], is_const=True) return def register_Ns3Node_methods(root_module, cls): ## node.h (module 'network'): ns3::Node::Node(ns3::Node const & arg0) [copy constructor] cls.add_constructor([param('ns3::Node const &', 'arg0')]) ## node.h (module 'network'): ns3::Node::Node() [constructor] cls.add_constructor([]) ## node.h (module 'network'): ns3::Node::Node(uint32_t systemId) [constructor] cls.add_constructor([param('uint32_t', 'systemId')]) ## node.h (module 'network'): uint32_t ns3::Node::AddApplication(ns3::Ptr<ns3::Application> application) [member function] cls.add_method('AddApplication', 'uint32_t', [param('ns3::Ptr< ns3::Application >', 'application')]) ## node.h (module 'network'): uint32_t ns3::Node::AddDevice(ns3::Ptr<ns3::NetDevice> device) [member function] cls.add_method('AddDevice', 'uint32_t', [param('ns3::Ptr< ns3::NetDevice >', 'device')]) ## node.h (module 'network'): static bool ns3::Node::ChecksumEnabled() [member function] cls.add_method('ChecksumEnabled', 'bool', [], is_static=True) ## node.h (module 'network'): ns3::Ptr<ns3::Application> ns3::Node::GetApplication(uint32_t index) const [member function] cls.add_method('GetApplication', 'ns3::Ptr< ns3::Application >', [param('uint32_t', 'index')], is_const=True) ## node.h (module 'network'): ns3::Ptr<ns3::NetDevice> ns3::Node::GetDevice(uint32_t index) const [member function] cls.add_method('GetDevice', 'ns3::Ptr< ns3::NetDevice >', [param('uint32_t', 'index')], is_const=True) ## node.h (module 'network'): uint32_t ns3::Node::GetId() const [member function] cls.add_method('GetId', 'uint32_t', [], is_const=True) ## node.h (module 'network'): uint32_t ns3::Node::GetNApplications() const [member function] cls.add_method('GetNApplications', 'uint32_t', [], is_const=True) ## node.h (module 'network'): uint32_t ns3::Node::GetNDevices() const [member function] cls.add_method('GetNDevices', 'uint32_t', [], is_const=True) ## node.h (module 'network'): uint32_t ns3::Node::GetSystemId() const [member function] cls.add_method('GetSystemId', 'uint32_t', [], is_const=True) ## node.h (module 'network'): static ns3::TypeId ns3::Node::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], 
is_static=True) ## node.h (module 'network'): void ns3::Node::RegisterDeviceAdditionListener(ns3::Callback<void,ns3::Ptr<ns3::NetDevice>,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> listener) [member function] cls.add_method('RegisterDeviceAdditionListener', 'void', [param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'listener')]) ## node.h (module 'network'): void ns3::Node::RegisterProtocolHandler(ns3::Callback<void, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::Address const&, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty> handler, uint16_t protocolType, ns3::Ptr<ns3::NetDevice> device, bool promiscuous=false) [member function] cls.add_method('RegisterProtocolHandler', 'void', [param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'handler'), param('uint16_t', 'protocolType'), param('ns3::Ptr< ns3::NetDevice >', 'device'), param('bool', 'promiscuous', default_value='false')]) ## node.h (module 'network'): void ns3::Node::UnregisterDeviceAdditionListener(ns3::Callback<void,ns3::Ptr<ns3::NetDevice>,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> listener) [member function] cls.add_method('UnregisterDeviceAdditionListener', 'void', [param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'listener')]) ## node.h (module 'network'): void ns3::Node::UnregisterProtocolHandler(ns3::Callback<void, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::Address const&, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty> handler) [member function] cls.add_method('UnregisterProtocolHandler', 'void', [param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'handler')]) ## node.h (module 'network'): void ns3::Node::DoDispose() [member function] cls.add_method('DoDispose', 'void', [], visibility='protected', is_virtual=True) ## node.h (module 'network'): void ns3::Node::DoInitialize() [member function] cls.add_method('DoInitialize', 'void', [], visibility='protected', is_virtual=True) return def register_Ns3ObjectFactoryChecker_methods(root_module, cls): ## object-factory.h (module 'core'): ns3::ObjectFactoryChecker::ObjectFactoryChecker() [constructor] cls.add_constructor([]) ## object-factory.h (module 'core'): ns3::ObjectFactoryChecker::ObjectFactoryChecker(ns3::ObjectFactoryChecker const & arg0) [copy constructor] cls.add_constructor([param('ns3::ObjectFactoryChecker const &', 'arg0')]) return def register_Ns3ObjectFactoryValue_methods(root_module, cls): ## object-factory.h (module 'core'): ns3::ObjectFactoryValue::ObjectFactoryValue() [constructor] cls.add_constructor([]) ## object-factory.h (module 'core'): ns3::ObjectFactoryValue::ObjectFactoryValue(ns3::ObjectFactoryValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::ObjectFactoryValue const &', 'arg0')]) ## object-factory.h (module 'core'): ns3::ObjectFactoryValue::ObjectFactoryValue(ns3::ObjectFactory const & 
value) [constructor] cls.add_constructor([param('ns3::ObjectFactory const &', 'value')]) ## object-factory.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::ObjectFactoryValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True) ## object-factory.h (module 'core'): bool ns3::ObjectFactoryValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True) ## object-factory.h (module 'core'): ns3::ObjectFactory ns3::ObjectFactoryValue::Get() const [member function] cls.add_method('Get', 'ns3::ObjectFactory', [], is_const=True) ## object-factory.h (module 'core'): std::string ns3::ObjectFactoryValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True) ## object-factory.h (module 'core'): void ns3::ObjectFactoryValue::Set(ns3::ObjectFactory const & value) [member function] cls.add_method('Set', 'void', [param('ns3::ObjectFactory const &', 'value')]) return def register_Ns3Packet_methods(root_module, cls): cls.add_output_stream_operator() ## packet.h (module 'network'): ns3::Packet::Packet() [constructor] cls.add_constructor([]) ## packet.h (module 'network'): ns3::Packet::Packet(ns3::Packet const & o) [copy constructor] cls.add_constructor([param('ns3::Packet const &', 'o')]) ## packet.h (module 'network'): ns3::Packet::Packet(uint32_t size) [constructor] cls.add_constructor([param('uint32_t', 'size')]) ## packet.h (module 'network'): ns3::Packet::Packet(uint8_t const * buffer, uint32_t size, bool magic) [constructor] cls.add_constructor([param('uint8_t const *', 'buffer'), param('uint32_t', 'size'), param('bool', 'magic')]) ## packet.h (module 'network'): ns3::Packet::Packet(uint8_t const * buffer, uint32_t size) [constructor] cls.add_constructor([param('uint8_t const *', 'buffer'), param('uint32_t', 'size')]) ## packet.h (module 'network'): void ns3::Packet::AddAtEnd(ns3::Ptr<const ns3::Packet> packet) [member function] cls.add_method('AddAtEnd', 'void', [param('ns3::Ptr< ns3::Packet const >', 'packet')]) ## packet.h (module 'network'): void ns3::Packet::AddByteTag(ns3::Tag const & tag) const [member function] cls.add_method('AddByteTag', 'void', [param('ns3::Tag const &', 'tag')], is_const=True) ## packet.h (module 'network'): void ns3::Packet::AddHeader(ns3::Header const & header) [member function] cls.add_method('AddHeader', 'void', [param('ns3::Header const &', 'header')]) ## packet.h (module 'network'): void ns3::Packet::AddPacketTag(ns3::Tag const & tag) const [member function] cls.add_method('AddPacketTag', 'void', [param('ns3::Tag const &', 'tag')], is_const=True) ## packet.h (module 'network'): void ns3::Packet::AddPaddingAtEnd(uint32_t size) [member function] cls.add_method('AddPaddingAtEnd', 'void', [param('uint32_t', 'size')]) ## packet.h (module 'network'): void ns3::Packet::AddTrailer(ns3::Trailer const & trailer) [member function] cls.add_method('AddTrailer', 'void', [param('ns3::Trailer const &', 'trailer')]) ## packet.h (module 'network'): ns3::PacketMetadata::ItemIterator ns3::Packet::BeginItem() const [member function] cls.add_method('BeginItem', 'ns3::PacketMetadata::ItemIterator', [], is_const=True) ## packet.h 
(module 'network'): ns3::Ptr<ns3::Packet> ns3::Packet::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::Packet >', [], is_const=True) ## packet.h (module 'network'): uint32_t ns3::Packet::CopyData(uint8_t * buffer, uint32_t size) const [member function] cls.add_method('CopyData', 'uint32_t', [param('uint8_t *', 'buffer'), param('uint32_t', 'size')], is_const=True) ## packet.h (module 'network'): void ns3::Packet::CopyData(std::ostream * os, uint32_t size) const [member function] cls.add_method('CopyData', 'void', [param('std::ostream *', 'os'), param('uint32_t', 'size')], is_const=True) ## packet.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::Packet::CreateFragment(uint32_t start, uint32_t length) const [member function] cls.add_method('CreateFragment', 'ns3::Ptr< ns3::Packet >', [param('uint32_t', 'start'), param('uint32_t', 'length')], is_const=True) ## packet.h (module 'network'): static void ns3::Packet::EnableChecking() [member function] cls.add_method('EnableChecking', 'void', [], is_static=True) ## packet.h (module 'network'): static void ns3::Packet::EnablePrinting() [member function] cls.add_method('EnablePrinting', 'void', [], is_static=True) ## packet.h (module 'network'): bool ns3::Packet::FindFirstMatchingByteTag(ns3::Tag & tag) const [member function] cls.add_method('FindFirstMatchingByteTag', 'bool', [param('ns3::Tag &', 'tag')], is_const=True) ## packet.h (module 'network'): ns3::ByteTagIterator ns3::Packet::GetByteTagIterator() const [member function] cls.add_method('GetByteTagIterator', 'ns3::ByteTagIterator', [], is_const=True) ## packet.h (module 'network'): ns3::Ptr<ns3::NixVector> ns3::Packet::GetNixVector() const [member function] cls.add_method('GetNixVector', 'ns3::Ptr< ns3::NixVector >', [], is_const=True) ## packet.h (module 'network'): ns3::PacketTagIterator ns3::Packet::GetPacketTagIterator() const [member function] cls.add_method('GetPacketTagIterator', 'ns3::PacketTagIterator', [], is_const=True) ## packet.h (module 'network'): uint32_t ns3::Packet::GetSerializedSize() const [member function] cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True) ## packet.h (module 'network'): uint32_t ns3::Packet::GetSize() const [member function] cls.add_method('GetSize', 'uint32_t', [], is_const=True) ## packet.h (module 'network'): uint64_t ns3::Packet::GetUid() const [member function] cls.add_method('GetUid', 'uint64_t', [], is_const=True) ## packet.h (module 'network'): uint32_t ns3::Packet::PeekHeader(ns3::Header & header) const [member function] cls.add_method('PeekHeader', 'uint32_t', [param('ns3::Header &', 'header')], is_const=True) ## packet.h (module 'network'): bool ns3::Packet::PeekPacketTag(ns3::Tag & tag) const [member function] cls.add_method('PeekPacketTag', 'bool', [param('ns3::Tag &', 'tag')], is_const=True) ## packet.h (module 'network'): uint32_t ns3::Packet::PeekTrailer(ns3::Trailer & trailer) [member function] cls.add_method('PeekTrailer', 'uint32_t', [param('ns3::Trailer &', 'trailer')]) ## packet.h (module 'network'): void ns3::Packet::Print(std::ostream & os) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True) ## packet.h (module 'network'): void ns3::Packet::PrintByteTags(std::ostream & os) const [member function] cls.add_method('PrintByteTags', 'void', [param('std::ostream &', 'os')], is_const=True) ## packet.h (module 'network'): void ns3::Packet::PrintPacketTags(std::ostream & os) const [member function] cls.add_method('PrintPacketTags', 'void', 
[param('std::ostream &', 'os')], is_const=True) ## packet.h (module 'network'): void ns3::Packet::RemoveAllByteTags() [member function] cls.add_method('RemoveAllByteTags', 'void', []) ## packet.h (module 'network'): void ns3::Packet::RemoveAllPacketTags() [member function] cls.add_method('RemoveAllPacketTags', 'void', []) ## packet.h (module 'network'): void ns3::Packet::RemoveAtEnd(uint32_t size) [member function] cls.add_method('RemoveAtEnd', 'void', [param('uint32_t', 'size')]) ## packet.h (module 'network'): void ns3::Packet::RemoveAtStart(uint32_t size) [member function] cls.add_method('RemoveAtStart', 'void', [param('uint32_t', 'size')]) ## packet.h (module 'network'): uint32_t ns3::Packet::RemoveHeader(ns3::Header & header) [member function] cls.add_method('RemoveHeader', 'uint32_t', [param('ns3::Header &', 'header')]) ## packet.h (module 'network'): bool ns3::Packet::RemovePacketTag(ns3::Tag & tag) [member function] cls.add_method('RemovePacketTag', 'bool', [param('ns3::Tag &', 'tag')]) ## packet.h (module 'network'): uint32_t ns3::Packet::RemoveTrailer(ns3::Trailer & trailer) [member function] cls.add_method('RemoveTrailer', 'uint32_t', [param('ns3::Trailer &', 'trailer')]) ## packet.h (module 'network'): bool ns3::Packet::ReplacePacketTag(ns3::Tag & tag) [member function] cls.add_method('ReplacePacketTag', 'bool', [param('ns3::Tag &', 'tag')]) ## packet.h (module 'network'): uint32_t ns3::Packet::Serialize(uint8_t * buffer, uint32_t maxSize) const [member function] cls.add_method('Serialize', 'uint32_t', [param('uint8_t *', 'buffer'), param('uint32_t', 'maxSize')], is_const=True) ## packet.h (module 'network'): void ns3::Packet::SetNixVector(ns3::Ptr<ns3::NixVector> nixVector) [member function] cls.add_method('SetNixVector', 'void', [param('ns3::Ptr< ns3::NixVector >', 'nixVector')]) ## packet.h (module 'network'): std::string ns3::Packet::ToString() const [member function] cls.add_method('ToString', 'std::string', [], is_const=True) return def register_Ns3TapBridge_methods(root_module, cls): ## tap-bridge.h (module 'tap-bridge'): ns3::TapBridge::TapBridge(ns3::TapBridge const & arg0) [copy constructor] cls.add_constructor([param('ns3::TapBridge const &', 'arg0')]) ## tap-bridge.h (module 'tap-bridge'): ns3::TapBridge::TapBridge() [constructor] cls.add_constructor([]) ## tap-bridge.h (module 'tap-bridge'): void ns3::TapBridge::AddLinkChangeCallback(ns3::Callback<void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> callback) [member function] cls.add_method('AddLinkChangeCallback', 'void', [param('ns3::Callback< void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'callback')], is_virtual=True) ## tap-bridge.h (module 'tap-bridge'): ns3::Address ns3::TapBridge::GetAddress() const [member function] cls.add_method('GetAddress', 'ns3::Address', [], is_const=True, is_virtual=True) ## tap-bridge.h (module 'tap-bridge'): ns3::Ptr<ns3::NetDevice> ns3::TapBridge::GetBridgedNetDevice() [member function] cls.add_method('GetBridgedNetDevice', 'ns3::Ptr< ns3::NetDevice >', []) ## tap-bridge.h (module 'tap-bridge'): ns3::Address ns3::TapBridge::GetBroadcast() const [member function] cls.add_method('GetBroadcast', 'ns3::Address', [], is_const=True, is_virtual=True) ## tap-bridge.h (module 'tap-bridge'): ns3::Ptr<ns3::Channel> ns3::TapBridge::GetChannel() const [member function] cls.add_method('GetChannel', 'ns3::Ptr< ns3::Channel >', [], is_const=True, is_virtual=True) ## 
tap-bridge.h (module 'tap-bridge'): uint32_t ns3::TapBridge::GetIfIndex() const [member function] cls.add_method('GetIfIndex', 'uint32_t', [], is_const=True, is_virtual=True) ## tap-bridge.h (module 'tap-bridge'): ns3::TapBridge::Mode ns3::TapBridge::GetMode() [member function] cls.add_method('GetMode', 'ns3::TapBridge::Mode', []) ## tap-bridge.h (module 'tap-bridge'): uint16_t ns3::TapBridge::GetMtu() const [member function] cls.add_method('GetMtu', 'uint16_t', [], is_const=True, is_virtual=True) ## tap-bridge.h (module 'tap-bridge'): ns3::Address ns3::TapBridge::GetMulticast(ns3::Ipv4Address multicastGroup) const [member function] cls.add_method('GetMulticast', 'ns3::Address', [param('ns3::Ipv4Address', 'multicastGroup')], is_const=True, is_virtual=True) ## tap-bridge.h (module 'tap-bridge'): ns3::Address ns3::TapBridge::GetMulticast(ns3::Ipv6Address addr) const [member function] cls.add_method('GetMulticast', 'ns3::Address', [param('ns3::Ipv6Address', 'addr')], is_const=True, is_virtual=True) ## tap-bridge.h (module 'tap-bridge'): ns3::Ptr<ns3::Node> ns3::TapBridge::GetNode() const [member function] cls.add_method('GetNode', 'ns3::Ptr< ns3::Node >', [], is_const=True, is_virtual=True) ## tap-bridge.h (module 'tap-bridge'): static ns3::TypeId ns3::TapBridge::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## tap-bridge.h (module 'tap-bridge'): bool ns3::TapBridge::IsBridge() const [member function] cls.add_method('IsBridge', 'bool', [], is_const=True, is_virtual=True) ## tap-bridge.h (module 'tap-bridge'): bool ns3::TapBridge::IsBroadcast() const [member function] cls.add_method('IsBroadcast', 'bool', [], is_const=True, is_virtual=True) ## tap-bridge.h (module 'tap-bridge'): bool ns3::TapBridge::IsLinkUp() const [member function] cls.add_method('IsLinkUp', 'bool', [], is_const=True, is_virtual=True) ## tap-bridge.h (module 'tap-bridge'): bool ns3::TapBridge::IsMulticast() const [member function] cls.add_method('IsMulticast', 'bool', [], is_const=True, is_virtual=True) ## tap-bridge.h (module 'tap-bridge'): bool ns3::TapBridge::IsPointToPoint() const [member function] cls.add_method('IsPointToPoint', 'bool', [], is_const=True, is_virtual=True) ## tap-bridge.h (module 'tap-bridge'): bool ns3::TapBridge::NeedsArp() const [member function] cls.add_method('NeedsArp', 'bool', [], is_const=True, is_virtual=True) ## tap-bridge.h (module 'tap-bridge'): bool ns3::TapBridge::Send(ns3::Ptr<ns3::Packet> packet, ns3::Address const & dest, uint16_t protocolNumber) [member function] cls.add_method('Send', 'bool', [param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')], is_virtual=True) ## tap-bridge.h (module 'tap-bridge'): bool ns3::TapBridge::SendFrom(ns3::Ptr<ns3::Packet> packet, ns3::Address const & source, ns3::Address const & dest, uint16_t protocolNumber) [member function] cls.add_method('SendFrom', 'bool', [param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'source'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')], is_virtual=True) ## tap-bridge.h (module 'tap-bridge'): void ns3::TapBridge::SetAddress(ns3::Address address) [member function] cls.add_method('SetAddress', 'void', [param('ns3::Address', 'address')], is_virtual=True) ## tap-bridge.h (module 'tap-bridge'): void ns3::TapBridge::SetBridgedNetDevice(ns3::Ptr<ns3::NetDevice> bridgedDevice) [member function] cls.add_method('SetBridgedNetDevice', 'void', [param('ns3::Ptr< 
ns3::NetDevice >', 'bridgedDevice')]) ## tap-bridge.h (module 'tap-bridge'): void ns3::TapBridge::SetIfIndex(uint32_t const index) [member function] cls.add_method('SetIfIndex', 'void', [param('uint32_t const', 'index')], is_virtual=True) ## tap-bridge.h (module 'tap-bridge'): void ns3::TapBridge::SetMode(ns3::TapBridge::Mode mode) [member function] cls.add_method('SetMode', 'void', [param('ns3::TapBridge::Mode', 'mode')]) ## tap-bridge.h (module 'tap-bridge'): bool ns3::TapBridge::SetMtu(uint16_t const mtu) [member function] cls.add_method('SetMtu', 'bool', [param('uint16_t const', 'mtu')], is_virtual=True) ## tap-bridge.h (module 'tap-bridge'): void ns3::TapBridge::SetNode(ns3::Ptr<ns3::Node> node) [member function] cls.add_method('SetNode', 'void', [param('ns3::Ptr< ns3::Node >', 'node')], is_virtual=True) ## tap-bridge.h (module 'tap-bridge'): void ns3::TapBridge::SetPromiscReceiveCallback(ns3::Callback<bool, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::Address const&, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty> cb) [member function] cls.add_method('SetPromiscReceiveCallback', 'void', [param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'cb')], is_virtual=True) ## tap-bridge.h (module 'tap-bridge'): void ns3::TapBridge::SetReceiveCallback(ns3::Callback<bool, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> cb) [member function] cls.add_method('SetReceiveCallback', 'void', [param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')], is_virtual=True) ## tap-bridge.h (module 'tap-bridge'): void ns3::TapBridge::Start(ns3::Time tStart) [member function] cls.add_method('Start', 'void', [param('ns3::Time', 'tStart')]) ## tap-bridge.h (module 'tap-bridge'): void ns3::TapBridge::Stop(ns3::Time tStop) [member function] cls.add_method('Stop', 'void', [param('ns3::Time', 'tStop')]) ## tap-bridge.h (module 'tap-bridge'): bool ns3::TapBridge::SupportsSendFrom() const [member function] cls.add_method('SupportsSendFrom', 'bool', [], is_const=True, is_virtual=True) ## tap-bridge.h (module 'tap-bridge'): bool ns3::TapBridge::DiscardFromBridgedDevice(ns3::Ptr<ns3::NetDevice> device, ns3::Ptr<const ns3::Packet> packet, uint16_t protocol, ns3::Address const & src) [member function] cls.add_method('DiscardFromBridgedDevice', 'bool', [param('ns3::Ptr< ns3::NetDevice >', 'device'), param('ns3::Ptr< ns3::Packet const >', 'packet'), param('uint16_t', 'protocol'), param('ns3::Address const &', 'src')], visibility='protected') ## tap-bridge.h (module 'tap-bridge'): void ns3::TapBridge::DoDispose() [member function] cls.add_method('DoDispose', 'void', [], visibility='protected', is_virtual=True) ## tap-bridge.h (module 'tap-bridge'): bool ns3::TapBridge::ReceiveFromBridgedDevice(ns3::Ptr<ns3::NetDevice> device, ns3::Ptr<const ns3::Packet> packet, uint16_t protocol, ns3::Address const & src, ns3::Address const & dst, ns3::NetDevice::PacketType packetType) [member function] cls.add_method('ReceiveFromBridgedDevice', 'bool', [param('ns3::Ptr< ns3::NetDevice >', 'device'), param('ns3::Ptr< ns3::Packet const >', 'packet'), param('uint16_t', 'protocol'), 
param('ns3::Address const &', 'src'), param('ns3::Address const &', 'dst'), param('ns3::NetDevice::PacketType', 'packetType')], visibility='protected') return def register_Ns3TapBridgeFdReader_methods(root_module, cls): ## tap-bridge.h (module 'tap-bridge'): ns3::TapBridgeFdReader::TapBridgeFdReader() [constructor] cls.add_constructor([]) ## tap-bridge.h (module 'tap-bridge'): ns3::TapBridgeFdReader::TapBridgeFdReader(ns3::TapBridgeFdReader const & arg0) [copy constructor] cls.add_constructor([param('ns3::TapBridgeFdReader const &', 'arg0')]) ## tap-bridge.h (module 'tap-bridge'): ns3::FdReader::Data ns3::TapBridgeFdReader::DoRead() [member function] cls.add_method('DoRead', 'ns3::FdReader::Data', [], visibility='private', is_virtual=True) return def register_Ns3TimeValue_methods(root_module, cls): ## nstime.h (module 'core'): ns3::TimeValue::TimeValue() [constructor] cls.add_constructor([]) ## nstime.h (module 'core'): ns3::TimeValue::TimeValue(ns3::TimeValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::TimeValue const &', 'arg0')]) ## nstime.h (module 'core'): ns3::TimeValue::TimeValue(ns3::Time const & value) [constructor] cls.add_constructor([param('ns3::Time const &', 'value')]) ## nstime.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::TimeValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True) ## nstime.h (module 'core'): bool ns3::TimeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True) ## nstime.h (module 'core'): ns3::Time ns3::TimeValue::Get() const [member function] cls.add_method('Get', 'ns3::Time', [], is_const=True) ## nstime.h (module 'core'): std::string ns3::TimeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True) ## nstime.h (module 'core'): void ns3::TimeValue::Set(ns3::Time const & value) [member function] cls.add_method('Set', 'void', [param('ns3::Time const &', 'value')]) return def register_Ns3TypeIdChecker_methods(root_module, cls): ## type-id.h (module 'core'): ns3::TypeIdChecker::TypeIdChecker() [constructor] cls.add_constructor([]) ## type-id.h (module 'core'): ns3::TypeIdChecker::TypeIdChecker(ns3::TypeIdChecker const & arg0) [copy constructor] cls.add_constructor([param('ns3::TypeIdChecker const &', 'arg0')]) return def register_Ns3TypeIdValue_methods(root_module, cls): ## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue() [constructor] cls.add_constructor([]) ## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue(ns3::TypeIdValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::TypeIdValue const &', 'arg0')]) ## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue(ns3::TypeId const & value) [constructor] cls.add_constructor([param('ns3::TypeId const &', 'value')]) ## type-id.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::TypeIdValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True) ## type-id.h (module 'core'): bool ns3::TypeIdValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] 
cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeIdValue::Get() const [member function] cls.add_method('Get', 'ns3::TypeId', [], is_const=True) ## type-id.h (module 'core'): std::string ns3::TypeIdValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True) ## type-id.h (module 'core'): void ns3::TypeIdValue::Set(ns3::TypeId const & value) [member function] cls.add_method('Set', 'void', [param('ns3::TypeId const &', 'value')]) return def register_Ns3AddressChecker_methods(root_module, cls): ## address.h (module 'network'): ns3::AddressChecker::AddressChecker() [constructor] cls.add_constructor([]) ## address.h (module 'network'): ns3::AddressChecker::AddressChecker(ns3::AddressChecker const & arg0) [copy constructor] cls.add_constructor([param('ns3::AddressChecker const &', 'arg0')]) return def register_Ns3AddressValue_methods(root_module, cls): ## address.h (module 'network'): ns3::AddressValue::AddressValue() [constructor] cls.add_constructor([]) ## address.h (module 'network'): ns3::AddressValue::AddressValue(ns3::AddressValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::AddressValue const &', 'arg0')]) ## address.h (module 'network'): ns3::AddressValue::AddressValue(ns3::Address const & value) [constructor] cls.add_constructor([param('ns3::Address const &', 'value')]) ## address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::AddressValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True) ## address.h (module 'network'): bool ns3::AddressValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True) ## address.h (module 'network'): ns3::Address ns3::AddressValue::Get() const [member function] cls.add_method('Get', 'ns3::Address', [], is_const=True) ## address.h (module 'network'): std::string ns3::AddressValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True) ## address.h (module 'network'): void ns3::AddressValue::Set(ns3::Address const & value) [member function] cls.add_method('Set', 'void', [param('ns3::Address const &', 'value')]) return def register_Ns3HashImplementation_methods(root_module, cls): ## hash-function.h (module 'core'): ns3::Hash::Implementation::Implementation(ns3::Hash::Implementation const & arg0) [copy constructor] cls.add_constructor([param('ns3::Hash::Implementation const &', 'arg0')]) ## hash-function.h (module 'core'): ns3::Hash::Implementation::Implementation() [constructor] cls.add_constructor([]) ## hash-function.h (module 'core'): uint32_t ns3::Hash::Implementation::GetHash32(char const * buffer, size_t const size) [member function] cls.add_method('GetHash32', 'uint32_t', [param('char const *', 'buffer'), param('size_t const', 'size')], is_pure_virtual=True, is_virtual=True) ## hash-function.h (module 'core'): uint64_t 
ns3::Hash::Implementation::GetHash64(char const * buffer, size_t const size) [member function] cls.add_method('GetHash64', 'uint64_t', [param('char const *', 'buffer'), param('size_t const', 'size')], is_virtual=True) ## hash-function.h (module 'core'): void ns3::Hash::Implementation::clear() [member function] cls.add_method('clear', 'void', [], is_pure_virtual=True, is_virtual=True) return def register_Ns3HashFunctionFnv1a_methods(root_module, cls): ## hash-fnv.h (module 'core'): ns3::Hash::Function::Fnv1a::Fnv1a(ns3::Hash::Function::Fnv1a const & arg0) [copy constructor] cls.add_constructor([param('ns3::Hash::Function::Fnv1a const &', 'arg0')]) ## hash-fnv.h (module 'core'): ns3::Hash::Function::Fnv1a::Fnv1a() [constructor] cls.add_constructor([]) ## hash-fnv.h (module 'core'): uint32_t ns3::Hash::Function::Fnv1a::GetHash32(char const * buffer, size_t const size) [member function] cls.add_method('GetHash32', 'uint32_t', [param('char const *', 'buffer'), param('size_t const', 'size')], is_virtual=True) ## hash-fnv.h (module 'core'): uint64_t ns3::Hash::Function::Fnv1a::GetHash64(char const * buffer, size_t const size) [member function] cls.add_method('GetHash64', 'uint64_t', [param('char const *', 'buffer'), param('size_t const', 'size')], is_virtual=True) ## hash-fnv.h (module 'core'): void ns3::Hash::Function::Fnv1a::clear() [member function] cls.add_method('clear', 'void', [], is_virtual=True) return def register_Ns3HashFunctionHash32_methods(root_module, cls): ## hash-function.h (module 'core'): ns3::Hash::Function::Hash32::Hash32(ns3::Hash::Function::Hash32 const & arg0) [copy constructor] cls.add_constructor([param('ns3::Hash::Function::Hash32 const &', 'arg0')]) ## hash-function.h (module 'core'): ns3::Hash::Function::Hash32::Hash32(ns3::Hash::Hash32Function_ptr hp) [constructor] cls.add_constructor([param('ns3::Hash::Hash32Function_ptr', 'hp')]) ## hash-function.h (module 'core'): uint32_t ns3::Hash::Function::Hash32::GetHash32(char const * buffer, size_t const size) [member function] cls.add_method('GetHash32', 'uint32_t', [param('char const *', 'buffer'), param('size_t const', 'size')], is_virtual=True) ## hash-function.h (module 'core'): void ns3::Hash::Function::Hash32::clear() [member function] cls.add_method('clear', 'void', [], is_virtual=True) return def register_Ns3HashFunctionHash64_methods(root_module, cls): ## hash-function.h (module 'core'): ns3::Hash::Function::Hash64::Hash64(ns3::Hash::Function::Hash64 const & arg0) [copy constructor] cls.add_constructor([param('ns3::Hash::Function::Hash64 const &', 'arg0')]) ## hash-function.h (module 'core'): ns3::Hash::Function::Hash64::Hash64(ns3::Hash::Hash64Function_ptr hp) [constructor] cls.add_constructor([param('ns3::Hash::Hash64Function_ptr', 'hp')]) ## hash-function.h (module 'core'): uint32_t ns3::Hash::Function::Hash64::GetHash32(char const * buffer, size_t const size) [member function] cls.add_method('GetHash32', 'uint32_t', [param('char const *', 'buffer'), param('size_t const', 'size')], is_virtual=True) ## hash-function.h (module 'core'): uint64_t ns3::Hash::Function::Hash64::GetHash64(char const * buffer, size_t const size) [member function] cls.add_method('GetHash64', 'uint64_t', [param('char const *', 'buffer'), param('size_t const', 'size')], is_virtual=True) ## hash-function.h (module 'core'): void ns3::Hash::Function::Hash64::clear() [member function] cls.add_method('clear', 'void', [], is_virtual=True) return def register_Ns3HashFunctionMurmur3_methods(root_module, cls): ## hash-murmur3.h (module 'core'): 
ns3::Hash::Function::Murmur3::Murmur3(ns3::Hash::Function::Murmur3 const & arg0) [copy constructor] cls.add_constructor([param('ns3::Hash::Function::Murmur3 const &', 'arg0')]) ## hash-murmur3.h (module 'core'): ns3::Hash::Function::Murmur3::Murmur3() [constructor] cls.add_constructor([]) ## hash-murmur3.h (module 'core'): uint32_t ns3::Hash::Function::Murmur3::GetHash32(char const * buffer, size_t const size) [member function] cls.add_method('GetHash32', 'uint32_t', [param('char const *', 'buffer'), param('size_t const', 'size')], is_virtual=True) ## hash-murmur3.h (module 'core'): uint64_t ns3::Hash::Function::Murmur3::GetHash64(char const * buffer, size_t const size) [member function] cls.add_method('GetHash64', 'uint64_t', [param('char const *', 'buffer'), param('size_t const', 'size')], is_virtual=True) ## hash-murmur3.h (module 'core'): void ns3::Hash::Function::Murmur3::clear() [member function] cls.add_method('clear', 'void', [], is_virtual=True) return def register_functions(root_module): module = root_module register_functions_ns3_FatalImpl(module.get_submodule('FatalImpl'), root_module) register_functions_ns3_Hash(module.get_submodule('Hash'), root_module) register_functions_ns3_TracedValueCallback(module.get_submodule('TracedValueCallback'), root_module) return def register_functions_ns3_FatalImpl(module, root_module): return def register_functions_ns3_Hash(module, root_module): register_functions_ns3_Hash_Function(module.get_submodule('Function'), root_module) return def register_functions_ns3_Hash_Function(module, root_module): return def register_functions_ns3_TracedValueCallback(module, root_module): return def main(): out = FileCodeSink(sys.stdout) root_module = module_init() register_types(root_module) register_methods(root_module) register_functions(root_module) root_module.generate(out) if __name__ == '__main__': main()
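# ---------------------------------------------------------------------------
# The registrations above are machine-generated, but they all follow a single
# pybindgen idiom: create (or look up) a class wrapper, then mirror each C++
# declaration with cls.add_constructor() / cls.add_method() calls, and finally
# emit the wrapper code with Module.generate().  Below is a minimal
# hand-written sketch of that same idiom; the `Counter` class and its methods
# are hypothetical and only illustrate the pattern -- they are not part of the
# generated tap-bridge module.
import sys
from pybindgen import Module, FileCodeSink, param

def sketch_counter_module():
    mod = Module('demo')
    cls = mod.add_class('Counter')                 # wraps `class Counter`
    cls.add_constructor([])                        # Counter()
    cls.add_method('Add', 'void',                  # void Add(uint32_t n)
                   [param('uint32_t', 'n')])
    cls.add_method('Get', 'uint32_t', [],          # uint32_t Get() const
                   is_const=True)
    mod.generate(FileCodeSink(sys.stdout))         # emit the C++ wrapper code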
import time

from PyQt4 import QtCore
from PyQt4.QtCore import QEvent


class Clock(object):
    def __init__(self):
        self.reset()

    @staticmethod
    def sysTime():
        return time.time()

    def reset(self):
        self._reference = Clock.sysTime()

    def get(self):
        return Clock.sysTime() - self._reference


class Timer(Clock):
    def __init__(self, interval=0, start=False, func=None, repeat=False):
        super(Timer, self).__init__()
        self.repeat = repeat
        self.interval = interval
        self.func = func
        if start:
            self._reference = Clock.sysTime()

    def reset(self):
        # A reference of 0 means "not started" (see started()).
        self._reference = 0

    def start(self, interval=None, func=None, repeat=False):
        self._reference = Clock.sysTime()
        if interval is not None:
            self.interval = interval
        self.func = func
        self.repeat = repeat

    def update(self):
        if not self.started():
            return False
        now = Clock.sysTime()
        if self._reference + self.interval <= now:
            # Re-arm repeating timers; one-shot timers stop here.
            self._reference = now if self.repeat else 0
            if self.func is not None:
                self.func()
            return True
        return False

    def started(self):
        return self._reference != 0


class KeyAdapter(object):
    def __init__(self):
        self.pressed = set()

    def keyEvent(self, e):
        #if e.isAutoRepeat():
        #    return
        if e.text() != "":
            key = str(e.text().lower()[0])
            if e.type() == QEvent.KeyPress:
                self.pressed |= set([key])
            elif e.type() == QEvent.KeyRelease:
                self.pressed -= set([key])
        specialKeys = {
            QtCore.Qt.Key_Alt: "alt",
            QtCore.Qt.Key_Control: "control",
            QtCore.Qt.Key_Shift: "shift",
            QtCore.Qt.Key_Left: "left",
            QtCore.Qt.Key_Right: "right",
            QtCore.Qt.Key_Up: "up",
            QtCore.Qt.Key_Down: "down",
        }
        for k, v in specialKeys.items():
            if e.key() == k and e.type() == QEvent.KeyPress:
                self.pressed |= set([v])
            elif e.key() == k and e.type() == QEvent.KeyRelease:
                self.pressed -= set([v])


def nested_merge(a, b):
    """Recursively merge b into a; values already present in a have priority."""
    if isinstance(a, dict) and isinstance(b, dict):
        for key in b:
            if key in a:
                if isinstance(a[key], dict) and isinstance(b[key], dict):
                    nested_merge(a[key], b[key])
                elif isinstance(a[key], list) and isinstance(b[key], list):
                    nested_merge(a[key], b[key])
            else:
                a[key] = b[key]
        return a
    elif isinstance(a, list) and isinstance(b, list):
        for i in range(len(b)):
            if i > len(a) - 1:
                a.append(b[i])
            else:
                if isinstance(a[i], dict) and isinstance(b[i], dict):
                    nested_merge(a[i], b[i])
                elif isinstance(a[i], list) and isinstance(b[i], list):
                    nested_merge(a[i], b[i])
        return a
    else:
        raise ValueError("nested_merge expects two dicts or two lists")


import re


def __atoi(text):
    return int(text) if text.isdigit() else text


def __natural_keys(text):
    '''
    alist.sort(key=__natural_keys) sorts in human order
    http://nedbatchelder.com/blog/200712/human_sorting.html
    (See Toothy's implementation in the comments)
    '''
    return [__atoi(c) for c in re.split(r'(\d+)', text)]


def naturalSorted(l):
    return sorted(l, key=__natural_keys)


import operator
import math


class Vec2d(object):
    """2d vector class: supports vector and scalar operators,
    and also provides a bunch of high-level functions.
    """
    __slots__ = ['x', 'y']

    def __init__(self, x_or_pair, y=None):
        if y is None:
            self.x = x_or_pair[0]
            self.y = x_or_pair[1]
        else:
            self.x = x_or_pair
            self.y = y

    def __len__(self):
        return 2

    def __getitem__(self, key):
        if key == 0:
            return self.x
        elif key == 1:
            return self.y
        else:
            raise IndexError("Invalid subscript " + str(key) + " to Vec2d")

    def __setitem__(self, key, value):
        if key == 0:
            self.x = value
        elif key == 1:
            self.y = value
        else:
            raise IndexError("Invalid subscript " + str(key) + " to Vec2d")

    # String representation (for debugging)
    def __repr__(self):
        return 'Vec2d(%s, %s)' % (self.x, self.y)

    # Comparison
    def __eq__(self, other):
        if hasattr(other, "__getitem__") and len(other) == 2:
            return self.x == other[0] and self.y == other[1]
        else:
            return False

    def __ne__(self, other):
        if hasattr(other, "__getitem__") and len(other) == 2:
            return self.x != other[0] or self.y != other[1]
        else:
            return True

    def __nonzero__(self):
        return bool(self.x or self.y)

    # Generic operator handlers
    def _o2(self, other, f):
        "Any two-operator operation where the left operand is a Vec2d"
        if isinstance(other, Vec2d):
            return Vec2d(f(self.x, other.x), f(self.y, other.y))
        elif hasattr(other, "__getitem__"):
            return Vec2d(f(self.x, other[0]), f(self.y, other[1]))
        else:
            return Vec2d(f(self.x, other), f(self.y, other))

    def _r_o2(self, other, f):
        "Any two-operator operation where the right operand is a Vec2d"
        if hasattr(other, "__getitem__"):
            return Vec2d(f(other[0], self.x), f(other[1], self.y))
        else:
            return Vec2d(f(other, self.x), f(other, self.y))

    def _io(self, other, f):
        "In-place operator"
        if hasattr(other, "__getitem__"):
            self.x = f(self.x, other[0])
            self.y = f(self.y, other[1])
        else:
            self.x = f(self.x, other)
            self.y = f(self.y, other)
        return self

    # Addition
    def __add__(self, other):
        if isinstance(other, Vec2d):
            return Vec2d(self.x + other.x, self.y + other.y)
        elif hasattr(other, "__getitem__"):
            return Vec2d(self.x + other[0], self.y + other[1])
        else:
            return Vec2d(self.x + other, self.y + other)
    __radd__ = __add__

    def __iadd__(self, other):
        if isinstance(other, Vec2d):
            self.x += other.x
            self.y += other.y
        elif hasattr(other, "__getitem__"):
            self.x += other[0]
            self.y += other[1]
        else:
            self.x += other
            self.y += other
        return self

    # Subtraction
    def __sub__(self, other):
        if isinstance(other, Vec2d):
            return Vec2d(self.x - other.x, self.y - other.y)
        elif hasattr(other, "__getitem__"):
            return Vec2d(self.x - other[0], self.y - other[1])
        else:
            return Vec2d(self.x - other, self.y - other)

    def __rsub__(self, other):
        if isinstance(other, Vec2d):
            return Vec2d(other.x - self.x, other.y - self.y)
        if hasattr(other, "__getitem__"):
            return Vec2d(other[0] - self.x, other[1] - self.y)
        else:
            return Vec2d(other - self.x, other - self.y)

    def __isub__(self, other):
        if isinstance(other, Vec2d):
            self.x -= other.x
            self.y -= other.y
        elif hasattr(other, "__getitem__"):
            self.x -= other[0]
            self.y -= other[1]
        else:
            self.x -= other
            self.y -= other
        return self

    # Multiplication
    def __mul__(self, other):
        if isinstance(other, Vec2d):
            return Vec2d(self.x * other.x, self.y * other.y)
        if hasattr(other, "__getitem__"):
            return Vec2d(self.x * other[0], self.y * other[1])
        else:
            return Vec2d(self.x * other, self.y * other)
    __rmul__ = __mul__

    def __imul__(self, other):
        if isinstance(other, Vec2d):
            self.x *= other.x
            self.y *= other.y
        elif hasattr(other, "__getitem__"):
            self.x *= other[0]
            self.y *= other[1]
        else:
            self.x *= other
            self.y *= other
        return self

    # Division (operator.div is the Python 2 classic-division operator)
    def __div__(self, other):
        return self._o2(other, operator.div)

    def __rdiv__(self, other):
        return self._r_o2(other, operator.div)

    def __idiv__(self, other):
        return self._io(other, operator.div)

    def __floordiv__(self, other):
        return self._o2(other, operator.floordiv)

    def __rfloordiv__(self, other):
        return self._r_o2(other, operator.floordiv)

    def __ifloordiv__(self, other):
        return self._io(other, operator.floordiv)

    def __truediv__(self, other):
        return self._o2(other, operator.truediv)

    def __rtruediv__(self, other):
        return self._r_o2(other, operator.truediv)

    def __itruediv__(self, other):
        # Was operator.floordiv, which silently truncated; use true division.
        return self._io(other, operator.truediv)

    # Modulo
    def __mod__(self, other):
        return self._o2(other, operator.mod)

    def __rmod__(self, other):
        return self._r_o2(other, operator.mod)

    def __divmod__(self, other):
        # The operator module has no divmod(); use the builtin, applied
        # componentwise like every other binary operation here.
        return self._o2(other, divmod)

    def __rdivmod__(self, other):
        return self._r_o2(other, divmod)

    # Exponentiation
    def __pow__(self, other):
        return self._o2(other, operator.pow)

    def __rpow__(self, other):
        return self._r_o2(other, operator.pow)

    # Bitwise operators
    def __lshift__(self, other):
        return self._o2(other, operator.lshift)

    def __rlshift__(self, other):
        return self._r_o2(other, operator.lshift)

    def __rshift__(self, other):
        return self._o2(other, operator.rshift)

    def __rrshift__(self, other):
        return self._r_o2(other, operator.rshift)

    def __and__(self, other):
        return self._o2(other, operator.and_)
    __rand__ = __and__

    def __or__(self, other):
        return self._o2(other, operator.or_)
    __ror__ = __or__

    def __xor__(self, other):
        return self._o2(other, operator.xor)
    __rxor__ = __xor__

    # Unary operations
    def __neg__(self):
        return Vec2d(operator.neg(self.x), operator.neg(self.y))

    def __pos__(self):
        return Vec2d(operator.pos(self.x), operator.pos(self.y))

    def __abs__(self):
        return Vec2d(abs(self.x), abs(self.y))

    def __invert__(self):
        return Vec2d(-self.x, -self.y)

    # Vector functions
    def get_length_sqrd(self):
        return self.x ** 2 + self.y ** 2

    def get_length(self):
        return math.sqrt(self.x ** 2 + self.y ** 2)

    def __setlength(self, value):
        length = self.get_length()
        self.x *= value / length
        self.y *= value / length
    length = property(get_length, __setlength, None,
                      "gets or sets the magnitude of the vector")

    def rotate(self, angle_degrees):
        radians = math.radians(angle_degrees)
        cos = math.cos(radians)
        sin = math.sin(radians)
        x = self.x * cos - self.y * sin
        y = self.x * sin + self.y * cos
        self.x = x
        self.y = y

    def rotated(self, angle_degrees):
        radians = math.radians(angle_degrees)
        cos = math.cos(radians)
        sin = math.sin(radians)
        x = self.x * cos - self.y * sin
        y = self.x * sin + self.y * cos
        return Vec2d(x, y)

    def get_angle(self):
        if self.get_length_sqrd() == 0:
            return 0
        return math.degrees(math.atan2(self.y, self.x))

    def __setangle(self, angle_degrees):
        self.x = self.length
        self.y = 0
        self.rotate(angle_degrees)
    angle = property(get_angle, __setangle, None,
                     "gets or sets the angle of a vector")

    def get_angle_between(self, other):
        cross = self.x * other[1] - self.y * other[0]
        dot = self.x * other[0] + self.y * other[1]
        return math.degrees(math.atan2(cross, dot))

    def normalized(self):
        length = self.length
        if length != 0:
            return self / length
        return Vec2d(self)

    def normalize_return_length(self):
        length = self.length
        if length != 0:
            self.x /= length
            self.y /= length
        return length

    def perpendicular(self):
        return Vec2d(-self.y, self.x)

    def perpendicular_normal(self):
        length = self.length
        if length != 0:
            return Vec2d(-self.y / length, self.x / length)
        return Vec2d(self)

    def dot(self, other):
        return float(self.x * other[0] + self.y * other[1])

    def get_distance(self, other):
        return math.sqrt((self.x - other[0]) ** 2 + (self.y - other[1]) ** 2)

    def get_dist_sqrd(self, other):
        return (self.x - other[0]) ** 2 + (self.y - other[1]) ** 2

    def projection(self, other):
        other_length_sqrd = other[0] * other[0] + other[1] * other[1]
        projected_length_times_other_length = self.dot(other)
        return other * (projected_length_times_other_length / other_length_sqrd)

    def cross(self, other):
        return self.x * other[1] - self.y * other[0]

    def interpolate_to(self, other, range):
        return Vec2d(self.x + (other[0] - self.x) * range,
                     self.y + (other[1] - self.y) * range)

    def convert_to_basis(self, x_vector, y_vector):
        return Vec2d(self.dot(x_vector) / x_vector.get_length_sqrd(),
                     self.dot(y_vector) / y_vector.get_length_sqrd())

    # Pickle support
    def __getstate__(self):
        return [self.x, self.y]

    def __setstate__(self, state):
        self.x, self.y = state
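# ---------------------------------------------------------------------------
# A brief illustration of the Vec2d API above; the values in the comments
# follow from the math rather than from captured program output:
if __name__ == '__main__':
    v = Vec2d(3, 4)
    print v.length              # 5.0
    print v.angle               # ~53.13 degrees (atan2(4, 3))
    v.rotate(90)                # v is now approximately Vec2d(-4.0, 3.0)
    print v.dot((0, 1))         # ~3.0 after the rotation
    print repr(v.normalized())  # unit vector in the same direction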
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from dataclasses import dataclass from typing import Optional, Tuple, Union import numpy as np import tensorflow as tf from .file_utils import ModelOutput from .utils import logging logger = logging.get_logger(__name__) @dataclass class TFGreedySearchDecoderOnlyOutput(ModelOutput): """ Base class for outputs of decoder-only generation models using greedy search. Args: sequences (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`): The generated sequences. The second dimension (sequence_length) is either equal to :obj:`max_length` or shorter if all batches finished early due to the :obj:`eos_token_id`. scores (:obj:`tuple(tf.Tensor)` `optional`, returned when ``output_scores=True`` is passed or when ``config.output_scores=True``): Processed prediction scores of the language modeling head (scores for each vocabulary token before SoftMax) at each generation step. :obj:`(max_length-input_ids.shape[-1],)`-shaped tuple of :obj:`tf.Tensor` with each tensor of shape :obj:`(batch_size, config.vocab_size)`). attentions (:obj:`tuple(tuple(tf.Tensor))`, `optional`, returned when ``output_attentions=True`` is passed or ``config.output_attentions=True``): Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of :obj:`tf.Tensor` of shape :obj:`(batch_size, num_heads, generated_length, sequence_length)`. hidden_states (:obj:`tuple(tuple(tf.Tensor))`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of :obj:`tf.Tensor` of shape :obj:`(batch_size, generated_length, hidden_size)`. """ sequences: tf.Tensor = None scores: Optional[Tuple[tf.Tensor]] = None attentions: Optional[Tuple[Tuple[tf.Tensor]]] = None hidden_states: Optional[Tuple[Tuple[tf.Tensor]]] = None @dataclass class TFGreedySearchEncoderDecoderOutput(ModelOutput): """ Base class for outputs of encoder-decoder generation models using greedy search. Hidden states and attention weights of the decoder (respectively the encoder) can be accessed via the encoder_attentions and the encoder_hidden_states attributes (respectively the decoder_attentions and the decoder_hidden_states attributes) Args: sequences (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`): The generated sequences. The second dimension (sequence_length) is either equal to :obj:`max_length` or shorter if all batches finished early due to the :obj:`eos_token_id`. scores (:obj:`tuple(tf.Tensor)` `optional`, returned when ``output_scores=True`` is passed or when ``config.output_scores=True``): Processed prediction scores of the language modeling head (scores for each vocabulary token before SoftMax) at each generation step. 
:obj:`(max_length-1,)`-shaped tuple of :obj:`tf.Tensor` with each tensor of shape :obj:`(batch_size, config.vocab_size)`). encoder_attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_attentions=True`` is passed or ``config.output_attentions=True``): Tuple of :obj:`tf.Tensor` (one for each layer of the decoder) of shape :obj:`(batch_size, num_heads, sequence_length, sequence_length)`. encoder_hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): Tuple of :obj:`tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`. decoder_attentions (:obj:`tuple(tuple(tf.Tensor))`, `optional`, returned when ``output_attentions=True`` is passed or ``config.output_attentions=True``): Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of :obj:`tf.Tensor` of shape :obj:`(batch_size, num_heads, generated_length, sequence_length)`. cross_attentions (:obj:`tuple(tuple(tf.Tensor))`, `optional`, returned when ``output_attentions=True`` is passed or ``config.output_attentions=True``): Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of :obj:`tf.Tensor` of shape :obj:`(batch_size, num_heads, generated_length, sequence_length)`. decoder_hidden_states (:obj:`tuple(tuple(tf.Tensor))`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of :obj:`tf.Tensor` of shape :obj:`(batch_size, generated_length, hidden_size)`. """ sequences: tf.Tensor = None scores: Optional[Tuple[tf.Tensor]] = None encoder_attentions: Optional[Tuple[tf.Tensor]] = None encoder_hidden_states: Optional[Tuple[tf.Tensor]] = None decoder_attentions: Optional[Tuple[Tuple[tf.Tensor]]] = None cross_attentions: Optional[Tuple[Tuple[tf.Tensor]]] = None decoder_hidden_states: Optional[Tuple[Tuple[tf.Tensor]]] = None @dataclass class TFSampleDecoderOnlyOutput(ModelOutput): """ Base class for outputs of decoder-only generation models using sampling. Args: sequences (:obj:`tf.Tensor` of shape :obj:`(batch_size*num_return_sequences, sequence_length)`): The generated sequences. The second dimension (sequence_length) is either equal to :obj:`max_length` or shorter if all batches finished early due to the :obj:`eos_token_id`. scores (:obj:`tuple(tf.Tensor)` `optional`, returned when ``output_scores=True`` is passed or when ``config.output_scores=True``): Processed prediction scores of the language modeling head (scores for each vocabulary token before SoftMax) at each generation step. :obj:`(max_length-input_ids.shape[-1],)`-shaped tuple of :obj:`tf.Tensor` with each tensor of shape :obj:`(batch_size*num_return_sequences, config.vocab_size)`). attentions (:obj:`tuple(tuple(tf.Tensor))`, `optional`, returned when ``output_attentions=True`` is passed or ``config.output_attentions=True``): Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of :obj:`tf.Tensor` of shape :obj:`(num_return_sequences*batch_size, num_heads, generated_length, sequence_length)`. 
hidden_states (:obj:`tuple(tuple(tf.Tensor))`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of :obj:`tf.Tensor` of shape :obj:`(num_return_sequences*batch_size, generated_length, hidden_size)`. """ sequences: tf.Tensor = None scores: Optional[Tuple[tf.Tensor]] = None attentions: Optional[Tuple[Tuple[tf.Tensor]]] = None hidden_states: Optional[Tuple[Tuple[tf.Tensor]]] = None @dataclass class TFSampleEncoderDecoderOutput(ModelOutput): """ Base class for outputs of encoder-decoder generation models using sampling. Hidden states and attention weights of the decoder (respectively the encoder) can be accessed via the encoder_attentions and the encoder_hidden_states attributes (respectively the decoder_attentions and the decoder_hidden_states attributes) Args: sequences (:obj:`tf.Tensor` of shape :obj:`(batch_size*num_return_sequences, sequence_length)`): The generated sequences. The second dimension (sequence_length) is either equal to :obj:`max_length` or shorter if all batches finished early due to the :obj:`eos_token_id`. scores (:obj:`tuple(tf.Tensor)` `optional`, returned when ``output_scores=True`` is passed or when ``config.output_scores=True``): Processed prediction scores of the language modeling head (scores for each vocabulary token before SoftMax) at each generation step. :obj:`(max_length-1,)`-shaped tuple of :obj:`tf.Tensor` with each tensor of shape :obj:`(batch_size*num_return_sequences, config.vocab_size)`). encoder_attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_attentions=True`` is passed or ``config.output_attentions=True``): Tuple of :obj:`tf.Tensor` (one for each layer of the decoder) of shape :obj:`(batch_size*num_return_sequences, num_heads, sequence_length, sequence_length)`. encoder_hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): Tuple of :obj:`tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape :obj:`(batch_size*num_return_sequences, sequence_length, hidden_size)`. decoder_attentions (:obj:`tuple(tuple(tf.Tensor))`, `optional`, returned when ``output_attentions=True`` is passed or ``config.output_attentions=True``): Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of :obj:`tf.Tensor` of shape :obj:`(batch_size*num_return_sequences, num_heads, generated_length, sequence_length)`. cross_attentions (:obj:`tuple(tuple(tf.Tensor))`, `optional`, returned when ``output_attentions=True`` is passed or ``config.output_attentions=True``): Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of :obj:`tf.Tensor` of shape :obj:`(batch_size, num_heads, generated_length, sequence_length)`. decoder_hidden_states (:obj:`tuple(tuple(tf.Tensor))`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of :obj:`tf.Tensor` of shape :obj:`(batch_size*num_return_sequences, generated_length, hidden_size)`. 
""" sequences: tf.Tensor = None scores: Optional[Tuple[tf.Tensor]] = None encoder_attentions: Optional[Tuple[tf.Tensor]] = None encoder_hidden_states: Optional[Tuple[tf.Tensor]] = None decoder_attentions: Optional[Tuple[Tuple[tf.Tensor]]] = None cross_attentions: Optional[Tuple[Tuple[tf.Tensor]]] = None decoder_hidden_states: Optional[Tuple[Tuple[tf.Tensor]]] = None @dataclass class TFBeamSearchDecoderOnlyOutput(ModelOutput): """ Base class for outputs of decoder-only generation models using beam search. Args: sequences (:obj:`tf.Tensor` of shape :obj:`(batch_size*num_return_sequences, sequence_length)`): The generated sequences. The second dimension (sequence_length) is either equal to :obj:`max_length` or shorter if all batches finished early due to the :obj:`eos_token_id`. sequences_scores (:obj:`tf.Tensor` of shape :obj:`(batch_size*num_return_sequences)`, `optional`, returned when ``output_scores=True`` is passed or when ``config.output_scores=True``): Final beam scores of the generated ``sequences``. scores (:obj:`tuple(tf.Tensor)` `optional`, returned when ``output_scores=True`` is passed or when ``config.output_scores=True``): Processed beam scores for each vocabulary token at each generation step. Beam scores consisting of log softmax scores for each vocabulary token and sum of log softmax of previously generated tokens in this beam . :obj:`(max_length-input_ids.shape[-1],)`-shaped tuple of :obj:`tf.Tensor` with each tensor of shape :obj:`(batch_size*num_beams*num_return_sequences, config.vocab_size)`). attentions (:obj:`tuple(tuple(tf.Tensor))`, `optional`, returned when ``output_attentions=True`` is passed or ``config.output_attentions=True``): Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of :obj:`tf.Tensor` of shape :obj:`(batch_size*num_beams, num_heads, generated_length, sequence_length)`. hidden_states (:obj:`tuple(tuple(tf.Tensor))`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of :obj:`tf.Tensor` of shape :obj:`(batch_size*num_beams*num_return_sequences, generated_length, hidden_size)`. """ sequences: tf.Tensor = None sequences_scores: Optional[tf.Tensor] = None scores: Optional[Tuple[tf.Tensor]] = None attentions: Optional[Tuple[Tuple[tf.Tensor]]] = None hidden_states: Optional[Tuple[Tuple[tf.Tensor]]] = None @dataclass class TFBeamSearchEncoderDecoderOutput(ModelOutput): """ Base class for outputs of encoder-decoder generation models using beam search. Hidden states and attention weights of the decoder (respectively the encoder) can be accessed via the encoder_attentions and the encoder_hidden_states attributes (respectively the decoder_attentions and the decoder_hidden_states attributes) Args: sequences (:obj:`tf.Tensor` of shape :obj:`(batch_size*num_return_sequences, sequence_length)`): The generated sequences. The second dimension (sequence_length) is either equal to :obj:`max_length` or shorter if all batches finished early due to the :obj:`eos_token_id`. sequences_scores (:obj:`tf.Tensor` of shape :obj:`(batch_size*num_return_sequences)`, `optional`, returned when ``output_scores=True`` is passed or when ``config.output_scores=True``): Final beam scores of the generated ``sequences``. 
scores (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_scores=True`` is passed or when ``config.output_scores=True``): Processed beam scores for each vocabulary token at each generation step. Beam scores consisting of log softmax scores for each vocabulary token and sum of log softmax of previously generated tokens in this beam. :obj:`(max_length-1,)`-shaped tuple of :obj:`tf.Tensor` with each tensor of shape :obj:`(batch_size*num_beams, config.vocab_size)`). encoder_attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_attentions=True`` is passed or ``config.output_attentions=True``): Tuple of :obj:`tf.Tensor` (one for each layer of the encoder) of shape :obj:`(batch_size, num_heads, sequence_length, sequence_length)`. encoder_hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): Tuple of :obj:`tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape :obj:`(batch_size*num_beams*num_return_sequences, sequence_length, hidden_size)`. decoder_attentions (:obj:`tuple(tuple(tf.Tensor))`, `optional`, returned when ``output_attentions=True`` is passed or ``config.output_attentions=True``): Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of :obj:`tf.Tensor` of shape :obj:`(batch_size*num_beams*num_return_sequences, num_heads, generated_length, sequence_length)`. cross_attentions (:obj:`tuple(tuple(tf.Tensor))`, `optional`, returned when ``output_attentions=True`` is passed or ``config.output_attentions=True``): Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of :obj:`tf.Tensor` of shape :obj:`(batch_size, num_heads, generated_length, sequence_length)`. decoder_hidden_states (:obj:`tuple(tuple(tf.Tensor))`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of :obj:`tf.Tensor` of shape :obj:`(batch_size*num_beams*num_return_sequences, generated_length, hidden_size)`. """ sequences: tf.Tensor = None sequences_scores: Optional[tf.Tensor] = None scores: Optional[Tuple[tf.Tensor]] = None encoder_attentions: Optional[Tuple[tf.Tensor]] = None encoder_hidden_states: Optional[Tuple[tf.Tensor]] = None decoder_attentions: Optional[Tuple[Tuple[tf.Tensor]]] = None cross_attentions: Optional[Tuple[Tuple[tf.Tensor]]] = None decoder_hidden_states: Optional[Tuple[Tuple[tf.Tensor]]] = None @dataclass class TFBeamSampleDecoderOnlyOutput(ModelOutput): """ Base class for outputs of decoder-only generation models using beam sampling. Args: sequences (:obj:`tf.Tensor` of shape :obj:`(batch_size*num_return_sequences, sequence_length)`): The generated sequences. The second dimension (sequence_length) is either equal to :obj:`max_length` or shorter if all batches finished early due to the :obj:`eos_token_id`. sequences_scores (:obj:`tf.Tensor` of shape :obj:`(batch_size*num_return_sequences)`, `optional`, returned when ``output_scores=True`` is passed or when ``config.output_scores=True``): Final beam scores of the generated ``sequences``. 
scores (:obj:`tuple(tf.Tensor)` `optional`, returned when ``output_scores=True`` is passed or when ``config.output_scores=True``): Processed beam scores for each vocabulary token at each generation step. Beam scores consisting of log softmax scores for each vocabulary token and sum of log softmax of previously generated tokens in this beam . :obj:`(max_length-input_ids.shape[-1],)`-shaped tuple of :obj:`tf.Tensor` with each tensor of shape :obj:`(batch_size*num_beams*num_return_sequences, config.vocab_size)`). attentions (:obj:`tuple(tuple(tf.Tensor))`, `optional`, returned when ``output_attentions=True`` is passed or ``config.output_attentions=True``): Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of :obj:`tf.Tensor` of shape :obj:`(batch_size*num_beams, num_heads, generated_length, sequence_length)`. hidden_states (:obj:`tuple(tuple(tf.Tensor))`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of :obj:`tf.Tensor` of shape :obj:`(batch_size*num_beams, generated_length, hidden_size)`. """ sequences: tf.Tensor = None sequences_scores: Optional[tf.Tensor] = None scores: Optional[Tuple[tf.Tensor]] = None attentions: Optional[Tuple[Tuple[tf.Tensor]]] = None hidden_states: Optional[Tuple[Tuple[tf.Tensor]]] = None @dataclass class TFBeamSampleEncoderDecoderOutput(ModelOutput): """ Base class for outputs of encoder-decoder generation models using beam sampling. Hidden states and attention weights of the decoder (respectively the encoder) can be accessed via the encoder_attentions and the encoder_hidden_states attributes (respectively the decoder_attentions and the decoder_hidden_states attributes) Args: sequences (:obj:`tf.Tensor` of shape :obj:`(batch_size*num_beams, sequence_length)`): The generated sequences. The second dimension (sequence_length) is either equal to :obj:`max_length` or shorter if all batches finished early due to the :obj:`eos_token_id`. sequences_scores (:obj:`tf.Tensor` of shape :obj:`(batch_size * num_return_sequence)`, `optional`, returned when ``output_scores=True`` is passed or when ``config.output_scores=True``): Final beam scores of the generated ``sequences``. scores (:obj:`tuple(tf.Tensor)` `optional`, returned when ``output_scores=True`` is passed or when ``config.output_scores=True``): Processed beam scores for each vocabulary token at each generation step. Beam scores consisting of log softmax scores for each vocabulary token and sum of log softmax of previously generated tokens in this beam . :obj:`(max_length-1,)`-shaped tuple of :obj:`tf.Tensor` with each tensor of shape :obj:`(batch_size*num_beams, config.vocab_size)`). encoder_attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_attentions=True`` is passed or ``config.output_attentions=True``): Tuple of :obj:`tf.Tensor` (one for each layer of the decoder) of shape :obj:`(batch_size, num_heads, sequence_length, sequence_length)`. encoder_hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): Tuple of :obj:`tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape :obj:`(batch_size*num_beams, sequence_length, hidden_size)`. 
decoder_attentions (:obj:`tuple(tuple(tf.Tensor))`, `optional`, returned when ``output_attentions=True`` is passed or ``config.output_attentions=True``): Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of :obj:`tf.Tensor` of shape :obj:`(batch_size*num_beams, num_heads, generated_length, sequence_length)`. cross_attentions (:obj:`tuple(tuple(tf.Tensor))`, `optional`, returned when ``output_attentions=True`` is passed or ``config.output_attentions=True``): Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of :obj:`tf.Tensor` of shape :obj:`(batch_size, num_heads, generated_length, sequence_length)`. decoder_hidden_states (:obj:`tuple(tuple(tf.Tensor))`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of :obj:`tf.Tensor` of shape :obj:`(batch_size*num_beams, generated_length, hidden_size)`. """ sequences: tf.Tensor = None sequences_scores: Optional[tf.Tensor] = None scores: Optional[Tuple[tf.Tensor]] = None encoder_attentions: Optional[Tuple[tf.Tensor]] = None encoder_hidden_states: Optional[Tuple[tf.Tensor]] = None decoder_attentions: Optional[Tuple[Tuple[tf.Tensor]]] = None cross_attentions: Optional[Tuple[Tuple[tf.Tensor]]] = None decoder_hidden_states: Optional[Tuple[Tuple[tf.Tensor]]] = None TFGreedySearchOutput = Union[TFGreedySearchEncoderDecoderOutput, TFGreedySearchDecoderOnlyOutput] TFSampleOutput = Union[TFSampleEncoderDecoderOutput, TFSampleDecoderOnlyOutput] TFBeamSearchOutput = Union[TFBeamSearchEncoderDecoderOutput, TFBeamSearchDecoderOnlyOutput] TFBeamSampleOutput = Union[TFBeamSampleEncoderDecoderOutput, TFBeamSampleDecoderOnlyOutput] class TFGenerationMixin: """ A class containing all of the functions supporting generation, to be used as a mixin in :class:`~transformers.TFPreTrainedModel`. """ def prepare_inputs_for_generation(self, inputs, **kwargs): """ Implement in subclasses of :class:`~transformers.TFPreTrainedModel` for custom behavior to prepare inputs in the generate method. """ return {"input_ids": inputs} def _use_cache(self, outputs, use_cache): """During generation, decide whether to pass the `past` variable to the next forward pass.""" use_cache = getattr(self.config, "use_cache", False) if len(outputs) <= 1 or use_cache is False: return False if hasattr(self.config, "mem_len") and self.config.mem_len == 0: return False return True def generate( self, input_ids=None, max_length=None, min_length=None, do_sample=None, early_stopping=None, num_beams=None, temperature=None, top_k=None, top_p=None, repetition_penalty=None, bad_words_ids=None, bos_token_id=None, pad_token_id=None, eos_token_id=None, length_penalty=None, no_repeat_ngram_size=None, num_return_sequences=None, attention_mask=None, decoder_start_token_id=None, use_cache=None, output_scores=None, output_attentions=None, output_hidden_states=None, return_dict_in_generate=None, forced_bos_token_id=None, forced_eos_token_id=None, **model_kwargs, ) -> Union[TFGreedySearchOutput, TFSampleOutput, TFBeamSearchOutput, TFBeamSampleOutput, tf.Tensor]: r""" Generates sequences for models with a language modeling head. The method currently supports greedy decoding, beam-search decoding, sampling with temperature, sampling with top-k or nucleus sampling. 
Adapted in part from `Facebook's XLM beam search code <https://github.com/facebookresearch/XLM/blob/9e6f6814d17be4fe5b15f2e6c43eb2b2d76daeb4/src/model/transformer.py#L529>`__. Apart from :obj:`input_ids` and :obj:`attention_mask`, all the arguments below will default to the value of the attribute of the same name inside the :class:`~transformers.PretrainedConfig` of the model. The default values indicated are the default values of those config. Most of these parameters are explained in more detail in `this blog post <https://huggingface.co/blog/how-to-generate>`__. Parameters: input_ids (:obj:`tf.Tensor` of :obj:`dtype=tf.int32` and shape :obj:`(batch_size, sequence_length)`, `optional`): The sequence used as a prompt for the generation. If :obj:`None` the method initializes it as an empty :obj:`tf.Tensor` of shape :obj:`(1,)`. max_length (:obj:`int`, `optional`, defaults to 20): The maximum length of the sequence to be generated. min_length (:obj:`int`, `optional`, defaults to 10): The minimum length of the sequence to be generated. do_sample (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether or not to use sampling ; use greedy decoding otherwise. early_stopping (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether to stop the beam search when at least ``num_beams`` sentences are finished per batch or not. num_beams (:obj:`int`, `optional`, defaults to 1): Number of beams for beam search. 1 means no beam search. temperature (:obj:`float`, `optional`, defaults to 1.0): The value used to module the next token probabilities. top_k (:obj:`int`, `optional`, defaults to 50): The number of highest probability vocabulary tokens to keep for top-k-filtering. top_p (:obj:`float`, `optional`, defaults to 1.0): If set to float < 1, only the most probable tokens with probabilities that add up to ``top_p`` or higher are kept for generation. repetition_penalty (:obj:`float`, `optional`, defaults to 1.0): The parameter for repetition penalty. 1.0 means no penalty. See `this paper <https://arxiv.org/pdf/1909.05858.pdf>`__ for more details. pad_token_id (:obj:`int`, `optional`): The id of the `padding` token. bos_token_id (:obj:`int`, `optional`): The id of the `beginning-of-sequence` token. eos_token_id (:obj:`int`, `optional`): The id of the `end-of-sequence` token. length_penalty (:obj:`float`, `optional`, defaults to 1.0): Exponential penalty to the length. 1.0 means no penalty. Set to values < 1.0 in order to encourage the model to generate shorter sequences, to a value > 1.0 in order to encourage the model to produce longer sequences. no_repeat_ngram_size (:obj:`int`, `optional`, defaults to 0): If set to int > 0, all ngrams of that size can only occur once. bad_words_ids(:obj:`List[int]`, `optional`): List of token ids that are not allowed to be generated. In order to get the tokens of the words that should not appear in the generated text, use :obj:`tokenizer.encode(bad_word, add_prefix_space=True)`. num_return_sequences(:obj:`int`, `optional`, defaults to 1): The number of independently computed returned sequences for each element in the batch. attention_mask (:obj:`tf.Tensor` of :obj:`dtype=tf.int32` and shape :obj:`(batch_size, sequence_length)`, `optional`): Mask to avoid performing attention on padding token indices. Mask values are in ``[0, 1]``, 1 for tokens that are not masked, and 0 for masked tokens. If not provided, will default to a tensor the same shape as :obj:`input_ids` that masks the pad token. `What are attention masks? 
<../glossary.html#attention-mask>`__ decoder_start_token_id (:obj:`int`, `optional`): If an encoder-decoder model starts decoding with a different token than `bos`, the id of that token. use_cache (:obj:`bool`, `optional`, defaults to :obj:`True`): Whether or not the model should use the past last key/values attentions (if applicable to the model) to speed up decoding. output_attentions (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned tensors for more details. output_hidden_states (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for more details. output_scores (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether or not to return the prediction scores. See ``scores`` under returned tensors for more details. return_dict_in_generate (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple. forced_bos_token_id (:obj:`int`, `optional`): The id of the token to force as the first generated token after the :obj:`decoder_start_token_id`. Useful for multilingual models like :doc:`mBART <../model_doc/mbart>` where the first generated token needs to be the target language token. forced_eos_token_id (:obj:`int`, `optional`): The id of the token to force as the last generated token when :obj:`max_length` is reached. model_kwargs: Additional model-specific kwargs will be forwarded to the :obj:`call` function of the model. Return: :class:`~transformers.file_utils.ModelOutput` or :obj:`tf.Tensor`: A :class:`~transformers.file_utils.ModelOutput` (if ``return_dict_in_generate=True`` or when ``config.return_dict_in_generate=True``) or a :obj:`tf.Tensor`. If the model is `not` an encoder-decoder model (``model.config.is_encoder_decoder=False``), the possible :class:`~transformers.file_utils.ModelOutput` types are: - :class:`~transformers.generation_utils.TFGreedySearchDecoderOnlyOutput`, - :class:`~transformers.generation_utils.TFSampleDecoderOnlyOutput`, - :class:`~transformers.generation_utils.TFBeamSearchDecoderOnlyOutput`, - :class:`~transformers.generation_utils.TFBeamSampleDecoderOnlyOutput` If the model is an encoder-decoder model (``model.config.is_encoder_decoder=True``), the possible :class:`~transformers.file_utils.ModelOutput` types are: - :class:`~transformers.generation_utils.TFGreedySearchEncoderDecoderOutput`, - :class:`~transformers.generation_utils.TFSampleEncoderDecoderOutput`, - :class:`~transformers.generation_utils.TFBeamSearchEncoderDecoderOutput`, - :class:`~transformers.generation_utils.TFBeamSampleEncoderDecoderOutput` Examples:: tokenizer = AutoTokenizer.from_pretrained('distilgpt2') # Initialize tokenizer model = TFAutoModelWithLMHead.from_pretrained('distilgpt2') # Download model and configuration from huggingface.co and cache. outputs = model.generate(max_length=40) # do greedy decoding print(f'Generated: {tokenizer.decode(outputs[0], skip_special_tokens=True)}') tokenizer = AutoTokenizer.from_pretrained('openai-gpt') # Initialize tokenizer model = TFAutoModelWithLMHead.from_pretrained('openai-gpt') # Download model and configuration from huggingface.co and cache. 
input_context = 'The dog' input_ids = tokenizer.encode(input_context, return_tensors='tf') # encode input context outputs = model.generate(input_ids=input_ids, do_sample=True, num_beams=5, num_return_sequences=3, temperature=1.5) # generate 3 independent sequences using beam search decoding (5 beams) with sampling from initial context 'The dog' for i in range(3): # 3 output sequences were generated print(f'Generated {i}: {tokenizer.decode(outputs[i], skip_special_tokens=True)}') tokenizer = AutoTokenizer.from_pretrained('distilgpt2') # Initialize tokenizer model = TFAutoModelWithLMHead.from_pretrained('distilgpt2') # Download model and configuration from huggingface.co and cache. input_context = 'The dog' input_ids = tokenizer.encode(input_context, return_tensors='tf') # encode input context outputs = model.generate(input_ids=input_ids, max_length=40, temperature=0.7, num_return_sequences=3, do_sample=True) # generate 3 candidates using sampling for i in range(3): # 3 output sequences were generated print(f'Generated {i}: {tokenizer.decode(outputs[i], skip_special_tokens=True)}') tokenizer = AutoTokenizer.from_pretrained('ctrl') # Initialize tokenizer model = TFAutoModelWithLMHead.from_pretrained('ctrl') # Download model and configuration from huggingface.co and cache. input_context = 'Legal My neighbor is' # "Legal" is one of the control codes for ctrl input_ids = tokenizer.encode(input_context, return_tensors='tf') # encode input context outputs = model.generate(input_ids=input_ids, max_length=50, temperature=0.7, repetition_penalty=1.2) # generate sequences print(f'Generated: {tokenizer.decode(outputs[0], skip_special_tokens=True)}') tokenizer = AutoTokenizer.from_pretrained('gpt2') # Initialize tokenizer model = TFAutoModelWithLMHead.from_pretrained('gpt2') # Download model and configuration from huggingface.co and cache. input_context = 'My cute dog' bad_words_ids = [tokenizer.encode(bad_word, add_prefix_space=True) for bad_word in ['idiot', 'stupid', 'shut up']] input_ids = tokenizer.encode(input_context, return_tensors='tf') # encode input context outputs = model.generate(input_ids=input_ids, max_length=100, do_sample=True, bad_words_ids=bad_words_ids) # generate sequences without allowing bad_words to be generated """ # We cannot generate if the model does not have an LM head if self.get_output_embeddings() is None: raise AttributeError( "You tried to generate sequences with a model that does not have an LM Head. " "Please use another model class (e.g. 
`TFOpenAIGPTLMHeadModel`, `TFXLNetLMHeadModel`, `TFGPT2LMHeadModel`, `TFCTRLLMHeadModel`, `TFT5ForConditionalGeneration`, `TFTransfoXLLMHeadModel`)" ) max_length = max_length if max_length is not None else self.config.max_length min_length = min_length if min_length is not None else self.config.min_length do_sample = do_sample if do_sample is not None else self.config.do_sample early_stopping = early_stopping if early_stopping is not None else self.config.early_stopping num_beams = num_beams if num_beams is not None else self.config.num_beams temperature = temperature if temperature is not None else self.config.temperature top_k = top_k if top_k is not None else self.config.top_k top_p = top_p if top_p is not None else self.config.top_p repetition_penalty = repetition_penalty if repetition_penalty is not None else self.config.repetition_penalty bos_token_id = bos_token_id if bos_token_id is not None else self.config.bos_token_id pad_token_id = pad_token_id if pad_token_id is not None else self.config.pad_token_id eos_token_id = eos_token_id if eos_token_id is not None else self.config.eos_token_id length_penalty = length_penalty if length_penalty is not None else self.config.length_penalty no_repeat_ngram_size = ( no_repeat_ngram_size if no_repeat_ngram_size is not None else self.config.no_repeat_ngram_size ) bad_words_ids = bad_words_ids if bad_words_ids is not None else self.config.bad_words_ids num_return_sequences = ( num_return_sequences if num_return_sequences is not None else self.config.num_return_sequences ) decoder_start_token_id = ( decoder_start_token_id if decoder_start_token_id is not None else self.config.decoder_start_token_id ) forced_bos_token_id = ( forced_bos_token_id if forced_bos_token_id is not None else self.config.forced_bos_token_id ) forced_eos_token_id = ( forced_eos_token_id if forced_eos_token_id is not None else self.config.forced_eos_token_id ) output_scores = output_scores if output_scores is not None else self.config.output_scores output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict_in_generate = ( return_dict_in_generate if return_dict_in_generate is not None else self.config.return_dict_in_generate ) model_kwargs["output_scores"] = output_scores model_kwargs["output_attentions"] = output_attentions model_kwargs["output_hidden_states"] = output_hidden_states if self.config.is_encoder_decoder: model_kwargs["encoder_attentions"] = None model_kwargs["encoder_hidden_states"] = None if input_ids is not None: batch_size = shape_list(input_ids)[0] # overridden by the input batch_size else: batch_size = 1 assert isinstance(max_length, int) and max_length > 0, "`max_length` should be a strictly positive integer." assert isinstance(min_length, int) and min_length >= 0, "`min_length` should be a positive integer." assert isinstance(do_sample, bool), "`do_sample` should be a boolean." assert isinstance(early_stopping, bool), "`early_stopping` should be a boolean." assert isinstance(num_beams, int) and num_beams > 0, "`num_beams` should be a strictly positive integer." assert temperature > 0, "`temperature` should be strictly positive." assert isinstance(top_k, int) and top_k >= 0, "`top_k` should be a positive integer." assert 0 <= top_p <= 1, "`top_p` should be between 0 and 1." assert repetition_penalty >= 1.0, "`repetition_penalty` should be >= 1." 
assert input_ids is not None or ( isinstance(bos_token_id, int) and bos_token_id >= 0 ), "If input_ids is not defined, `bos_token_id` should be a positive integer." assert pad_token_id is None or ( isinstance(pad_token_id, int) and (pad_token_id >= 0) ), "`pad_token_id` should be a positive integer." assert (eos_token_id is None) or ( isinstance(eos_token_id, int) and (eos_token_id >= 0) ), "`eos_token_id` should be a positive integer." assert length_penalty > 0, "`length_penalty` should be strictly positive." assert ( isinstance(num_return_sequences, int) and num_return_sequences > 0 ), "`num_return_sequences` should be a strictly positive integer." assert ( bad_words_ids is None or isinstance(bad_words_ids, list) and isinstance(bad_words_ids[0], list) ), "`bad_words_ids` is either `None` or a list of lists of tokens that should not be generated" if input_ids is None: assert isinstance(bos_token_id, int) and bos_token_id >= 0, ( "you should either supply a context to complete as `input_ids` input " "or a `bos_token_id` (integer >= 0) as a first token to start the generation." ) input_ids = tf.fill((batch_size, 1), bos_token_id) else: assert len(shape_list(input_ids)) == 2, "Input prompt should be of shape (batch_size, sequence length)." # not allow to duplicate outputs when greedy decoding if do_sample is False: if num_beams == 1: # no_beam_search greedy generation conditions assert ( num_return_sequences == 1 ), "Greedy decoding will always produce the same output for num_beams == 1 and num_return_sequences > 1. Please set num_return_sequences = 1" else: # beam_search greedy generation conditions assert ( num_beams >= num_return_sequences ), "Greedy beam search decoding cannot return more sequences than it has beams. Please set num_beams >= num_return_sequences" # create attention mask if necessary # TODO (PVP): this should later be handled by the forward fn() in each model in the future see PR 3140 if (attention_mask is None) and (pad_token_id is not None) and (pad_token_id in input_ids.numpy()): attention_mask = tf.cast(tf.math.not_equal(input_ids, pad_token_id), dtype=tf.int32) elif attention_mask is None: attention_mask = tf.ones_like(input_ids) if pad_token_id is None and eos_token_id is not None: logger.warning(f"Setting `pad_token_id` to {eos_token_id} (first `eos_token_id`) to generate sequence") pad_token_id = eos_token_id # current position and vocab size cur_len = shape_list(input_ids)[1] # unused vocab_size = self.config.vocab_size # set effective batch size and effective batch multiplier according to do_sample if do_sample: effective_batch_size = batch_size * num_return_sequences effective_batch_mult = num_return_sequences else: effective_batch_size = batch_size effective_batch_mult = 1 if self.config.is_encoder_decoder: if decoder_start_token_id is None: decoder_start_token_id = bos_token_id assert ( decoder_start_token_id is not None ), "decoder_start_token_id or bos_token_id has to be defined for encoder-decoder generation" assert hasattr(self, "get_encoder"), f"{self} should have a 'get_encoder' function defined" assert callable(self.get_encoder), f"{self.get_encoder} should be a method" # get encoder and store encoder outputs encoder = self.get_encoder() encoder_outputs = encoder( input_ids, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) if return_dict_in_generate: if output_attentions: model_kwargs["encoder_attentions"] = encoder_outputs.attentions if output_hidden_states: 
model_kwargs["encoder_hidden_states"] = encoder_outputs.hidden_states # Expand input ids if num_beams > 1 or num_return_sequences > 1 if num_return_sequences > 1 or num_beams > 1: input_ids_len = shape_list(input_ids)[-1] input_ids = tf.broadcast_to( tf.expand_dims(input_ids, 1), (batch_size, effective_batch_mult * num_beams, input_ids_len) ) attention_mask = tf.broadcast_to( tf.expand_dims(attention_mask, 1), (batch_size, effective_batch_mult * num_beams, input_ids_len) ) input_ids = tf.reshape( input_ids, (effective_batch_size * num_beams, input_ids_len) ) # shape: (batch_size * num_return_sequences * num_beams, cur_len) attention_mask = tf.reshape( attention_mask, (effective_batch_size * num_beams, input_ids_len) ) # shape: (batch_size * num_return_sequences * num_beams, cur_len) if self.config.is_encoder_decoder: # create empty decoder_input_ids input_ids = ( tf.ones( (effective_batch_size * num_beams, 1), dtype=tf.int32, ) * decoder_start_token_id ) cur_len = 1 assert ( batch_size == encoder_outputs[0].shape[0] ), f"expected encoder_outputs[0] to have 1st dimension bs={batch_size}, got {encoder_outputs[0].shape[0]} " # expand batch_idx to assign correct encoder output for expanded input_ids (due to num_beams > 1 and num_return_sequences > 1) expanded_batch_idxs = tf.reshape( tf.repeat(tf.expand_dims(tf.range(batch_size), -1), repeats=num_beams * effective_batch_mult, axis=1), shape=(-1,), ) # expand encoder_outputs encoder_outputs = (tf.gather(encoder_outputs[0], expanded_batch_idxs, axis=0),) else: encoder_outputs = None cur_len = shape_list(input_ids)[-1] assert ( cur_len < max_length ), f"The context has {cur_len} number of tokens, but `max_length` is only {max_length}. Please make sure that `max_length` is bigger than the number of tokens, by setting either `generate(max_length=...,...)` or `config.max_length = ...`" if num_beams > 1: output = self._generate_beam_search( input_ids, cur_len=cur_len, max_length=max_length, min_length=min_length, do_sample=do_sample, early_stopping=early_stopping, temperature=temperature, top_k=top_k, top_p=top_p, repetition_penalty=repetition_penalty, no_repeat_ngram_size=no_repeat_ngram_size, bad_words_ids=bad_words_ids, pad_token_id=pad_token_id, eos_token_id=eos_token_id, batch_size=effective_batch_size, num_return_sequences=num_return_sequences, length_penalty=length_penalty, num_beams=num_beams, vocab_size=vocab_size, encoder_outputs=encoder_outputs, attention_mask=attention_mask, use_cache=use_cache, forced_bos_token_id=forced_bos_token_id, forced_eos_token_id=forced_eos_token_id, return_dict_in_generate=return_dict_in_generate, **model_kwargs, ) else: output = self._generate_no_beam_search( input_ids, cur_len=cur_len, max_length=max_length, min_length=min_length, do_sample=do_sample, temperature=temperature, top_k=top_k, top_p=top_p, repetition_penalty=repetition_penalty, no_repeat_ngram_size=no_repeat_ngram_size, bad_words_ids=bad_words_ids, pad_token_id=pad_token_id, eos_token_id=eos_token_id, batch_size=effective_batch_size, vocab_size=vocab_size, encoder_outputs=encoder_outputs, attention_mask=attention_mask, use_cache=use_cache, return_dict_in_generate=return_dict_in_generate, **model_kwargs, ) return output def _generate_no_beam_search( self, input_ids, cur_len, max_length, min_length, do_sample, temperature, top_k, top_p, repetition_penalty, no_repeat_ngram_size, bad_words_ids, pad_token_id, eos_token_id, batch_size, vocab_size, encoder_outputs, attention_mask, use_cache, return_dict_in_generate, **kwargs ) -> 
Union[TFGreedySearchOutput, TFSampleOutput, tf.Tensor]: """ Generate sequences for each example without beam search (num_beams == 1). All returned sequences are generated independently. """ # length of generated sentences / unfinished sentences unfinished_sents = tf.ones_like(input_ids[:, 0]) sent_lengths = tf.ones_like(input_ids[:, 0]) * max_length past = encoder_outputs # defined for encoder-decoder models, None for decoder-only models # init attention / hidden states / scores tuples scores = () if (return_dict_in_generate and kwargs["output_scores"]) else None decoder_attentions = () if (return_dict_in_generate and kwargs["output_attentions"]) else None cross_attentions = () if (return_dict_in_generate and kwargs["output_attentions"]) else None decoder_hidden_states = () if (return_dict_in_generate and kwargs["output_hidden_states"]) else None # if model is an encoder-decoder, retrieve encoder attention weights and hidden states if self.config.is_encoder_decoder: encoder_attentions = ( kwargs["encoder_attentions"] if (return_dict_in_generate and kwargs["encoder_attentions"]) else None ) encoder_hidden_states = ( kwargs["encoder_hidden_states"] if (return_dict_in_generate and kwargs["encoder_hidden_states"]) else None ) while cur_len < max_length: model_inputs = self.prepare_inputs_for_generation( input_ids, past=past, attention_mask=attention_mask, use_cache=use_cache, **kwargs ) outputs = self( **model_inputs, return_dict=True, output_attentions=kwargs["output_attentions"], output_hidden_states=kwargs["output_hidden_states"], ) next_token_logits = outputs.logits[:, -1, :] # (batch_size * num_beams, vocab_size) # Store scores, attentions and hidden_states when required if return_dict_in_generate: if kwargs["output_scores"]: scores += (next_token_logits,) if kwargs["output_attentions"]: decoder_attentions += ( (outputs.decoder_attentions,) if self.config.is_encoder_decoder else (outputs.attentions,) ) if self.config.is_encoder_decoder: cross_attentions += (outputs.cross_attentions,) if kwargs["output_hidden_states"]: decoder_hidden_states += ( (outputs.decoder_hidden_states,) if self.config.is_encoder_decoder else (outputs.hidden_states,) ) # if model has past, then set the past variable to speed up decoding if self._use_cache(outputs, use_cache): past = outputs[1] # repetition penalty from CTRL paper (https://arxiv.org/abs/1909.05858) if repetition_penalty != 1.0: next_token_logits_penalties = _create_next_token_logits_penalties( input_ids, next_token_logits, repetition_penalty ) next_token_logits = tf.math.multiply(next_token_logits, next_token_logits_penalties) if no_repeat_ngram_size > 0: # calculate a list of banned tokens to prevent repetitively generating the same ngrams # from fairseq: https://github.com/pytorch/fairseq/blob/a07cb6f40480928c9e0548b737aadd36ee66ac76/fairseq/sequence_generator.py#L345 banned_tokens = calc_banned_ngram_tokens(input_ids, batch_size, no_repeat_ngram_size, cur_len) # create banned_tokens boolean mask banned_tokens_indices_mask = [] for banned_tokens_slice in banned_tokens: banned_tokens_indices_mask.append( [True if token in banned_tokens_slice else False for token in range(vocab_size)] ) next_token_logits = set_tensor_by_indices_to_value( next_token_logits, tf.convert_to_tensor(banned_tokens_indices_mask, dtype=tf.bool), -float("inf") ) if bad_words_ids is not None: # calculate a list of banned tokens according to bad words banned_tokens = calc_banned_bad_words_ids(input_ids, bad_words_ids) banned_tokens_indices_mask = [] for banned_tokens_slice in 
banned_tokens: banned_tokens_indices_mask.append( [True if token in banned_tokens_slice else False for token in range(vocab_size)] ) next_token_logits = set_tensor_by_indices_to_value( next_token_logits, tf.convert_to_tensor(banned_tokens_indices_mask, dtype=tf.bool), -float("inf") ) # set eos token prob to zero if min_length is not reached if eos_token_id is not None and cur_len < min_length: # create eos_token_id boolean mask is_token_logit_eos_token = tf.convert_to_tensor( [True if token == eos_token_id else False for token in range(vocab_size)], dtype=tf.bool ) eos_token_indices_mask = tf.broadcast_to(is_token_logit_eos_token, [batch_size, vocab_size]) next_token_logits = set_tensor_by_indices_to_value( next_token_logits, eos_token_indices_mask, -float("inf") ) if do_sample: # Temperature (higher temperature => more likely to sample low probability tokens) if temperature != 1.0: next_token_logits = next_token_logits / temperature # Top-p/top-k filtering next_token_logits = tf_top_k_top_p_filtering(next_token_logits, top_k=top_k, top_p=top_p) # Sample next_token = tf.squeeze( tf.random.categorical(next_token_logits, dtype=tf.int32, num_samples=1), axis=1 ) else: # Greedy decoding next_token = tf.math.argmax(next_token_logits, axis=-1, output_type=tf.int32) # update generations and finished sentences if eos_token_id is not None: # pad finished sentences if eos_token_id exists tokens_to_add = next_token * unfinished_sents + (pad_token_id) * (1 - unfinished_sents) else: tokens_to_add = next_token # add token and increase length by one input_ids = tf.concat([input_ids, tf.expand_dims(tokens_to_add, -1)], 1) cur_len = cur_len + 1 if eos_token_id is not None: eos_in_sents = tokens_to_add == eos_token_id # if sentence is unfinished and the token to add is eos, sent_lengths is filled with current length is_sents_unfinished_and_token_to_add_is_eos = tf.math.multiply( unfinished_sents, tf.cast(eos_in_sents, tf.int32) ) sent_lengths = ( sent_lengths * (1 - is_sents_unfinished_and_token_to_add_is_eos) + cur_len * is_sents_unfinished_and_token_to_add_is_eos ) # unfinished_sents is set to zero if eos in sentence unfinished_sents -= is_sents_unfinished_and_token_to_add_is_eos # stop when there is a </s> in each sentence, or if we exceed the maximum length if tf.math.reduce_max(unfinished_sents) == 0: break # extend attention_mask for new generated input if only decoder if self.config.is_encoder_decoder is False: attention_mask = tf.concat( [attention_mask, tf.ones((shape_list(attention_mask)[0], 1), dtype=tf.int32)], axis=-1 ) # if there are different sentence lengths in the batch, some sentences have to be padded min_sent_length = tf.math.reduce_min(sent_lengths) max_sent_length = tf.math.reduce_max(sent_lengths) if min_sent_length != max_sent_length: assert pad_token_id is not None, "`pad_token_id` has to be defined if batches have different lengths" # finished sents are filled with pad_token padding = tf.ones([batch_size, max_sent_length.numpy()], dtype=tf.int32) * pad_token_id # create length masks for tf.where operation broad_casted_sent_lengths = tf.broadcast_to( tf.expand_dims(sent_lengths, -1), [batch_size, max_sent_length] ) broad_casted_range = tf.transpose( tf.broadcast_to(tf.expand_dims(tf.range(max_sent_length), -1), [max_sent_length, batch_size]) ) decoded = tf.where(broad_casted_range < broad_casted_sent_lengths, input_ids, padding) else: decoded = input_ids if return_dict_in_generate: if do_sample: if self.config.is_encoder_decoder: return TFSampleEncoderDecoderOutput( sequences=decoded, 
scores=scores, encoder_attentions=encoder_attentions, encoder_hidden_states=encoder_hidden_states, decoder_attentions=decoder_attentions, cross_attentions=cross_attentions, decoder_hidden_states=decoder_hidden_states, ) else: return TFSampleDecoderOnlyOutput( sequences=decoded, scores=scores, attentions=decoder_attentions, hidden_states=decoder_hidden_states, ) else: if self.config.is_encoder_decoder: return TFGreedySearchEncoderDecoderOutput( sequences=decoded, scores=scores, encoder_attentions=encoder_attentions, encoder_hidden_states=encoder_hidden_states, decoder_attentions=decoder_attentions, cross_attentions=cross_attentions, decoder_hidden_states=decoder_hidden_states, ) else: return TFGreedySearchDecoderOnlyOutput( sequences=decoded, scores=scores, attentions=decoder_attentions, hidden_states=decoder_hidden_states, ) else: return decoded def _generate_beam_search( self, input_ids, cur_len, max_length, min_length, do_sample, early_stopping, temperature, top_k, top_p, repetition_penalty, no_repeat_ngram_size, bad_words_ids, pad_token_id, eos_token_id, batch_size, num_return_sequences, length_penalty, num_beams, vocab_size, encoder_outputs, attention_mask, use_cache, forced_bos_token_id, forced_eos_token_id, return_dict_in_generate, **kwargs, ) -> Union[TFBeamSearchOutput, TFBeamSampleOutput, tf.Tensor]: """Generate sequences for each example with beam search.""" # generated hypotheses generated_hyps = [ BeamHypotheses(num_beams, max_length, length_penalty, early_stopping=early_stopping) for _ in range(batch_size) ] # for greedy decoding it is made sure that only tokens of the first beam are considered to avoid sampling the exact same tokens three times if do_sample is False: beam_scores_begin = tf.zeros((batch_size, 1), dtype=tf.float32) beam_scores_end = tf.ones((batch_size, num_beams - 1), dtype=tf.float32) * (-1e9) beam_scores = tf.concat([beam_scores_begin, beam_scores_end], -1) else: beam_scores = tf.zeros((batch_size, num_beams), dtype=tf.float32) beam_scores = tf.reshape(beam_scores, (batch_size * num_beams,)) # cache compute states past = encoder_outputs # to stay similar to torch : past = (encoder_outputs, None) if encoder_outputs is not None else None # init attention / hidden states / scores tuples scores = () if (return_dict_in_generate and kwargs["output_scores"]) else None decoder_attentions = () if (return_dict_in_generate and kwargs["output_attentions"]) else None cross_attentions = () if (return_dict_in_generate and kwargs["output_attentions"]) else None decoder_hidden_states = () if (return_dict_in_generate and kwargs["output_hidden_states"]) else None # if model is an encoder-decoder, retrieve encoder attention weights and hidden states if self.config.is_encoder_decoder: encoder_attentions = ( kwargs["encoder_attentions"] if (return_dict_in_generate and kwargs["encoder_attentions"]) else None ) encoder_hidden_states = ( kwargs["encoder_hidden_states"] if (return_dict_in_generate and kwargs["encoder_hidden_states"]) else None ) # done sentences done = [False for _ in range(batch_size)] while cur_len < max_length: model_inputs = self.prepare_inputs_for_generation( input_ids, past=past, attention_mask=attention_mask, use_cache=use_cache, **kwargs ) outputs = self( **model_inputs, return_dict=True, output_attentions=kwargs["output_attentions"], output_hidden_states=kwargs["output_hidden_states"], ) next_token_logits = outputs.logits[:, -1, :] # (batch_size * num_beams, vocab_size) # if model has past, then set the past variable to speed up decoding if 
self._use_cache(outputs, use_cache): past = outputs[1] # repetition penalty (from CTRL paper https://arxiv.org/abs/1909.05858) if repetition_penalty != 1.0: next_token_logits_penalties = _create_next_token_logits_penalties( input_ids, next_token_logits, repetition_penalty ) next_token_logits = tf.math.multiply(next_token_logits, next_token_logits_penalties) # Temperature (higher temperature => more likely to sample low probability tokens) if temperature != 1.0: next_token_logits = next_token_logits / temperature if self.config.is_encoder_decoder and do_sample is False: next_token_logits = self.adjust_logits_during_generation( next_token_logits, cur_len=cur_len, max_length=max_length, forced_bos_token_id=forced_bos_token_id, forced_eos_token_id=forced_eos_token_id, ) # calculate log softmax score (kept in `log_probs` so it does not clobber the `scores` tuple collected for `return_dict_in_generate`) log_probs = tf.nn.log_softmax(next_token_logits, axis=-1) # (batch_size * num_beams, vocab_size) # set eos token prob to zero if min_length is not reached if eos_token_id is not None and cur_len < min_length: # create eos_token_id boolean mask num_batch_hypotheses = batch_size * num_beams is_token_logit_eos_token = tf.convert_to_tensor( [True if token == eos_token_id else False for token in range(vocab_size)], dtype=tf.bool ) eos_token_indices_mask = tf.broadcast_to(is_token_logit_eos_token, [num_batch_hypotheses, vocab_size]) log_probs = set_tensor_by_indices_to_value(log_probs, eos_token_indices_mask, -float("inf")) if no_repeat_ngram_size > 0: # calculate a list of banned tokens to prevent repetitively generating the same ngrams # from fairseq: https://github.com/pytorch/fairseq/blob/a07cb6f40480928c9e0548b737aadd36ee66ac76/fairseq/sequence_generator.py#L345 num_batch_hypotheses = batch_size * num_beams banned_tokens = calc_banned_ngram_tokens( input_ids, num_batch_hypotheses, no_repeat_ngram_size, cur_len ) # create banned_tokens boolean mask banned_tokens_indices_mask = [] for banned_tokens_slice in banned_tokens: banned_tokens_indices_mask.append( [True if token in banned_tokens_slice else False for token in range(vocab_size)] ) log_probs = set_tensor_by_indices_to_value( log_probs, tf.convert_to_tensor(banned_tokens_indices_mask, dtype=tf.bool), -float("inf") ) if bad_words_ids is not None: # calculate a list of banned tokens according to bad words banned_tokens = calc_banned_bad_words_ids(input_ids, bad_words_ids) banned_tokens_indices_mask = [] for banned_tokens_slice in banned_tokens: banned_tokens_indices_mask.append( [True if token in banned_tokens_slice else False for token in range(vocab_size)] ) log_probs = set_tensor_by_indices_to_value( log_probs, tf.convert_to_tensor(banned_tokens_indices_mask, dtype=tf.bool), -float("inf") ) assert shape_list(log_probs) == [batch_size * num_beams, vocab_size] if do_sample: _scores = log_probs + tf.broadcast_to( beam_scores[:, None], (batch_size * num_beams, vocab_size) ) # (batch_size * num_beams, vocab_size) # Top-p/top-k filtering _scores = tf_top_k_top_p_filtering( _scores, top_k=top_k, top_p=top_p, min_tokens_to_keep=2 ) # (batch_size * num_beams, vocab_size) # Sample 2 next tokens for each beam (so we have some spare tokens and match output of greedy beam search) _scores = tf.reshape(_scores, (batch_size, num_beams * vocab_size)) next_tokens = sample_without_replacement( _scores, num_samples=2 * num_beams ) # (batch_size, 2 * num_beams) # Compute next scores next_scores = tf.gather(_scores, next_tokens, batch_dims=1) # (batch_size, 2 * num_beams) # sort the sampled vector to make sure that the first num_beams samples are the best next_scores_indices = 
tf.argsort(next_scores, direction="DESCENDING", axis=1) next_scores = tf.gather(next_scores, next_scores_indices, batch_dims=1) # (batch_size, num_beams * 2) next_tokens = tf.gather(next_tokens, next_scores_indices, batch_dims=1) # (batch_size, num_beams * 2) else: # Add the log prob of the new beams to the log prob of the beginning of the sequence (sum of logs == log of the product) next_scores = log_probs + tf.broadcast_to( beam_scores[:, None], (batch_size * num_beams, vocab_size) ) # (batch_size * num_beams, vocab_size) # re-organize to group the beams together (we are keeping top hypothesis across beams) next_scores = tf.reshape( next_scores, (batch_size, num_beams * vocab_size) ) # (batch_size, num_beams * vocab_size) next_scores, next_tokens = tf.math.top_k(next_scores, k=2 * num_beams, sorted=True) assert shape_list(next_scores) == shape_list(next_tokens) == [batch_size, 2 * num_beams] # Store scores, attentions and hidden_states when required if return_dict_in_generate: if kwargs["output_scores"]: scores += (next_token_logits,) if kwargs["output_attentions"]: decoder_attentions += ( (outputs.decoder_attentions,) if self.config.is_encoder_decoder else (outputs.attentions,) ) if self.config.is_encoder_decoder: cross_attentions += (outputs.cross_attentions,) if kwargs["output_hidden_states"]: decoder_hidden_states += ( (outputs.decoder_hidden_states,) if self.config.is_encoder_decoder else (outputs.hidden_states,) ) # next batch beam content next_batch_beam = [] # for each sentence for batch_idx in range(batch_size): # if we are done with this sentence if done[batch_idx]: assert ( len(generated_hyps[batch_idx]) >= num_beams ), f"Batch can only be done if at least {num_beams} beams have been generated." assert ( eos_token_id is not None and pad_token_id is not None ), "generated beams >= num_beams -> eos_token_id and pad_token_id have to be defined" next_batch_beam.extend([(0, pad_token_id, 0)] * num_beams) # pad the batch continue # next sentence beam content next_sent_beam = [] # next tokens for this sentence for beam_token_rank, (beam_token_id, beam_token_score) in enumerate( zip(next_tokens[batch_idx], next_scores[batch_idx]) ): # get beam and token IDs beam_id = beam_token_id // vocab_size token_id = beam_token_id % vocab_size effective_beam_id = batch_idx * num_beams + beam_id # add to generated hypotheses if end of sentence or last iteration if (eos_token_id is not None) and (token_id.numpy() == eos_token_id): # if beam_token does not belong to top num_beams tokens, it should not be added is_beam_token_worse_than_top_num_beams = beam_token_rank >= num_beams if is_beam_token_worse_than_top_num_beams: continue generated_hyps[batch_idx].add( tf.identity(input_ids[effective_beam_id]), beam_token_score.numpy() ) else: # add next predicted token if it is not eos_token next_sent_beam.append((beam_token_score, token_id, effective_beam_id)) # the beam for next step is full if len(next_sent_beam) == num_beams: break # Check if we are done so that we can save a pad step if all(done) done[batch_idx] = done[batch_idx] or generated_hyps[batch_idx].is_done( tf.reduce_max(next_scores[batch_idx]).numpy(), cur_len ) # update next beam content assert len(next_sent_beam) == num_beams, "Beam should always be full" next_batch_beam.extend(next_sent_beam) assert len(next_batch_beam) == num_beams * (batch_idx + 1) # stop when we are done with each sentence if all(done): break # sanity check / prepare next batch assert len(next_batch_beam) == batch_size * num_beams beam_scores = tf.convert_to_tensor([x[0] for 
x in next_batch_beam], dtype=tf.float32) beam_tokens = tf.convert_to_tensor([x[1] for x in next_batch_beam], dtype=tf.int32) beam_idx = tf.convert_to_tensor([x[2] for x in next_batch_beam], dtype=tf.int32) # re-order batch and update current length input_ids = tf.stack([tf.identity(input_ids[x, :]) for x in beam_idx]) input_ids = tf.concat([input_ids, tf.expand_dims(beam_tokens, 1)], axis=-1) cur_len = cur_len + 1 # re-order internal states if past is not None: past = self._reorder_cache(past, beam_idx) # extend attention_mask for new generated input if only decoder if self.config.is_encoder_decoder is False: attention_mask = tf.concat( [attention_mask, tf.ones((shape_list(attention_mask)[0], 1), dtype=tf.int32)], axis=-1 ) # finalize all open beam hypotheses and add them to generated hypotheses for batch_idx in range(batch_size): # Add all open beam hypotheses to generated_hyps if done[batch_idx]: continue # test that beam scores match previously calculated scores if not eos and batch_idx not done if eos_token_id is not None and all( (token_id % vocab_size).numpy().item() != eos_token_id for token_id in next_tokens[batch_idx] ): if not tf.reduce_all( next_scores[batch_idx, :num_beams] == tf.reshape(beam_scores, (batch_size, num_beams))[batch_idx] ): raise ValueError( f"If batch_idx is not done, final next scores: {next_scores[:, :num_beams][batch_idx]} have " "to equal the accumulated beam_scores: " f"{tf.reshape(beam_scores, (batch_size, num_beams))[batch_idx]}" ) # need to add best num_beams hypotheses to generated hyps for beam_id in range(num_beams): effective_beam_id = batch_idx * num_beams + beam_id final_score = beam_scores[effective_beam_id].numpy().item() final_tokens = input_ids[effective_beam_id] generated_hyps[batch_idx].add(final_tokens, final_score) # depending on whether greedy generation is wanted or not define different output_batch_size and output_num_return_sequences_per_batch output_batch_size = batch_size if do_sample else batch_size * num_return_sequences output_num_return_sequences_per_batch = 1 if do_sample else num_return_sequences # select the best hypotheses sent_lengths_list = [] best = [] # retrieve best hypotheses for i, hypotheses in enumerate(generated_hyps): sorted_hyps = sorted(hypotheses.beams, key=lambda x: x[0]) for j in range(output_num_return_sequences_per_batch): best_hyp = sorted_hyps.pop()[1] sent_lengths_list.append(len(best_hyp)) best.append(best_hyp) assert output_batch_size == len( best ), f"Output batch size {output_batch_size} must match output beam hypotheses {len(best)}" sent_lengths = tf.convert_to_tensor(sent_lengths_list, dtype=tf.int32) # shorter batches are filled with pad_token if tf.reduce_min(sent_lengths).numpy() != tf.reduce_max(sent_lengths).numpy(): assert pad_token_id is not None, "`pad_token_id` has to be defined" sent_max_len = min(tf.reduce_max(sent_lengths).numpy() + 1, max_length) decoded_list = [] # fill with hypothesis and eos_token_id if necessary for i, hypo in enumerate(best): assert sent_lengths[i] == shape_list(hypo)[0] # if sent_length is max_len do not pad if sent_lengths[i] == sent_max_len: decoded_slice = hypo else: # else pad to sent_max_len num_pad_tokens = sent_max_len - sent_lengths[i] padding = pad_token_id * tf.ones((num_pad_tokens,), dtype=tf.int32) decoded_slice = tf.concat([hypo, padding], axis=-1) # finish sentence with EOS token if sent_lengths[i] < max_length: decoded_slice = tf.where( tf.range(sent_max_len, dtype=tf.int32) == sent_lengths[i], eos_token_id * tf.ones((sent_max_len,), dtype=tf.int32), 
decoded_slice, ) # add to list decoded_list.append(decoded_slice) decoded = tf.stack(decoded_list) else: # none of the hypotheses have an eos_token assert all(len(hypo) == max_length for hypo in best) decoded = tf.stack(best) if return_dict_in_generate: if do_sample and self.config.is_encoder_decoder: return TFBeamSampleEncoderDecoderOutput( sequences=decoded, scores=scores, encoder_attentions=encoder_attentions, encoder_hidden_states=encoder_hidden_states, decoder_attentions=decoder_attentions, cross_attentions=cross_attentions, decoder_hidden_states=decoder_hidden_states, ) elif do_sample and not self.config.is_encoder_decoder: return TFBeamSampleDecoderOnlyOutput( sequences=decoded, scores=scores, attentions=decoder_attentions, hidden_states=decoder_hidden_states, ) elif self.config.is_encoder_decoder: return TFBeamSearchEncoderDecoderOutput( sequences=decoded, scores=scores, encoder_attentions=encoder_attentions, encoder_hidden_states=encoder_hidden_states, decoder_attentions=decoder_attentions, cross_attentions=cross_attentions, decoder_hidden_states=decoder_hidden_states, ) else: return TFBeamSearchDecoderOnlyOutput( sequences=decoded, scores=scores, attentions=decoder_attentions, hidden_states=decoder_hidden_states, ) else: return decoded @staticmethod def _reorder_cache(past, beam_idx): return tuple(tf.gather(layer_past, beam_idx, axis=1) for layer_past in past) def adjust_logits_during_generation( self, logits, cur_len, max_length, forced_bos_token_id, forced_eos_token_id, **kwargs ): """ Implement in subclasses of :class:`~transformers.TFPreTrainedModel` for custom behavior to adjust the logits in the generate method. """ if cur_len == 1 and forced_bos_token_id is not None: vocab_range = tf.constant(range(self.config.vocab_size)) return tf.where(vocab_range != forced_bos_token_id, -1e8, logits) elif cur_len == max_length - 1 and forced_eos_token_id is not None: vocab_range = tf.constant(range(self.config.vocab_size)) return tf.where(vocab_range != forced_eos_token_id, -1e8, logits) else: return logits def _create_next_token_logits_penalties(input_ids, logits, repetition_penalty): # create logit penalties for already seen input_ids token_penalties = np.ones(shape_list(logits)) prev_input_ids = [np.unique(input_id) for input_id in input_ids.numpy()] for i, prev_input_id in enumerate(prev_input_ids): logit_penalized = logits[i].numpy()[prev_input_id] logit_penalties = np.zeros(logit_penalized.shape) # if previous logit score is < 0 then multiply repetition penalty else divide logit_penalties[logit_penalized < 0] = repetition_penalty logit_penalties[logit_penalized > 0] = 1 / repetition_penalty np.put(token_penalties[i], prev_input_id, logit_penalties) return tf.convert_to_tensor(token_penalties, dtype=tf.float32) def calc_banned_ngram_tokens(prev_input_ids, num_hypos, no_repeat_ngram_size, cur_len): # Copied from fairseq for no_repeat_ngram in beam_search if cur_len + 1 < no_repeat_ngram_size: # return no banned tokens if we haven't generated no_repeat_ngram_size tokens yet return [[] for _ in range(num_hypos)] generated_ngrams = [{} for _ in range(num_hypos)] for idx in range(num_hypos): gen_tokens = prev_input_ids[idx].numpy().tolist() generated_ngram = generated_ngrams[idx] for ngram in zip(*[gen_tokens[i:] for i in range(no_repeat_ngram_size)]): prev_ngram_tuple = tuple(ngram[:-1]) generated_ngram[prev_ngram_tuple] = generated_ngram.get(prev_ngram_tuple, []) + [ngram[-1]] def _get_generated_ngrams(hypo_idx): # Before decoding the next token, prevent decoding of ngrams that have 
already appeared start_idx = cur_len + 1 - no_repeat_ngram_size ngram_idx = tuple(prev_input_ids[hypo_idx, start_idx:cur_len].numpy().tolist()) return generated_ngrams[hypo_idx].get(ngram_idx, []) banned_tokens = [_get_generated_ngrams(hypo_idx) for hypo_idx in range(num_hypos)] return banned_tokens def calc_banned_bad_words_ids(prev_input_ids, bad_words_ids): banned_tokens = [] def _tokens_match(prev_tokens, tokens): if len(tokens) == 0: # if bad word tokens is just one token always ban it return True if len(tokens) > len(prev_tokens): # if bad word tokens are longer than prev tokens they can't be equal return False if prev_tokens[-len(tokens) :] == tokens: # if tokens match return True else: return False for prev_input_ids_slice in prev_input_ids: banned_tokens_slice = [] for banned_token_seq in bad_words_ids: assert ( len(banned_token_seq) > 0 ), f"Banned words token sequences { bad_words_ids} cannot have an empty list" if _tokens_match(prev_input_ids_slice.numpy().tolist(), banned_token_seq[:-1]) is False: # if tokens do not match continue continue banned_tokens_slice.append(banned_token_seq[-1]) banned_tokens.append(banned_tokens_slice) return banned_tokens def tf_top_k_top_p_filtering(logits, top_k=0, top_p=1.0, filter_value=-float("Inf"), min_tokens_to_keep=1): """ Filter a distribution of logits using top-k and/or nucleus (top-p) filtering Args: logits: logits distribution shape (batch size, vocabulary size) if top_k > 0: keep only top k tokens with highest probability (top-k filtering). if top_p < 1.0: keep the top tokens with cumulative probability >= top_p (nucleus filtering). Nucleus filtering is described in Holtzman et al. (http://arxiv.org/abs/1904.09751) Make sure we keep at least min_tokens_to_keep per batch example in the output From: https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317 """ logits_shape = shape_list(logits) if top_k > 0: top_k = min(max(top_k, min_tokens_to_keep), logits_shape[-1]) # Safety check # Remove all tokens with a probability less than the last token of the top-k indices_to_remove = logits < tf.math.top_k(logits, k=top_k)[0][..., -1, None] logits = set_tensor_by_indices_to_value(logits, indices_to_remove, filter_value) if top_p < 1.0: sorted_indices = tf.argsort(logits, direction="DESCENDING") sorted_logits = tf.gather( logits, sorted_indices, axis=-1, batch_dims=1 ) # expects logits to be of dim (batch_size, vocab_size) cumulative_probs = tf.math.cumsum(tf.nn.softmax(sorted_logits, axis=-1), axis=-1) # Remove tokens with cumulative probability above the threshold (token with 0 are kept) sorted_indices_to_remove = cumulative_probs > top_p if min_tokens_to_keep > 1: # Keep at least min_tokens_to_keep (set to min_tokens_to_keep-1 because we add the first one below) sorted_indices_to_remove = tf.concat( [ tf.zeros_like(sorted_indices_to_remove[:, :min_tokens_to_keep]), sorted_indices_to_remove[:, min_tokens_to_keep:], ], -1, ) # Shift the indices to the right to keep also the first token above the threshold sorted_indices_to_remove = tf.concat( [tf.zeros_like(sorted_indices_to_remove[:, :1]), sorted_indices_to_remove[:, :-1]], -1, ) # scatter sorted tensors to original indexing indices_to_remove = scatter_values_on_batch_indices(sorted_indices_to_remove, sorted_indices) logits = set_tensor_by_indices_to_value(logits, indices_to_remove, filter_value) return logits def scatter_values_on_batch_indices(values, batch_indices): shape = shape_list(batch_indices) # broadcast batch dim to shape broad_casted_batch_dims = 
tf.reshape(tf.broadcast_to(tf.expand_dims(tf.range(shape[0]), axis=-1), shape), [1, -1]) # transform batch_indices to pair_indices pair_indices = tf.transpose(tf.concat([broad_casted_batch_dims, tf.reshape(batch_indices, [1, -1])], 0)) # scatter values to pair indices return tf.scatter_nd(pair_indices, tf.reshape(values, [-1]), shape) def set_tensor_by_indices_to_value(tensor, indices, value): # create value_tensor since tensor value assignment is not possible in TF value_tensor = tf.zeros_like(tensor) + value return tf.where(indices, value_tensor, tensor) def sample_without_replacement(logits, num_samples): """ categorical sampling without replacement is currently not implemented the gumbel-max trick will do for now see https://github.com/tensorflow/tensorflow/issues/9260 for more info """ z = -tf.math.log(tf.random.uniform(shape_list(logits), 0, 1)) _, indices = tf.nn.top_k(logits + z, num_samples) return indices def shape_list(x): """Deal with dynamic shape in tensorflow cleanly.""" static = x.shape.as_list() dynamic = tf.shape(x) return [dynamic[i] if s is None else s for i, s in enumerate(static)] class BeamHypotheses(object): def __init__(self, num_beams, max_length, length_penalty, early_stopping): """ Initialize n-best list of hypotheses. """ self.max_length = max_length - 1 # ignoring bos_token self.length_penalty = length_penalty self.early_stopping = early_stopping self.num_beams = num_beams self.beams = [] self.worst_score = 1e9 def __len__(self): """ Number of hypotheses in the list. """ return len(self.beams) def add(self, hyp, sum_logprobs): """ Add a new hypothesis to the list. """ score = sum_logprobs / len(hyp) ** self.length_penalty if len(self) < self.num_beams or score > self.worst_score: self.beams.append((score, hyp)) if len(self) > self.num_beams: sorted_scores = sorted([(s, idx) for idx, (s, _) in enumerate(self.beams)]) del self.beams[sorted_scores[0][1]] self.worst_score = sorted_scores[1][0] else: self.worst_score = min(score, self.worst_score) def is_done(self, best_sum_logprobs, cur_len): """ If there are enough hypotheses and that none of the hypotheses being generated can become better than the worst one in the heap, then we are done with this sentence. """ if len(self) < self.num_beams: return False elif self.early_stopping: return True else: cur_score = best_sum_logprobs / cur_len ** self.length_penalty ret = self.worst_score >= cur_score return ret
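
# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the library: a toy run of the top-k /
# nucleus filtering defined above, showing how a logit row is masked before
# sampling. The toy values are invented for demonstration, and the snippet
# assumes TensorFlow is importable as in the rest of this module.
def _demo_top_k_top_p_filtering():
    import tensorflow as tf  # same dependency the module already relies on

    toy_logits = tf.constant([[1.0, 2.0, 3.0, 4.0]])  # one row, vocab of four
    # top_k=2 keeps only the two highest logits (tokens 2 and 3); the rest
    # are pushed to -inf via set_tensor_by_indices_to_value
    filtered = tf_top_k_top_p_filtering(toy_logits, top_k=2, top_p=1.0)
    # sampling from the filtered row can therefore only return token 2 or 3
    next_token = tf.squeeze(
        tf.random.categorical(filtered, dtype=tf.int32, num_samples=1), axis=1
    )
    return filtered, next_token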
# Author: Trevor Perrin # See the LICENSE file for legal information regarding use of this file. """Classes for reading/writing binary data (such as TLS records).""" from .compat import * class Writer: def __init__(self): self.bytes = createByteArrayZeros(0) def add(self, x, length): self.bytes += createByteArrayZeros(length) newIndex = len(self.bytes) - 1 for count in range(length): self.bytes[newIndex] = x & 0xFF x >>= 8 newIndex -= 1 def addFixSeq(self, seq, length): for e in seq: self.add(e, length) def addVarSeq(self, seq, length, lengthLength): self.add(len(seq) * length, lengthLength) for e in seq: self.add(e, length) class Parser: def __init__(self, bytes): self.bytes = bytes self.index = 0 def get(self, length): if self.index + length > len(self.bytes): raise SyntaxError() x = 0 for count in range(length): x <<= 8 x |= self.bytes[self.index] self.index += 1 return x def getFixBytes(self, lengthBytes): bytes = self.bytes[self.index : self.index + lengthBytes] self.index += lengthBytes return bytes def getVarBytes(self, lengthLength): lengthBytes = self.get(lengthLength) return self.getFixBytes(lengthBytes) def getFixList(self, length, lengthList): l = [0] * lengthList for x in range(lengthList): l[x] = self.get(length) return l def getVarList(self, length, lengthLength): lengthList = self.get(lengthLength) if lengthList % length != 0: raise SyntaxError() lengthList = lengthList // length l = [0] * lengthList for x in range(lengthList): l[x] = self.get(length) return l def startLengthCheck(self, lengthLength): self.lengthCheck = self.get(lengthLength) self.indexCheck = self.index def setLengthCheck(self, length): self.lengthCheck = length self.indexCheck = self.index def stopLengthCheck(self): if (self.index - self.indexCheck) != self.lengthCheck: raise SyntaxError() def atLengthCheck(self): if (self.index - self.indexCheck) < self.lengthCheck: return False elif (self.index - self.indexCheck) == self.lengthCheck: return True else: raise SyntaxError()
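
# Minimal round-trip sketch (illustrative only, assuming the .compat helpers
# imported above are available): encode a variable-length list of 16-bit
# integers with Writer, then recover it with Parser, exercising the
# length-prefix convention these classes implement for TLS records.
def _demo_codec_roundtrip():
    w = Writer()
    w.addVarSeq([1, 2, 515], 2, 2)  # 2-byte elements behind a 2-byte length prefix
    p = Parser(w.bytes)
    # getVarList reads the 2-byte length, divides by the element size, and
    # returns the original integers in order
    assert p.getVarList(2, 2) == [1, 2, 515]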
# -*- coding: utf-8 -*- # Copyright 2014 Dev in Cachu authors. All rights reserved. # Use of this source code is governed by a BSD-style # license that can be found in the LICENSE file. import datetime import hashlib import random from django.db import models class Participante(models.Model): TAMANHOS_DE_CAMISETA = ( (u'P', u'P (53cm x 71cm)'), (u'M', u'M (56cm x 74cm)'), (u'G', u'G (58cm x 76cm)'), (u'GG', u'GG (62cm x 80cm)'), ) SEXOS = ( (u'M', u'Masculino'), (u'F', u'Feminino'), ) STATUS = ( (u'AGUARDANDO', u'Aguardando pagamento'), (u'CONFIRMADO', u'Confirmado'), (u'CANCELADO', u'Cancelado'), (u'CORTESIA', u'Cortesia'), (u'PALESTRANTE', u'Palestrante'), (u'ORGANIZACAO', u'Organização'), (u'CARAVANA', u'Caravana'), ) nome = models.CharField(max_length=100) nome_cracha = models.CharField(max_length=100, verbose_name=u"Nome no crachá", blank=True, null=True) cidade = models.CharField(max_length=255, verbose_name=u"Cidade/Estado") sexo = models.CharField(max_length=1, choices=SEXOS) email = models.EmailField(max_length=100) status = models.CharField(max_length=20, choices=STATUS, default=u'AGUARDANDO') instituicao_ensino = models.CharField( max_length=100, verbose_name=u"Instituição de ensino (estudantes)", blank=True, null=True) empresa = models.CharField(max_length=100, verbose_name=u"Empresa onde trabalha", blank=True, null=True) observacao = models.CharField(max_length=1000, verbose_name=u"Observação", blank=True, null=True) presente = models.BooleanField(default=False) def __unicode__(self): return self.nome class Meta: unique_together = ((u'email', u'status',),) class Checkout(models.Model): codigo = models.CharField(max_length=100) participante = models.ForeignKey(Participante) def __unicode__(self): return "%s (%s - %s)" % (self.codigo, self.participante.nome, self.participante.email) class Certificado(models.Model): participante = models.ForeignKey(Participante) codigo = models.CharField(max_length=14, unique=True) hash = models.CharField(max_length=100, unique=True) horas = models.IntegerField(default=8) def __unicode__(self): return "%s (%s)" % (self.codigo, self.participante.nome) @classmethod def gerar_certificado(cls, participante): if participante.presente: cert = Certificado(participante=participante, horas=8) cert.codigo = cls._calcular_codigo(participante) cert.hash = cls._calcular_hash(participante) cert.save() return cert @classmethod def _calcular_codigo(cls, participante): return "2014%04d%04d" % (random.randint(1, 9999), participante.pk) @classmethod def _calcular_hash(cls, participante): rand = random.randint(1, 9999) now = datetime.datetime.now() bstr = "%s%04d%s" % (participante.email, rand, now.isoformat()) return hashlib.sha1(bstr).hexdigest() class Configuracao(models.Model): STATUS = ( (u"fechadas", u"Fechadas (inscrições ainda não abriram)"), (u"abertas", u"Inscrições abertas"), (u"encerradas", u"Inscrições encerradas"), ) valor_inscricao = models.FloatField(verbose_name=u"Valor da inscrição") status = models.CharField(max_length=10, choices=STATUS) def __unicode__(self): return u"Configuração das inscrições do Dev in Cachu 2012" class Meta: verbose_name = u"Configuração das inscrições" verbose_name_plural = verbose_name
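
# Illustrative sketch (not part of the app): the intended flow for issuing a
# certificate once attendance is recorded, mirroring gerar_certificado above.
# The email below is a hypothetical placeholder.
def _exemplo_emitir_certificado():
    participante = Participante.objects.get(email=u"ana@example.com",
                                            status=u"CONFIRMADO")
    participante.presente = True
    participante.save()
    # gerar_certificado returns None when participante.presente is False
    return Certificado.gerar_certificado(participante)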
from itertools import chain from waffle.models import Flag, Switch, Sample from rest_framework import generics from rest_framework import permissions as drf_permissions from api.base.views import JSONAPIBaseView from api.base.permissions import TokenHasScope from api.waffle.serializers import WaffleSerializer from framework.auth.oauth_scopes import CoreScopes class WaffleList(JSONAPIBaseView, generics.ListAPIView): """List of waffle switches, samples, and flags for use in feature flipping. This is a nonstandard, heterogeneous endpoint that you can filter against to fetch more than one flag, switch, or sample in a single request. This is an example of how to query against the _waffle endpoint: ``/v2/_waffle/?samples=test_sample&flags=test_flag,second_flag`` ##Waffle Attributes Waffle entities have the "waffle" `type`. name type description ======================================================================================== id string <flag/switch/sample>_<resource_id> name string The human/computer readable name of the flag/sample/switch. note string Description of where flag/sample/switch is used or other details active boolean Whether the flag/sample/switch is active for the logged-in user ##Links See the [JSON-API spec regarding pagination](http://jsonapi.org/format/1.0/#fetching-pagination). ##Actions *None*. ##Query Params + `page=<Int>` -- page number of results to view, default 1 + `flags=<>` -- comma-separated list of flag names + `switches=<>` -- comma-separated list of switch names + `samples=<>` -- comma-separated list of sample names #This Request/Response """ permission_classes = ( TokenHasScope, drf_permissions.IsAuthenticatedOrReadOnly, ) required_read_scopes = [CoreScopes.WAFFLE_READ] required_write_scopes = [CoreScopes.NULL] serializer_class = WaffleSerializer view_category = 'waffle' view_name = 'waffle-list' # overrides ListAPIView def get_queryset(self): query_params = self.request.query_params if query_params: flags = Flag.objects.filter(name__in=query_params['flags'].split(',')) if 'flags' in query_params else [] switches = Switch.objects.filter(name__in=query_params['switches'].split(',')) if 'switches' in query_params else [] samples = Sample.objects.filter(name__in=query_params['samples'].split(',')) if 'samples' in query_params else [] return list(chain(flags, switches, samples)) else: return list(chain(Flag.objects.all(), Switch.objects.all(), Sample.objects.all()))
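
# Client-side sketch (illustrative only): combining flags and samples in a
# single call, per the comma-separated query-param contract documented in
# WaffleList. The host is a placeholder, and the JSON-API response shape
# ('data' -> 'id'/'attributes') is inferred from the attribute table above;
# treat both as assumptions.
def _demo_waffle_query():
    import requests  # standard third-party HTTP client, assumed available

    resp = requests.get(
        'https://api.example.com/v2/_waffle/',
        params={'flags': 'test_flag,second_flag', 'samples': 'test_sample'},
    )
    # map each waffle entity id to whether it is active for this user
    return {item['id']: item['attributes']['active']
            for item in resp.json()['data']}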
import linuxcnc import os,sys # path to the configuration the user requested # used to see if there are local handler files to use try: CONFIGPATH = os.environ['CONFIG_DIR'] CONFIGDIR = os.path.join(CONFIGPATH, 'panelui_handler.py') except: print '**** PANEL COMMAND: no panelui_handler.py file in config directory' CONFIGPATH = os.path.expanduser("~") CONFIGDIR = os.path.join(CONFIGPATH, 'panelui_handler.py') # constants JOGJOINT = 1 JOGTELEOP = 0 inifile = linuxcnc.ini(os.environ['INI_FILE_NAME']) trajcoordinates = inifile.find("TRAJ", "COORDINATES").lower().replace(" ","") jointcount = int(inifile.find("KINS","JOINTS")) DBG_state = 0 def DBG(str): if DBG_state > 0: print str # Loads user commands from a file named 'panelui_handler.py' in the config directory def load_handlers(usermod,halcomp,builder,commands,master): hdl_func = 'get_handlers' mod = object = None def add_handler(method, f): if method in handlers: handlers[method].append(f) else: handlers[method] = [f] handlers = {} for u in usermod: (directory,filename) = os.path.split(u) (basename,extension) = os.path.splitext(filename) if directory == '': directory = '.' if directory not in sys.path: sys.path.insert(0,directory) DBG( 'panelui: adding import dir %s' % directory) try: mod = __import__(basename) except ImportError,msg: print ("panelui: module '%s' skipped - import error: %s" %(basename,msg)) continue DBG( "panelui: module '%s' imported OK" % mod.__name__) try: # look for 'get_handlers' function h = getattr(mod,hdl_func,None) if h and callable(h): DBG("panelui: module '%s' : '%s' function found" % (mod.__name__,hdl_func)) objlist = h(halcomp,builder,commands,master) else: # the module has no get_handlers() callable. # in this case we permit any callable except class objects in the module to register as handler DBG("panelui: module '%s': no '%s' function - registering only functions as callbacks" % (mod.__name__,hdl_func)) objlist = [mod] # extract callback candidates for object in objlist: #DBG("Registering handlers in module %s object %s" % (mod.__name__, object)) if isinstance(object, dict): methods = object.items() else: methods = map(lambda n: (n, getattr(object, n, None)), dir(object)) for method,f in methods: if method.startswith('_'): continue if callable(f): DBG("panelui: Register callback '%s' in %s" % (method, basename)) add_handler(method, f) except Exception, e: print ("**** PANELUI ERROR: trouble looking for handlers in '%s': %s" %(basename, e)) # Wrap lists in Trampoline, unwrap single functions for n,v in list(handlers.items()): if len(v) == 1: handlers[n] = v[0] else: handlers[n] = Trampoline(v) return handlers,mod,object # trampoline and load_handlers are used for custom keyboard commands class Trampoline(object): def __init__(self,methods): self.methods = methods def __call__(self, *a, **kw): for m in self.methods: m(*a, **kw) # linuxcnc commands class CNC_COMMANDS(): def __init__(self, master): global DBG_state DBG_state = master._dbg self.emc = linuxcnc self.emcstat = linuxcnc.stat() self.emccommand = linuxcnc.command() self.return_to_mode = -1 # if not -1 return to the mode specified self.sb = 0; self.jog_velocity = 100.0/60.0 self.angular_jog_velocity = 3600/60 self._mdi = 0 self.isjogging = [0,0,0,0,0,0,0,0,0] self.restart_line_number = self.restart_reset_line = 0 try: handlers,self.handler_module,self.handler_instance = \ load_handlers([CONFIGDIR], self.emcstat, self.emccommand,self, master) except Exception, e: print e def mdi_active(self, wname, m): self._mdi = m def mist_on(self, wname, b): 
        self.emccommand.mist(1)

    def mist_off(self, wname, b):
        self.emccommand.mist(0)

    def flood_on(self, wname, b):
        self.emccommand.flood(1)

    def flood_off(self, wname, b):
        self.emccommand.flood(0)

    def estop(self, wname, b):
        self.emccommand.state(self.emc.STATE_ESTOP)

    def estop_reset(self, wname, b):
        self.emccommand.state(self.emc.STATE_ESTOP_RESET)

    def machine_off(self, wname, b):
        self.emccommand.state(self.emc.STATE_OFF)

    def machine_on(self, wname, b):
        self.emccommand.state(self.emc.STATE_ON)

    def home_all(self, wname, b):
        self.emccommand.mode(self.emc.MODE_MANUAL)
        self.emccommand.home(-1)

    def unhome_all(self, wname, b):
        self.emccommand.mode(self.emc.MODE_MANUAL)
        self.emccommand.unhome(-1)

    def home_selected(self, wname, joint):
        self.emccommand.mode(self.emc.MODE_MANUAL)
        self.emccommand.home(int(joint))

    def unhome_selected(self, wname, joint):
        self.emccommand.mode(self.emc.MODE_MANUAL)
        self.emccommand.unhome(int(joint))

    def jogging(self, wname, b):
        self.emccommand.mode(self.emc.MODE_MANUAL)

    def override_limits(self, wname, b):
        self.emccommand.mode(self.emc.MODE_MANUAL)
        self.emccommand.override_limits()

    def spindle_forward_adjust(self, wname, rpm=100):
        if self.get_mode() == self.emc.MODE_MDI:
            self.emccommand.mode(self.emc.MODE_MANUAL)
        speed = self.is_spindle_running()
        if speed == 0:
            self.emccommand.spindle(1, float(rpm))
        elif speed > 0:
            self.emccommand.spindle(self.emc.SPINDLE_INCREASE)
        else:
            self.emccommand.spindle(self.emc.SPINDLE_DECREASE)

    def spindle_forward(self, wname, rpm=100):
        self.emccommand.mode(self.emc.MODE_MANUAL)
        speed = self.is_spindle_running()
        if speed == 0:
            self.emccommand.spindle(1, float(rpm))

    def spindle_stop(self, wname, b):
        self.emccommand.mode(self.emc.MODE_MANUAL)
        self.emccommand.spindle(0)

    def spindle_reverse(self, wname, rpm=100):
        self.emccommand.mode(self.emc.MODE_MANUAL)
        speed = self.is_spindle_running()
        if speed == 0:
            self.emccommand.spindle(-1, float(rpm))

    def spindle_reverse_adjust(self, wname, rpm=100):
        if self.get_mode() == self.emc.MODE_MDI:
            self.emccommand.mode(self.emc.MODE_MANUAL)
        speed = self.is_spindle_running()
        if speed == 0:
            self.emccommand.spindle(-1, float(rpm))
        elif speed < 0:
            self.emccommand.spindle(self.emc.SPINDLE_INCREASE)
        else:
            self.emccommand.spindle(self.emc.SPINDLE_DECREASE)

    def spindle_faster(self, wname, b):
        self.emccommand.mode(self.emc.MODE_MANUAL)
        self.emccommand.spindle(self.emc.SPINDLE_INCREASE)

    def spindle_slower(self, wname, b):
        self.emccommand.mode(self.emc.MODE_MANUAL)
        self.emccommand.spindle(self.emc.SPINDLE_DECREASE)

    def set_linear_jog_velocity(self, wname, cmd):
        velocity = float(cmd)
        if velocity is not None:
            rate = self.jog_velocity = velocity / 60.0
            for axisnum in (0, 1, 2, 6, 7, 8):
                if self.isjogging[axisnum]:
                    jjogmode, j_or_a = self.get_jog_info(axisnum)
                    self.emccommand.jog(self.emc.JOG_CONTINUOUS, jjogmode, j_or_a,
                                        self.isjogging[axisnum] * rate)

    def set_angular_jog_velocity(self, wname, cmd):
        angular = float(cmd)
        if angular is not None:
            rate = self.angular_jog_velocity = angular / 60.0
            for axisnum in (3, 4, 5):
                if self.isjogging[axisnum]:
                    jjogmode, j_or_a = self.get_jog_info(axisnum)
                    self.emccommand.jog(self.emc.JOG_CONTINUOUS, jjogmode, j_or_a,
                                        self.isjogging[axisnum] * rate)

    def continuous_jog(self, wname, cmd):
        axisnum = int(cmd[0])
        jjogmode, j_or_a = self.get_jog_info(axisnum)
        direction = int(cmd[1])
        if direction == 0:
            self.isjogging[axisnum] = 0
            self.emccommand.jog(self.emc.JOG_STOP, jjogmode, j_or_a)
        else:
            if axisnum in (3, 4, 5):
                rate = self.angular_jog_velocity
            else:
                rate = self.jog_velocity
            self.isjogging[axisnum] = direction
            self.emccommand.jog(self.emc.JOG_CONTINUOUS, jjogmode, j_or_a, direction * rate)

    def incremental_jog(self, wname, cmd):
        axisnum = int(cmd[0])
        jjogmode, j_or_a = self.get_jog_info(axisnum)
        direction = int(cmd[1])
        distance = float(cmd[2])
        self.isjogging[axisnum] = direction
        if axisnum in (3, 4, 5):
            rate = self.angular_jog_velocity
        else:
            rate = self.jog_velocity
        self.emccommand.jog(self.emc.JOG_INCREMENT, jjogmode, j_or_a, direction * rate, distance)
        self.isjogging[axisnum] = 0

    def quill_up(self, wname, cmd):
        self.emccommand.mode(self.emc.MODE_MANUAL)
        self.emccommand.wait_complete()
        self.mdi(wname, 'G53 G0 Z %f' % float(cmd))

    def feed_hold(self, wname, cmd):
        self.emccommand.set_feed_hold(int(cmd))

    def feed_override(self, wname, f):
        self.emccommand.feedrate(f)

    def rapid_override(self, wname, f):
        self.emccommand.rapidrate(f)

    def spindle_override(self, wname, s):
        self.emccommand.spindleoverride(s)

    def max_velocity(self, wname, m):
        self.emccommand.maxvel(m)

    def reload_tooltable(self, wname, b):
        self.emccommand.load_tool_table()

    def optional_stop(self, wname, cmd):
        self.emccommand.set_optional_stop(int(cmd))

    def block_delete(self, wname, cmd):
        self.emccommand.set_block_delete(int(cmd))

    def abort(self, wname, cmd=None):
        self.emccommand.abort()

    def pause(self, wname, cmd=None):
        self.emccommand.auto(self.emc.AUTO_PAUSE)

    def resume(self, wname, cmd=None):
        self.emccommand.auto(self.emc.AUTO_RESUME)

    def single_block(self, wname, s):
        self.sb = s
        self.emcstat.poll()
        if self.emcstat.queue > 0 or self.emcstat.paused:
            # program or mdi is running
            if s:
                self.emccommand.auto(self.emc.AUTO_PAUSE)
            else:
                self.emccommand.auto(self.emc.AUTO_RESUME)

    # make sure LinuxCNC is in AUTO mode
    # if LinuxCNC is paused then pushing cycle start will step the program
    # else the program starts from restart_line_number
    # after restarting it resets restart_line_number to restart_reset_line (0 by default).
    # You must explicitly set a different restart line each time
    def smart_cycle_start(self, wname, cmd=None):
        self.emcstat.poll()
        if self.emcstat.task_mode != self.emc.MODE_AUTO:
            self.emccommand.mode(self.emc.MODE_AUTO)
            self.emccommand.wait_complete()
        self.emcstat.poll()
        if self.emcstat.paused:
            self.emccommand.auto(self.emc.AUTO_STEP)
            return
        if self.emcstat.interp_state == self.emc.INTERP_IDLE:
            print self.restart_line_number
            self.emccommand.auto(self.emc.AUTO_RUN, self.restart_line_number)
        self.restart_line_number = self.restart_reset_line

    # This restarts the program at the specified line directly (without a cycle start push)
    def re_start(self, wname, line):
        self.emccommand.mode(self.emc.MODE_AUTO)
        self.emccommand.wait_complete()
        self.emccommand.auto(self.emc.AUTO_RUN, line)
        self.restart_line_number = self.restart_reset_line

    # checks if ready for commands
    # calls MDI commands and when idle, periodic() will return to the mode it was in
    def mdi_and_return(self, wname, cmd):
        if self.ok_for_mdi():
            self.return_to_mode = self.get_mode()  # for periodic()
            self.set_mdi_mode()
            if isinstance(cmd, list):
                for i in cmd:
                    print str(i)
                    self.emccommand.mdi(str(i))
            else:
                self.emccommand.mdi(str(cmd))

    # call MDI commands, set mode if needed
    def mdi(self, wname, cmd):
        self.set_mdi_mode()
        if isinstance(cmd, list):
            for i in cmd:
                print str(i)
                self.emccommand.mdi(str(i))
        else:
            self.emccommand.mdi(str(cmd))

    # set the restart line; you can then either restart directly
    # or restart on the cycle start button push
    # see above.
    # The reset option allows one to change the default restart line used after the next restart,
    # e.g. while a restart dialog is open, always restart at the line it says;
    # when the dialog closes, change the line and reset both to zero
    def set_restart_line(self, wname, line, reset=0):
        self.restart_line_number = line
        self.restart_reset_line = reset

    def set_manual_mode(self):
        self.emcstat.poll()
        if self.emcstat.task_mode != self.emc.MODE_MANUAL:
            self.emccommand.mode(self.emc.MODE_MANUAL)
            self.emccommand.wait_complete()

    def set_mdi_mode(self):
        self.emcstat.poll()
        if self.emcstat.task_mode != self.emc.MODE_MDI:
            self.emccommand.mode(self.emc.MODE_MDI)
            self.emccommand.wait_complete()

    def set_auto_mode(self):
        self.emcstat.poll()
        if self.emcstat.task_mode != self.emc.MODE_AUTO:
            self.emccommand.mode(self.emc.MODE_AUTO)
            self.emccommand.wait_complete()

    def get_mode(self):
        self.emcstat.poll()
        return self.emcstat.task_mode

    def ok_for_mdi(self):
        self.emcstat.poll()
        s = self.emcstat
        return not s.estop and s.enabled and s.homed and \
            (s.interp_state == self.emc.INTERP_IDLE)

    def is_spindle_running(self):
        self.emcstat.poll()
        s = self.emcstat
        if s.spindle_enabled:
            return s.spindle_speed
        else:
            return 0

    def periodic(self):
        # return mode back to preset variable, when idle
        if self.return_to_mode > -1:
            self.emcstat.poll()
            if self.emcstat.interp_state == self.emc.INTERP_IDLE:
                self.emccommand.mode(self.return_to_mode)
                self.return_to_mode = -1

    def __getitem__(self, item):
        return getattr(self, item)

    def __setitem__(self, item, value):
        return setattr(self, item, value)

    def get_jjogmode(self):
        self.emcstat.poll()
        if self.emcstat.motion_mode == linuxcnc.TRAJ_MODE_FREE:
            return JOGJOINT
        if self.emcstat.motion_mode == linuxcnc.TRAJ_MODE_TELEOP:
            return JOGTELEOP
        print "commands.py: unexpected motion_mode", self.emcstat.motion_mode
        return JOGTELEOP

    def jnum_for_axisnum(self, axisnum):
        if self.emcstat.kinematics_type != linuxcnc.KINEMATICS_IDENTITY:
            print ("\n%s:\n  Joint jogging not supported for "
                   "non-identity kinematics" % __file__)
            return -1  # emcJogCont() et al reject neg joint/axis no.s
        jnum = trajcoordinates.index("xyzabcuvw"[axisnum])
        if jnum > jointcount:
            print ("\n%s:\n  Computed joint number=%d for axisnum=%d "
                   "exceeds jointcount=%d with trajcoordinates=%s"
                   % (__file__, jnum, axisnum, jointcount, trajcoordinates))
            # Note: primary gui should protect for this misconfiguration
            # decline to jog
            return -1  # emcJogCont() et al reject neg joint/axis no.s
        return jnum

    def get_jog_info(self, axisnum):
        jjogmode = self.get_jjogmode()
        j_or_a = axisnum
        if jjogmode == JOGJOINT:
            j_or_a = self.jnum_for_axisnum(axisnum)
        return jjogmode, j_or_a
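# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the original file: what a config-local
# 'panelui_handler.py' consumed by load_handlers() above might look like.
# Public methods of the objects returned by get_handlers() are registered as
# callbacks by name; HandlerClass and hello_world are hypothetical names.
# ---------------------------------------------------------------------------

class HandlerClass:
    def __init__(self, halcomp, builder, commands, master):
        # 'commands' is the CNC_COMMANDS instance, so handlers can drive it,
        # e.g. self.cmd.machine_on(wname, None)
        self.cmd = commands

    def hello_world(self, wname, cmd):
        # callable as a keyboard/button command; first arg is the widget name
        print 'panelui: %s pressed (cmd=%s)' % (wname, cmd)


def get_handlers(halcomp, builder, commands, master):
    return [HandlerClass(halcomp, builder, commands, master)]

# If two modules register the same callback name, load_handlers() wraps them
# in a Trampoline so one event fans out to every registered function.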
# -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#    Copyright (C) 2010-2014 OpenERP s.a. (<http://openerp.com>).
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

"""
Miscellaneous tools used by OpenERP.
"""

from functools import wraps
import cProfile
from contextlib import contextmanager
import subprocess
import logging
import os
import socket
import sys
import threading
import time
import werkzeug.utils
import zipfile
from collections import defaultdict, Mapping
from datetime import datetime
from itertools import islice, izip, groupby
from lxml import etree
from which import which
from threading import local
import traceback

try:
    from html2text import html2text
except ImportError:
    html2text = None

from config import config
from cache import *
from .parse_version import parse_version

import openerp

# get_encodings, ustr and exception_to_unicode were originally from tools.misc.
# They were moved to loglevels until we refactor tools.
from openerp.loglevels import get_encodings, ustr, exception_to_unicode  # noqa

_logger = logging.getLogger(__name__)

# List of etree._Element subclasses that we choose to ignore when parsing XML.
# We include the *Base ones just in case, currently they seem to be subclasses of the _* ones.
SKIPPED_ELEMENT_TYPES = (etree._Comment, etree._ProcessingInstruction, etree.CommentBase, etree.PIBase) def find_in_path(name): try: return which(name) except IOError: return None def find_pg_tool(name): path = None if config['pg_path'] and config['pg_path'] != 'None': path = config['pg_path'] try: return which(name, path=path) except IOError: return None def exec_pg_command(name, *args): prog = find_pg_tool(name) if not prog: raise Exception('Couldn\'t find %s' % name) args2 = (prog,) + args with open(os.devnull) as dn: return subprocess.call(args2, stdout=dn, stderr=subprocess.STDOUT) def exec_pg_command_pipe(name, *args): prog = find_pg_tool(name) if not prog: raise Exception('Couldn\'t find %s' % name) # on win32, passing close_fds=True is not compatible # with redirecting std[in/err/out] pop = subprocess.Popen((prog,) + args, bufsize= -1, stdin=subprocess.PIPE, stdout=subprocess.PIPE, close_fds=(os.name=="posix")) return pop.stdin, pop.stdout def exec_command_pipe(name, *args): prog = find_in_path(name) if not prog: raise Exception('Couldn\'t find %s' % name) # on win32, passing close_fds=True is not compatible # with redirecting std[in/err/out] pop = subprocess.Popen((prog,) + args, bufsize= -1, stdin=subprocess.PIPE, stdout=subprocess.PIPE, close_fds=(os.name=="posix")) return pop.stdin, pop.stdout #---------------------------------------------------------- # File paths #---------------------------------------------------------- #file_path_root = os.getcwd() #file_path_addons = os.path.join(file_path_root, 'addons') def file_open(name, mode="r", subdir='addons', pathinfo=False): """Open a file from the OpenERP root, using a subdir folder. Example:: >>> file_open('hr/report/timesheer.xsl') >>> file_open('addons/hr/report/timesheet.xsl') >>> file_open('../../base/report/rml_template.xsl', subdir='addons/hr/report', pathinfo=True) @param name name of the file @param mode file open mode @param subdir subdirectory @param pathinfo if True returns tuple (fileobject, filepath) @return fileobject if pathinfo is False else (fileobject, filepath) """ import openerp.modules as addons adps = addons.module.ad_paths rtp = os.path.normcase(os.path.abspath(config['root_path'])) basename = name if os.path.isabs(name): # It is an absolute path # Is it below 'addons_path' or 'root_path'? name = os.path.normcase(os.path.normpath(name)) for root in adps + [rtp]: root = os.path.normcase(os.path.normpath(root)) + os.sep if name.startswith(root): base = root.rstrip(os.sep) name = name[len(base) + 1:] break else: # It is outside the OpenERP root: skip zipfile lookup. base, name = os.path.split(name) return _fileopen(name, mode=mode, basedir=base, pathinfo=pathinfo, basename=basename) if name.replace(os.sep, '/').startswith('addons/'): subdir = 'addons' name2 = name[7:] elif subdir: name = os.path.join(subdir, name) if name.replace(os.sep, '/').startswith('addons/'): subdir = 'addons' name2 = name[7:] else: name2 = name # First, try to locate in addons_path if subdir: for adp in adps: try: return _fileopen(name2, mode=mode, basedir=adp, pathinfo=pathinfo, basename=basename) except IOError: pass # Second, try to locate in root_path return _fileopen(name, mode=mode, basedir=rtp, pathinfo=pathinfo, basename=basename) def _fileopen(path, mode, basedir, pathinfo, basename=None): name = os.path.normpath(os.path.join(basedir, path)) if basename is None: basename = name # Give higher priority to module directories, which is # a more common case than zipped modules. 
    if os.path.isfile(name):
        fo = open(name, mode)
        if pathinfo:
            return fo, name
        return fo

    # Support for loading modules in zipped form.
    # This will not work for zipped modules that are sitting
    # outside of known addons paths.
    head = os.path.normpath(path)
    zipname = False
    while os.sep in head:
        head, tail = os.path.split(head)
        if not tail:
            break
        if zipname:
            zipname = os.path.join(tail, zipname)
        else:
            zipname = tail
        zpath = os.path.join(basedir, head + '.zip')
        if zipfile.is_zipfile(zpath):
            from cStringIO import StringIO
            zfile = zipfile.ZipFile(zpath)
            try:
                fo = StringIO()
                fo.write(zfile.read(os.path.join(
                    os.path.basename(head), zipname).replace(
                        os.sep, '/')))
                fo.seek(0)
                if pathinfo:
                    return fo, name
                return fo
            except Exception:
                pass
    # Not found
    if name.endswith('.rml'):
        raise IOError('Report %r doesn\'t exist or was deleted' % basename)
    raise IOError('File not found: %s' % basename)


#----------------------------------------------------------
# iterables
#----------------------------------------------------------
def flatten(list):
    """Flatten a list of elements into a unique list
    Author: Christophe Simonis ([email protected])

    Examples::
    >>> flatten(['a'])
    ['a']
    >>> flatten('b')
    ['b']
    >>> flatten( [] )
    []
    >>> flatten( [[], [[]]] )
    []
    >>> flatten( [[['a','b'], 'c'], 'd', ['e', [], 'f']] )
    ['a', 'b', 'c', 'd', 'e', 'f']
    >>> t = (1,2,(3,), [4, 5, [6, [7], (8, 9), ([10, 11, (12, 13)]), [14, [], (15,)], []]])
    >>> flatten(t)
    [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
    """
    def isiterable(x):
        return hasattr(x, "__iter__")
    r = []
    for e in list:
        if isiterable(e):
            map(r.append, flatten(e))
        else:
            r.append(e)
    return r


def reverse_enumerate(l):
    """Like enumerate, but in the other direction

    Usage::
    >>> a = ['a', 'b', 'c']
    >>> it = reverse_enumerate(a)
    >>> it.next()
    (2, 'c')
    >>> it.next()
    (1, 'b')
    >>> it.next()
    (0, 'a')
    >>> it.next()
    Traceback (most recent call last):
      File "<stdin>", line 1, in <module>
    StopIteration
    """
    return izip(xrange(len(l)-1, -1, -1), reversed(l))


class UpdateableStr(local):
    """ Class that stores an updateable string (used in wizards) """

    def __init__(self, string=''):
        self.string = string

    def __str__(self):
        return str(self.string)

    def __repr__(self):
        return str(self.string)

    def __nonzero__(self):
        return bool(self.string)


class UpdateableDict(local):
    """Stores an updateable dict to use in wizards"""

    def __init__(self, dict=None):
        if dict is None:
            dict = {}
        self.dict = dict

    def __str__(self): return str(self.dict)
    def __repr__(self): return str(self.dict)
    def clear(self): return self.dict.clear()
    def keys(self): return self.dict.keys()
    def __setitem__(self, i, y): self.dict.__setitem__(i, y)
    def __getitem__(self, i): return self.dict.__getitem__(i)
    def copy(self): return self.dict.copy()
    def iteritems(self): return self.dict.iteritems()
    def iterkeys(self): return self.dict.iterkeys()
    def itervalues(self): return self.dict.itervalues()
    def pop(self, k, d=None): return self.dict.pop(k, d)
    def popitem(self): return self.dict.popitem()
    def setdefault(self, k, d=None): return self.dict.setdefault(k, d)
    def update(self, E, **F): return self.dict.update(E, **F)
    def values(self): return self.dict.values()
    def get(self, k, d=None): return self.dict.get(k, d)
    def has_key(self, k): return self.dict.has_key(k)
    def items(self): return self.dict.items()
    def __cmp__(self, y): return self.dict.__cmp__(y)
    def __contains__(self, k): return self.dict.__contains__(k)
    def __delitem__(self, y): return self.dict.__delitem__(y)
    def __eq__(self, y): return self.dict.__eq__(y)
    def __ge__(self, y): return
self.dict.__ge__(y) def __gt__(self, y): return self.dict.__gt__(y) def __hash__(self): return self.dict.__hash__() def __iter__(self): return self.dict.__iter__() def __le__(self, y): return self.dict.__le__(y) def __len__(self): return self.dict.__len__() def __lt__(self, y): return self.dict.__lt__(y) def __ne__(self, y): return self.dict.__ne__(y) class currency(float): """ Deprecate .. warning:: Don't use ! Use res.currency.round() """ def __init__(self, value, accuracy=2, rounding=None): if rounding is None: rounding=10**-accuracy self.rounding=rounding self.accuracy=accuracy def __new__(cls, value, accuracy=2, rounding=None): return float.__new__(cls, round(value, accuracy)) #def __str__(self): # display_value = int(self*(10**(-self.accuracy))/self.rounding)*self.rounding/(10**(-self.accuracy)) # return str(display_value) def to_xml(s): return s.replace('&','&amp;').replace('<','&lt;').replace('>','&gt;') def get_iso_codes(lang): if lang.find('_') != -1: if lang.split('_')[0] == lang.split('_')[1].lower(): lang = lang.split('_')[0] return lang ALL_LANGUAGES = { 'ab_RU': u'Abkhazian / аҧсуа', 'am_ET': u'Amharic / አምሃርኛ', 'ar_SY': u'Arabic / الْعَرَبيّة', 'bg_BG': u'Bulgarian / български език', 'bs_BS': u'Bosnian / bosanski jezik', 'ca_ES': u'Catalan / Català', 'cs_CZ': u'Czech / Čeština', 'da_DK': u'Danish / Dansk', 'de_DE': u'German / Deutsch', 'el_GR': u'Greek / Ελληνικά', 'en_CA': u'English (CA)', 'en_GB': u'English (UK)', 'en_US': u'English (US)', 'es_AR': u'Spanish (AR) / Español (AR)', 'es_BO': u'Spanish (BO) / Español (BO)', 'es_CL': u'Spanish (CL) / Español (CL)', 'es_CO': u'Spanish (CO) / Español (CO)', 'es_CR': u'Spanish (CR) / Español (CR)', 'es_DO': u'Spanish (DO) / Español (DO)', 'es_EC': u'Spanish (EC) / Español (EC)', 'es_ES': u'Spanish / Español', 'es_GT': u'Spanish (GT) / Español (GT)', 'es_HN': u'Spanish (HN) / Español (HN)', 'es_MX': u'Spanish (MX) / Español (MX)', 'es_NI': u'Spanish (NI) / Español (NI)', 'es_PA': u'Spanish (PA) / Español (PA)', 'es_PE': u'Spanish (PE) / Español (PE)', 'es_PR': u'Spanish (PR) / Español (PR)', 'es_PY': u'Spanish (PY) / Español (PY)', 'es_SV': u'Spanish (SV) / Español (SV)', 'es_UY': u'Spanish (UY) / Español (UY)', 'es_VE': u'Spanish (VE) / Español (VE)', 'et_EE': u'Estonian / Eesti keel', 'fa_IR': u'Persian / فارس', 'fi_FI': u'Finnish / Suomi', 'fr_BE': u'French (BE) / Français (BE)', 'fr_CA': u'French (CA) / Français (CA)', 'fr_CH': u'French (CH) / Français (CH)', 'fr_FR': u'French / Français', 'gl_ES': u'Galician / Galego', 'gu_IN': u'Gujarati / ગુજરાતી', 'he_IL': u'Hebrew / עִבְרִי', 'hi_IN': u'Hindi / हिंदी', 'hr_HR': u'Croatian / hrvatski jezik', 'hu_HU': u'Hungarian / Magyar', 'id_ID': u'Indonesian / Bahasa Indonesia', 'it_IT': u'Italian / Italiano', 'iu_CA': u'Inuktitut / ᐃᓄᒃᑎᑐᑦ', 'ja_JP': u'Japanese / 日本語', 'ko_KP': u'Korean (KP) / 한국어 (KP)', 'ko_KR': u'Korean (KR) / 한국어 (KR)', 'lo_LA': u'Lao / ພາສາລາວ', 'lt_LT': u'Lithuanian / Lietuvių kalba', 'lv_LV': u'Latvian / latviešu valoda', 'mk_MK': u'Macedonian / македонски јазик', 'ml_IN': u'Malayalam / മലയാളം', 'mn_MN': u'Mongolian / монгол', 'nb_NO': u'Norwegian Bokmål / Norsk bokmål', 'nl_NL': u'Dutch / Nederlands', 'nl_BE': u'Flemish (BE) / Vlaams (BE)', 'oc_FR': u'Occitan (FR, post 1500) / Occitan', 'pl_PL': u'Polish / Język polski', 'pt_BR': u'Portuguese (BR) / Português (BR)', 'pt_PT': u'Portuguese / Português', 'ro_RO': u'Romanian / română', 'ru_RU': u'Russian / русский язык', 'si_LK': u'Sinhalese / සිංහල', 'sl_SI': u'Slovenian / slovenščina', 'sk_SK': u'Slovak / Slovenský 
jazyk', 'sq_AL': u'Albanian / Shqip', 'sr_RS': u'Serbian (Cyrillic) / српски', 'sr@latin': u'Serbian (Latin) / srpski', 'sv_SE': u'Swedish / svenska', 'te_IN': u'Telugu / తెలుగు', 'tr_TR': u'Turkish / Türkçe', 'vi_VN': u'Vietnamese / Tiếng Việt', 'uk_UA': u'Ukrainian / українська', 'ur_PK': u'Urdu / اردو', 'zh_CN': u'Chinese (CN) / 简体中文', 'zh_HK': u'Chinese (HK)', 'zh_TW': u'Chinese (TW) / 正體字', 'th_TH': u'Thai / ภาษาไทย', 'tlh_TLH': u'Klingon', } def scan_languages(): """ Returns all languages supported by OpenERP for translation :returns: a list of (lang_code, lang_name) pairs :rtype: [(str, unicode)] """ return sorted(ALL_LANGUAGES.iteritems(), key=lambda k: k[1]) def get_user_companies(cr, user): def _get_company_children(cr, ids): if not ids: return [] cr.execute('SELECT id FROM res_company WHERE parent_id IN %s', (tuple(ids),)) res = [x[0] for x in cr.fetchall()] res.extend(_get_company_children(cr, res)) return res cr.execute('SELECT company_id FROM res_users WHERE id=%s', (user,)) user_comp = cr.fetchone()[0] if not user_comp: return [] return [user_comp] + _get_company_children(cr, [user_comp]) def mod10r(number): """ Input number : account or invoice number Output return: the same number completed with the recursive mod10 key """ codec=[0,9,4,6,8,2,7,1,3,5] report = 0 result="" for digit in number: result += digit if digit.isdigit(): report = codec[ (int(digit) + report) % 10 ] return result + str((10 - report) % 10) def human_size(sz): """ Return the size in a human readable format """ if not sz: return False units = ('bytes', 'Kb', 'Mb', 'Gb') if isinstance(sz,basestring): sz=len(sz) s, i = float(sz), 0 while s >= 1024 and i < len(units)-1: s /= 1024 i += 1 return "%0.2f %s" % (s, units[i]) def logged(f): @wraps(f) def wrapper(*args, **kwargs): from pprint import pformat vector = ['Call -> function: %r' % f] for i, arg in enumerate(args): vector.append(' arg %02d: %s' % (i, pformat(arg))) for key, value in kwargs.items(): vector.append(' kwarg %10s: %s' % (key, pformat(value))) timeb4 = time.time() res = f(*args, **kwargs) vector.append(' result: %s' % pformat(res)) vector.append(' time delta: %s' % (time.time() - timeb4)) _logger.debug('\n'.join(vector)) return res return wrapper class profile(object): def __init__(self, fname=None): self.fname = fname def __call__(self, f): @wraps(f) def wrapper(*args, **kwargs): profile = cProfile.Profile() result = profile.runcall(f, *args, **kwargs) profile.dump_stats(self.fname or ("%s.cprof" % (f.func_name,))) return result return wrapper __icons_list = ['STOCK_ABOUT', 'STOCK_ADD', 'STOCK_APPLY', 'STOCK_BOLD', 'STOCK_CANCEL', 'STOCK_CDROM', 'STOCK_CLEAR', 'STOCK_CLOSE', 'STOCK_COLOR_PICKER', 'STOCK_CONNECT', 'STOCK_CONVERT', 'STOCK_COPY', 'STOCK_CUT', 'STOCK_DELETE', 'STOCK_DIALOG_AUTHENTICATION', 'STOCK_DIALOG_ERROR', 'STOCK_DIALOG_INFO', 'STOCK_DIALOG_QUESTION', 'STOCK_DIALOG_WARNING', 'STOCK_DIRECTORY', 'STOCK_DISCONNECT', 'STOCK_DND', 'STOCK_DND_MULTIPLE', 'STOCK_EDIT', 'STOCK_EXECUTE', 'STOCK_FILE', 'STOCK_FIND', 'STOCK_FIND_AND_REPLACE', 'STOCK_FLOPPY', 'STOCK_GOTO_BOTTOM', 'STOCK_GOTO_FIRST', 'STOCK_GOTO_LAST', 'STOCK_GOTO_TOP', 'STOCK_GO_BACK', 'STOCK_GO_DOWN', 'STOCK_GO_FORWARD', 'STOCK_GO_UP', 'STOCK_HARDDISK', 'STOCK_HELP', 'STOCK_HOME', 'STOCK_INDENT', 'STOCK_INDEX', 'STOCK_ITALIC', 'STOCK_JUMP_TO', 'STOCK_JUSTIFY_CENTER', 'STOCK_JUSTIFY_FILL', 'STOCK_JUSTIFY_LEFT', 'STOCK_JUSTIFY_RIGHT', 'STOCK_MEDIA_FORWARD', 'STOCK_MEDIA_NEXT', 'STOCK_MEDIA_PAUSE', 'STOCK_MEDIA_PLAY', 'STOCK_MEDIA_PREVIOUS', 'STOCK_MEDIA_RECORD', 
'STOCK_MEDIA_REWIND', 'STOCK_MEDIA_STOP', 'STOCK_MISSING_IMAGE', 'STOCK_NETWORK', 'STOCK_NEW', 'STOCK_NO', 'STOCK_OK', 'STOCK_OPEN', 'STOCK_PASTE', 'STOCK_PREFERENCES', 'STOCK_PRINT', 'STOCK_PRINT_PREVIEW', 'STOCK_PROPERTIES', 'STOCK_QUIT', 'STOCK_REDO', 'STOCK_REFRESH', 'STOCK_REMOVE', 'STOCK_REVERT_TO_SAVED', 'STOCK_SAVE', 'STOCK_SAVE_AS', 'STOCK_SELECT_COLOR', 'STOCK_SELECT_FONT', 'STOCK_SORT_ASCENDING', 'STOCK_SORT_DESCENDING', 'STOCK_SPELL_CHECK', 'STOCK_STOP', 'STOCK_STRIKETHROUGH', 'STOCK_UNDELETE', 'STOCK_UNDERLINE', 'STOCK_UNDO', 'STOCK_UNINDENT', 'STOCK_YES', 'STOCK_ZOOM_100', 'STOCK_ZOOM_FIT', 'STOCK_ZOOM_IN', 'STOCK_ZOOM_OUT', 'terp-account', 'terp-crm', 'terp-mrp', 'terp-product', 'terp-purchase', 'terp-sale', 'terp-tools', 'terp-administration', 'terp-hr', 'terp-partner', 'terp-project', 'terp-report', 'terp-stock', 'terp-calendar', 'terp-graph', 'terp-check','terp-go-month','terp-go-year','terp-go-today','terp-document-new','terp-camera_test', 'terp-emblem-important','terp-gtk-media-pause','terp-gtk-stop','terp-gnome-cpu-frequency-applet+', 'terp-dialog-close','terp-gtk-jump-to-rtl','terp-gtk-jump-to-ltr','terp-accessories-archiver', 'terp-stock_align_left_24','terp-stock_effects-object-colorize','terp-go-home','terp-gtk-go-back-rtl', 'terp-gtk-go-back-ltr','terp-personal','terp-personal-','terp-personal+','terp-accessories-archiver-minus', 'terp-accessories-archiver+','terp-stock_symbol-selection','terp-call-start','terp-dolar', 'terp-face-plain','terp-folder-blue','terp-folder-green','terp-folder-orange','terp-folder-yellow', 'terp-gdu-smart-failing','terp-go-week','terp-gtk-select-all','terp-locked','terp-mail-forward', 'terp-mail-message-new','terp-mail-replied','terp-rating-rated','terp-stage','terp-stock_format-scientific', 'terp-dolar_ok!','terp-idea','terp-stock_format-default','terp-mail-','terp-mail_delete' ] def icons(*a, **kw): global __icons_list return [(x, x) for x in __icons_list ] def detect_ip_addr(): """Try a very crude method to figure out a valid external IP or hostname for the current machine. Don't rely on this for binding to an interface, but it could be used as basis for constructing a remote URL to the server. 
""" def _detect_ip_addr(): from array import array from struct import pack, unpack try: import fcntl except ImportError: fcntl = None ip_addr = None if not fcntl: # not UNIX: host = socket.gethostname() ip_addr = socket.gethostbyname(host) else: # UNIX: # get all interfaces: nbytes = 128 * 32 s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) names = array('B', '\0' * nbytes) #print 'names: ', names outbytes = unpack('iL', fcntl.ioctl( s.fileno(), 0x8912, pack('iL', nbytes, names.buffer_info()[0])))[0] namestr = names.tostring() # try 64 bit kernel: for i in range(0, outbytes, 40): name = namestr[i:i+16].split('\0', 1)[0] if name != 'lo': ip_addr = socket.inet_ntoa(namestr[i+20:i+24]) break # try 32 bit kernel: if ip_addr is None: ifaces = filter(None, [namestr[i:i+32].split('\0', 1)[0] for i in range(0, outbytes, 32)]) for ifname in [iface for iface in ifaces if iface != 'lo']: ip_addr = socket.inet_ntoa(fcntl.ioctl(s.fileno(), 0x8915, pack('256s', ifname[:15]))[20:24]) break return ip_addr or 'localhost' try: ip_addr = _detect_ip_addr() except Exception: ip_addr = 'localhost' return ip_addr # RATIONALE BEHIND TIMESTAMP CALCULATIONS AND TIMEZONE MANAGEMENT: # The server side never does any timestamp calculation, always # sends them in a naive (timezone agnostic) format supposed to be # expressed within the server timezone, and expects the clients to # provide timestamps in the server timezone as well. # It stores all timestamps in the database in naive format as well, # which also expresses the time in the server timezone. # For this reason the server makes its timezone name available via the # common/timezone_get() rpc method, which clients need to read # to know the appropriate time offset to use when reading/writing # times. def get_win32_timezone(): """Attempt to return the "standard name" of the current timezone on a win32 system. @return the standard name of the current win32 timezone, or False if it cannot be found. """ res = False if sys.platform == "win32": try: import _winreg hklm = _winreg.ConnectRegistry(None,_winreg.HKEY_LOCAL_MACHINE) current_tz_key = _winreg.OpenKey(hklm, r"SYSTEM\CurrentControlSet\Control\TimeZoneInformation", 0,_winreg.KEY_ALL_ACCESS) res = str(_winreg.QueryValueEx(current_tz_key,"StandardName")[0]) # [0] is value, [1] is type code _winreg.CloseKey(current_tz_key) _winreg.CloseKey(hklm) except Exception: pass return res def detect_server_timezone(): """Attempt to detect the timezone to use on the server side. Defaults to UTC if no working timezone can be found. @return the timezone identifier as expected by pytz.timezone. """ try: import pytz except Exception: _logger.warning("Python pytz module is not available. " "Timezone will be set to UTC by default.") return 'UTC' # Option 1: the configuration option (did not exist before, so no backwards compatibility issue) # Option 2: to be backwards compatible with 5.0 or earlier, the value from time.tzname[0], but only if it is known to pytz # Option 3: the environment variable TZ sources = [ (config['timezone'], 'OpenERP configuration'), (time.tzname[0], 'time.tzname'), (os.environ.get('TZ',False),'TZ environment variable'), ] # Option 4: OS-specific: /etc/timezone on Unix if os.path.exists("/etc/timezone"): tz_value = False try: f = open("/etc/timezone") tz_value = f.read(128).strip() except Exception: pass finally: f.close() sources.append((tz_value,"/etc/timezone file")) # Option 5: timezone info from registry on Win32 if sys.platform == "win32": # Timezone info is stored in windows registry. 
# However this is not likely to work very well as the standard name # of timezones in windows is rarely something that is known to pytz. # But that's ok, it is always possible to use a config option to set # it explicitly. sources.append((get_win32_timezone(),"Windows Registry")) for (value,source) in sources: if value: try: tz = pytz.timezone(value) _logger.info("Using timezone %s obtained from %s.", tz.zone, source) return value except pytz.UnknownTimeZoneError: _logger.warning("The timezone specified in %s (%s) is invalid, ignoring it.", source, value) _logger.warning("No valid timezone could be detected, using default UTC " "timezone. You can specify it explicitly with option 'timezone' in " "the server configuration.") return 'UTC' def get_server_timezone(): return "UTC" DEFAULT_SERVER_DATE_FORMAT = "%Y-%m-%d" DEFAULT_SERVER_TIME_FORMAT = "%H:%M:%S" DEFAULT_SERVER_DATETIME_FORMAT = "%s %s" % ( DEFAULT_SERVER_DATE_FORMAT, DEFAULT_SERVER_TIME_FORMAT) # Python's strftime supports only the format directives # that are available on the platform's libc, so in order to # be cross-platform we map to the directives required by # the C standard (1989 version), always available on platforms # with a C standard implementation. DATETIME_FORMATS_MAP = { '%C': '', # century '%D': '%m/%d/%Y', # modified %y->%Y '%e': '%d', '%E': '', # special modifier '%F': '%Y-%m-%d', '%g': '%Y', # modified %y->%Y '%G': '%Y', '%h': '%b', '%k': '%H', '%l': '%I', '%n': '\n', '%O': '', # special modifier '%P': '%p', '%R': '%H:%M', '%r': '%I:%M:%S %p', '%s': '', #num of seconds since epoch '%T': '%H:%M:%S', '%t': ' ', # tab '%u': ' %w', '%V': '%W', '%y': '%Y', # Even if %y works, it's ambiguous, so we should use %Y '%+': '%Y-%m-%d %H:%M:%S', # %Z is a special case that causes 2 problems at least: # - the timezone names we use (in res_user.context_tz) come # from pytz, but not all these names are recognized by # strptime(), so we cannot convert in both directions # when such a timezone is selected and %Z is in the format # - %Z is replaced by an empty string in strftime() when # there is not tzinfo in a datetime value (e.g when the user # did not pick a context_tz). The resulting string does not # parse back if the format requires %Z. # As a consequence, we strip it completely from format strings. # The user can always have a look at the context_tz in # preferences to check the timezone. '%z': '', '%Z': '', } POSIX_TO_LDML = { 'a': 'E', 'A': 'EEEE', 'b': 'MMM', 'B': 'MMMM', #'c': '', 'd': 'dd', 'H': 'HH', 'I': 'hh', 'j': 'DDD', 'm': 'MM', 'M': 'mm', 'p': 'a', 'S': 'ss', 'U': 'w', 'w': 'e', 'W': 'w', 'y': 'yy', 'Y': 'yyyy', # see comments above, and babel's format_datetime assumes an UTC timezone # for naive datetime objects #'z': 'Z', #'Z': 'z', } def posix_to_ldml(fmt, locale): """ Converts a posix/strftime pattern into an LDML date format pattern. :param fmt: non-extended C89/C90 strftime pattern :param locale: babel locale used for locale-specific conversions (e.g. %x and %X) :return: unicode """ buf = [] pc = False quoted = [] for c in fmt: # LDML date format patterns uses letters, so letters must be quoted if not pc and c.isalpha(): quoted.append(c if c != "'" else "''") continue if quoted: buf.append("'") buf.append(''.join(quoted)) buf.append("'") quoted = [] if pc: if c == '%': # escaped percent buf.append('%') elif c == 'x': # date format, short seems to match buf.append(locale.date_formats['short'].pattern) elif c == 'X': # time format, seems to include seconds. 
short does not buf.append(locale.time_formats['medium'].pattern) else: # look up format char in static mapping buf.append(POSIX_TO_LDML[c]) pc = False elif c == '%': pc = True else: buf.append(c) # flush anything remaining in quoted buffer if quoted: buf.append("'") buf.append(''.join(quoted)) buf.append("'") return ''.join(buf) def server_to_local_timestamp(src_tstamp_str, src_format, dst_format, dst_tz_name, tz_offset=True, ignore_unparsable_time=True): """ Convert a source timestamp string into a destination timestamp string, attempting to apply the correct offset if both the server and local timezone are recognized, or no offset at all if they aren't or if tz_offset is false (i.e. assuming they are both in the same TZ). WARNING: This method is here to allow formatting dates correctly for inclusion in strings where the client would not be able to format/offset it correctly. DO NOT use it for returning date fields directly, these are supposed to be handled by the client!! @param src_tstamp_str: the str value containing the timestamp in the server timezone. @param src_format: the format to use when parsing the server timestamp. @param dst_format: the format to use when formatting the resulting timestamp for the local/client timezone. @param dst_tz_name: name of the destination timezone (such as the 'tz' value of the client context) @param ignore_unparsable_time: if True, return False if src_tstamp_str cannot be parsed using src_format or formatted using dst_format. @return local/client formatted timestamp, expressed in the local/client timezone if possible and if tz_offset is true, or src_tstamp_str if timezone offset could not be determined. """ if not src_tstamp_str: return False res = src_tstamp_str if src_format and dst_format: # find out server timezone server_tz = get_server_timezone() try: # dt_value needs to be a datetime.datetime object (so no time.struct_time or mx.DateTime.DateTime here!) dt_value = datetime.strptime(src_tstamp_str, src_format) if tz_offset and dst_tz_name: try: import pytz src_tz = pytz.timezone(server_tz) dst_tz = pytz.timezone(dst_tz_name) src_dt = src_tz.localize(dt_value, is_dst=True) dt_value = src_dt.astimezone(dst_tz) except Exception: pass res = dt_value.strftime(dst_format) except Exception: # Normal ways to end up here are if strptime or strftime failed if not ignore_unparsable_time: return False return res def split_every(n, iterable, piece_maker=tuple): """Splits an iterable into length-n pieces. The last piece will be shorter if ``n`` does not evenly divide the iterable length. @param ``piece_maker``: function to build the pieces from the slices (tuple,list,...) """ iterator = iter(iterable) piece = piece_maker(islice(iterator, n)) while piece: yield piece piece = piece_maker(islice(iterator, n)) if __name__ == '__main__': import doctest doctest.testmod() class upload_data_thread(threading.Thread): def __init__(self, email, data, type): self.args = [('email',email),('type',type),('data',data)] super(upload_data_thread,self).__init__() def run(self): try: import urllib args = urllib.urlencode(self.args) fp = urllib.urlopen('http://www.openerp.com/scripts/survey.php', args) fp.read() fp.close() except Exception: pass def upload_data(email, data, type='SURVEY'): a = upload_data_thread(email, data, type) a.start() return True def get_and_group_by_field(cr, uid, obj, ids, field, context=None): """ Read the values of ``field´´ for the given ``ids´´ and group ids by value. 
    :param string field: name of the field we want to read and group by
    :return: mapping of field values to the list of ids that have it
    :rtype: dict
    """
    res = {}
    for record in obj.read(cr, uid, ids, [field], context=context):
        key = record[field]
        res.setdefault(key[0] if isinstance(key, tuple) else key, []).append(record['id'])
    return res


def get_and_group_by_company(cr, uid, obj, ids, context=None):
    return get_and_group_by_field(cr, uid, obj, ids, field='company_id', context=context)


# port of python 2.6's attrgetter with support for dotted notation
def resolve_attr(obj, attr):
    for name in attr.split("."):
        obj = getattr(obj, name)
    return obj


def attrgetter(*items):
    if len(items) == 1:
        attr = items[0]

        def g(obj):
            return resolve_attr(obj, attr)
    else:
        def g(obj):
            return tuple(resolve_attr(obj, attr) for attr in items)
    return g


class unquote(str):
    """A subclass of str that implements repr() without enclosing quotation marks
       or escaping, keeping the original string untouched. The name comes from Lisp's unquote.
       One of the uses for this is to preserve or insert bare variable names within dicts during
       eval() of a dict's repr(). Use with care.

       Some examples (notice that there are never quotes surrounding
       the ``active_id`` name):

       >>> unquote('active_id')
       active_id
       >>> d = {'test': unquote('active_id')}
       >>> d
       {'test': active_id}
       >>> print d
       {'test': active_id}
    """
    def __repr__(self):
        return self


class UnquoteEvalContext(defaultdict):
    """Defaultdict-based evaluation context that returns an ``unquote``
       string for any missing name used during the evaluation.
       Mostly useful for evaluating OpenERP domains/contexts that may refer to
       names that are unknown at the time of eval, so that when the
       context/domain is converted back to a string, the original names are
       preserved.

       **Warning**: using an ``UnquoteEvalContext`` as context for ``eval()`` or
       ``safe_eval()`` will shadow the builtins, which may cause other
       failures, depending on what is evaluated.

       Example (notice that ``section_id`` is preserved in the final
       result):

       >>> context_str = "{'default_user_id': uid, 'default_section_id': section_id}"
       >>> eval(context_str, UnquoteEvalContext(uid=1))
       {'default_user_id': 1, 'default_section_id': section_id}
    """
    def __init__(self, *args, **kwargs):
        super(UnquoteEvalContext, self).__init__(None, *args, **kwargs)

    def __missing__(self, key):
        return unquote(key)


class mute_logger(object):
    """Temporarily suppress logging.

    Can be used as context manager or decorator.

        @mute_logger('openerp.plic.ploc')
        def do_stuff():
            blahblah()

        with mute_logger('openerp.foo.bar'):
            do_stuff()
    """
    def __init__(self, *loggers):
        self.loggers = loggers

    def filter(self, record):
        return 0

    def __enter__(self):
        for logger in self.loggers:
            assert isinstance(logger, basestring),\
                "A logger name must be a string, got %s" % type(logger)
            logging.getLogger(logger).addFilter(self)

    def __exit__(self, exc_type=None, exc_val=None, exc_tb=None):
        for logger in self.loggers:
            logging.getLogger(logger).removeFilter(self)

    def __call__(self, func):
        @wraps(func)
        def deco(*args, **kwargs):
            with self:
                return func(*args, **kwargs)
        return deco

_ph = object()


class CountingStream(object):
    """ Stream wrapper counting the number of elements it has yielded. Similar
    role to ``enumerate``, but for use when the iteration process of the stream
    isn't fully under caller control (the stream can be iterated from multiple
    points, including within a library).

    ``start`` allows overriding the starting index (the index before the first
    item is returned).
On each iteration (call to :meth:`~.next`), increases its :attr:`~.index` by one. .. attribute:: index ``int``, index of the last yielded element in the stream. If the stream has ended, will give an index 1-past the stream """ def __init__(self, stream, start=-1): self.stream = iter(stream) self.index = start self.stopped = False def __iter__(self): return self def next(self): if self.stopped: raise StopIteration() self.index += 1 val = next(self.stream, _ph) if val is _ph: self.stopped = True raise StopIteration() return val def stripped_sys_argv(*strip_args): """Return sys.argv with some arguments stripped, suitable for reexecution or subprocesses""" strip_args = sorted(set(strip_args) | set(['-s', '--save', '-d', '--database', '-u', '--update', '-i', '--init'])) assert all(config.parser.has_option(s) for s in strip_args) takes_value = dict((s, config.parser.get_option(s).takes_value()) for s in strip_args) longs, shorts = list(tuple(y) for _, y in groupby(strip_args, lambda x: x.startswith('--'))) longs_eq = tuple(l + '=' for l in longs if takes_value[l]) args = sys.argv[:] def strip(args, i): return args[i].startswith(shorts) \ or args[i].startswith(longs_eq) or (args[i] in longs) \ or (i >= 1 and (args[i - 1] in strip_args) and takes_value[args[i - 1]]) return [x for i, x in enumerate(args) if not strip(args, i)] class ConstantMapping(Mapping): """ An immutable mapping returning the provided value for every single key. Useful for default value to methods """ __slots__ = ['_value'] def __init__(self, val): self._value = val def __len__(self): """ defaultdict updates its length for each individually requested key, is that really useful? """ return 0 def __iter__(self): """ same as len, defaultdict udpates its iterable keyset with each key requested, is there a point for this? """ return iter([]) def __getitem__(self, item): return self._value def dumpstacks(sig=None, frame=None): """ Signal handler: dump a stack trace for each existing thread.""" code = [] def extract_stack(stack): for filename, lineno, name, line in traceback.extract_stack(stack): yield 'File: "%s", line %d, in %s' % (filename, lineno, name) if line: yield " %s" % (line.strip(),) # code from http://stackoverflow.com/questions/132058/getting-stack-trace-from-a-running-python-application#answer-2569696 # modified for python 2.5 compatibility threads_info = dict([(th.ident, {'name': th.name, 'uid': getattr(th, 'uid', 'n/a')}) for th in threading.enumerate()]) for threadId, stack in sys._current_frames().items(): thread_info = threads_info.get(threadId) code.append("\n# Thread: %s (id:%s) (uid:%s)" % (thread_info and thread_info['name'] or 'n/a', threadId, thread_info and thread_info['uid'] or 'n/a')) for line in extract_stack(stack): code.append(line) if openerp.evented: # code from http://stackoverflow.com/questions/12510648/in-gevent-how-can-i-dump-stack-traces-of-all-running-greenlets import gc from greenlet import greenlet for ob in gc.get_objects(): if not isinstance(ob, greenlet) or not ob: continue code.append("\n# Greenlet: %r" % (ob,)) for line in extract_stack(ob.gr_frame): code.append(line) _logger.info("\n".join(code)) class frozendict(dict): """ An implementation of an immutable dictionary. 
""" def __delitem__(self, key): raise NotImplementedError("'__delitem__' not supported on frozendict") def __setitem__(self, key, val): raise NotImplementedError("'__setitem__' not supported on frozendict") def clear(self): raise NotImplementedError("'clear' not supported on frozendict") def pop(self, key, default=None): raise NotImplementedError("'pop' not supported on frozendict") def popitem(self): raise NotImplementedError("'popitem' not supported on frozendict") def setdefault(self, key, default=None): raise NotImplementedError("'setdefault' not supported on frozendict") def update(self, *args, **kwargs): raise NotImplementedError("'update' not supported on frozendict") @contextmanager def ignore(*exc): try: yield except exc: pass # Avoid DeprecationWarning while still remaining compatible with werkzeug pre-0.9 if parse_version(getattr(werkzeug, '__version__', '0.0')) < parse_version('0.9.0'): def html_escape(text): return werkzeug.utils.escape(text, quote=True) else: def html_escape(text): return werkzeug.utils.escape(text) # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
# Copyright 2017 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """A stopwatch to check how much time is used by bits of code.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import functools import math import os import sys import threading import time from future.builtins import range # pylint: disable=redefined-builtin import six class Stat(object): """A set of statistics about a single value series.""" __slots__ = ("num", "min", "max", "sum", "sum_sq") def __init__(self): self.reset() def reset(self): self.num = 0 self.min = 1000000000 self.max = 0 self.sum = 0 self.sum_sq = 0 def add(self, val): self.num += 1 if self.min > val: self.min = val if self.max < val: self.max = val self.sum += val self.sum_sq += val**2 @property def avg(self): return 0 if self.num == 0 else self.sum / self.num @property def dev(self): """Standard deviation.""" if self.num == 0: return 0 return math.sqrt(max(0, self.sum_sq / self.num - (self.sum / self.num)**2)) def merge(self, other): self.num += other.num self.min = min(self.min, other.min) self.max = max(self.max, other.max) self.sum += other.sum self.sum_sq += other.sum_sq @staticmethod def build(summation, average, standard_deviation, minimum, maximum, number): stat = Stat() if number > 0: stat.num = number stat.min = minimum stat.max = maximum stat.sum = summation stat.sum_sq = number * (standard_deviation**2 + average**2) return stat @staticmethod def parse(s): if s == "num=0": return Stat() parts = (float(p.split(":")[1]) for p in s.split(", ")) return Stat.build(*parts) def __str__(self): if self.num == 0: return "num=0" return "sum: %.4f, avg: %.4f, dev: %.4f, min: %.4f, max: %.4f, num: %d" % ( self.sum, self.avg, self.dev, self.min, self.max, self.num) class StopWatchContext(object): """Time an individual call.""" __slots__ = ("_sw", "_start") def __init__(self, stopwatch, name): self._sw = stopwatch self._sw.push(name) def __enter__(self): self._start = time.time() def __exit__(self, unused_exception_type, unused_exc_value, unused_traceback): self._sw.add(self._sw.pop(), time.time() - self._start) class TracingStopWatchContext(StopWatchContext): """Time an individual call, but also output all the enter/exit calls.""" def __enter__(self): super(TracingStopWatchContext, self).__enter__() self._log(">>> %s" % self._sw.cur_stack()) def __exit__(self, *args, **kwargs): self._log("<<< %s: %.6f secs" % (self._sw.cur_stack(), time.time() - self._start)) super(TracingStopWatchContext, self).__exit__(*args, **kwargs) def _log(self, s): print(s, file=sys.stderr) class FakeStopWatchContext(object): """A fake stopwatch context for when the stopwatch is too slow or unneeded.""" __slots__ = () def __enter__(self): pass def __exit__(self, unused_exception_type, unused_exc_value, unused_traceback): pass fake_context = FakeStopWatchContext() class StopWatch(object): """A context manager that tracks call count and latency, and other stats. 
Usage: sw = stopwatch.Stopwatch() with sw("foo"): foo() with sw("bar"): bar() @sw.decorate def func(): pass func() print(sw) """ __slots__ = ("_times", "_local", "_factory") def __init__(self, enabled=True, trace=False): self._times = collections.defaultdict(Stat) self._local = threading.local() if trace: self.trace() elif enabled: self.enable() else: self.disable() def disable(self): self._factory = lambda _: fake_context def enable(self): self._factory = lambda name: StopWatchContext(self, name) def trace(self): self._factory = lambda name: TracingStopWatchContext(self, name) def custom(self, factory): self._factory = factory def __call__(self, name): return self._factory(name) def decorate(self, name_or_func): """Decorate a function/method to check its timings. To use the function's name: @sw.decorate def func(): pass To name it explicitly: @sw.decorate("name") def random_func_name(): pass Args: name_or_func: the name or the function to decorate. Returns: If a name is passed, returns this as a decorator, otherwise returns the decorated function. """ if os.environ.get("SC2_NO_STOPWATCH"): return name_or_func if callable(name_or_func) else lambda func: func def decorator(name, func): @functools.wraps(func) def _stopwatch(*args, **kwargs): with self(name): return func(*args, **kwargs) return _stopwatch if callable(name_or_func): return decorator(name_or_func.__name__, name_or_func) else: return lambda func: decorator(name_or_func, func) def push(self, name): try: self._local.stack.append(name) except AttributeError: # Using an exception is faster than using hasattr. self._local.stack = [name] def pop(self): stack = self._local.stack ret = ".".join(stack) stack.pop() return ret def cur_stack(self): return ".".join(self._local.stack) def clear(self): self._times.clear() def add(self, name, duration): self._times[name].add(duration) def __getitem__(self, name): return self._times[name] @property def times(self): return self._times def merge(self, other): for k, v in six.iteritems(other.times): self._times[k].merge(v) @staticmethod def parse(s): """Parse the output below to create a new StopWatch.""" stopwatch = StopWatch() for line in s.splitlines(): if line.strip(): parts = line.split(None) name = parts[0] if name != "%": # ie not the header line rest = (float(v) for v in parts[2:]) stopwatch.times[parts[0]].merge(Stat.build(*rest)) return stopwatch def str(self, threshold=0.1): """Return a string representation of the timings.""" if not self._times: return "" total = sum(s.sum for k, s in six.iteritems(self._times) if "." not in k) table = [["", "% total", "sum", "avg", "dev", "min", "max", "num"]] for k, v in sorted(self._times.items()): percent = 100 * v.sum / (total or 1) if percent > threshold: # ignore anything below the threshold table.append([ k, "%.2f%%" % percent, "%.4f" % v.sum, "%.4f" % v.avg, "%.4f" % v.dev, "%.4f" % v.min, "%.4f" % v.max, "%d" % v.num, ]) col_widths = [max(len(row[i]) for row in table) for i in range(len(table[0]))] out = "" for row in table: out += " " + row[0].ljust(col_widths[0]) + " " out += " ".join( val.rjust(width) for val, width in zip(row[1:], col_widths[1:])) out += "\n" return out def __str__(self): return self.str() # Global stopwatch is disabled by default to not incur the performance hit if # it's not wanted. sw = StopWatch(enabled=False)
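# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module): enabling the
# global stopwatch, timing a call via the decorator, and round-tripping the
# printed report through StopWatch.parse(). The function name 'work' is a
# made-up example.
# ---------------------------------------------------------------------------

if __name__ == "__main__":
  sw.enable()

  @sw.decorate
  def work():
    time.sleep(0.01)

  for _ in range(3):
    work()

  report = sw.str(threshold=0)  # threshold=0 keeps every row
  print(report)
  # parse() rebuilds equivalent Stat objects from the printed table, which is
  # how reports gathered in subprocesses can be merged back together.
  merged = StopWatch.parse(report)
  merged.merge(sw)
  print(int(merged["work"].num))  # -> 6: three calls, counted twice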
#!/usr/bin/env python # (c) 2012, Jan-Piet Mens <jpmens () gmail.com> # (c) 2012-2014, Michael DeHaan <[email protected]> and others # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # from __future__ import print_function import os import glob import sys import yaml import re import optparse import datetime import cgi import warnings from collections import defaultdict from jinja2 import Environment, FileSystemLoader from six import iteritems from ansible.utils import module_docs from ansible.utils.vars import merge_hash from ansible.utils.unicode import to_bytes from ansible.errors import AnsibleError ##################################################################################### # constants and paths # if a module is added in a version of Ansible older than this, don't print the version added information # in the module documentation because everyone is assumed to be running something newer than this already. TO_OLD_TO_BE_NOTABLE = 1.3 # Get parent directory of the directory this script lives in MODULEDIR=os.path.abspath(os.path.join( os.path.dirname(os.path.realpath(__file__)), os.pardir, 'lib', 'ansible', 'modules' )) # The name of the DOCUMENTATION template EXAMPLE_YAML=os.path.abspath(os.path.join( os.path.dirname(os.path.realpath(__file__)), os.pardir, 'examples', 'DOCUMENTATION.yml' )) _ITALIC = re.compile(r"I\(([^)]+)\)") _BOLD = re.compile(r"B\(([^)]+)\)") _MODULE = re.compile(r"M\(([^)]+)\)") _URL = re.compile(r"U\(([^)]+)\)") _CONST = re.compile(r"C\(([^)]+)\)") DEPRECATED = " (D)" NOTCORE = " (E)" ##################################################################################### def rst_ify(text): ''' convert symbols like I(this is in italics) to valid restructured text ''' try: t = _ITALIC.sub(r'*' + r"\1" + r"*", text) t = _BOLD.sub(r'**' + r"\1" + r"**", t) t = _MODULE.sub(r':ref:`' + r"\1 <\1>" + r"`", t) t = _URL.sub(r"\1", t) t = _CONST.sub(r'``' + r"\1" + r"``", t) except Exception as e: raise AnsibleError("Could not process (%s) : %s" % (str(text), str(e))) return t ##################################################################################### def html_ify(text): ''' convert symbols like I(this is in italics) to valid HTML ''' t = cgi.escape(text) t = _ITALIC.sub("<em>" + r"\1" + "</em>", t) t = _BOLD.sub("<b>" + r"\1" + "</b>", t) t = _MODULE.sub("<span class='module'>" + r"\1" + "</span>", t) t = _URL.sub("<a href='" + r"\1" + "'>" + r"\1" + "</a>", t) t = _CONST.sub("<code>" + r"\1" + "</code>", t) return t ##################################################################################### def rst_fmt(text, fmt): ''' helper for Jinja2 to do format strings ''' return fmt % (text) ##################################################################################### def rst_xline(width, char="="): ''' return a restructured text line of a given length ''' return char * width ##################################################################################### 
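# Illustrative examples (added here for reference, not executed): the helpers
# above turn the ansible documentation micro-format into target markup, e.g.
#
#   rst_ify("Set I(state) to C(present)")
#       -> "Set *state* to ``present``"
#   html_ify("See M(copy) or U(http://example.com)")
#       -> "See <span class='module'>copy</span> or "
#          "<a href='http://example.com'>http://example.com</a>"
#
#####################################################################################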
def write_data(text, options, outputname, module): ''' dumps module output to a file or the screen, as requested ''' if options.output_dir is not None: fname = os.path.join(options.output_dir, outputname % module) fname = fname.replace(".py","") f = open(fname, 'w') f.write(text.encode('utf-8')) f.close() else: print(text) ##################################################################################### def list_modules(module_dir, depth=0): ''' returns a hash of categories, each category being a hash of module names to file paths ''' categories = dict() module_info = dict() aliases = defaultdict(set) # * windows powershell modules have documentation stubs in python docstring # format (they are not executed) so skip the ps1 format files # * One glob level for every module level that we're going to traverse files = glob.glob("%s/*.py" % module_dir) + glob.glob("%s/*/*.py" % module_dir) + glob.glob("%s/*/*/*.py" % module_dir) + glob.glob("%s/*/*/*/*.py" % module_dir) for module_path in files: if module_path.endswith('__init__.py'): continue category = categories mod_path_only = os.path.dirname(module_path[len(module_dir) + 1:]) # Start at the second directory because we don't want the "vendor" # directories (core, extras) for new_cat in mod_path_only.split('/')[1:]: if new_cat not in category: category[new_cat] = dict() category = category[new_cat] module = os.path.splitext(os.path.basename(module_path))[0] if module in module_docs.BLACKLIST_MODULES: # Do not list blacklisted modules continue if module.startswith("_") and os.path.islink(module_path): source = os.path.splitext(os.path.basename(os.path.realpath(module_path)))[0] module = module.replace("_","",1) aliases[source].add(module) continue category[module] = module_path module_info[module] = module_path # keep module tests out of becoming module docs if 'test' in categories: del categories['test'] return module_info, categories, aliases ##################################################################################### def generate_parser(): ''' generate an optparse parser ''' p = optparse.OptionParser( version='%prog 1.0', usage='usage: %prog [options] arg1 arg2', description='Generate module documentation from metadata', ) p.add_option("-A", "--ansible-version", action="store", dest="ansible_version", default="unknown", help="Ansible version number") p.add_option("-M", "--module-dir", action="store", dest="module_dir", default=MODULEDIR, help="Ansible library path") p.add_option("-T", "--template-dir", action="store", dest="template_dir", default="hacking/templates", help="directory containing Jinja2 templates") p.add_option("-t", "--type", action='store', dest='type', choices=['rst'], default='rst', help="Document type") p.add_option("-v", "--verbose", action='store_true', default=False, help="Verbose") p.add_option("-o", "--output-dir", action="store", dest="output_dir", default=None, help="Output directory for module files") p.add_option("-I", "--includes-file", action="store", dest="includes_file", default=None, help="Create a file containing list of processed modules") p.add_option('-V', action='version', help='Show version number and exit') return p ##################################################################################### def jinja2_environment(template_dir, typ): env = Environment(loader=FileSystemLoader(template_dir), variable_start_string="@{", variable_end_string="}@", trim_blocks=True, ) env.globals['xline'] = rst_xline if typ == 'rst': env.filters['convert_symbols_to_format'] = rst_ify 
env.filters['html_ify'] = html_ify env.filters['fmt'] = rst_fmt env.filters['xline'] = rst_xline template = env.get_template('rst.j2') outputname = "%s_module.rst" else: raise Exception("unknown module format type: %s" % typ) return env, template, outputname ##################################################################################### def too_old(added): if not added: return False try: added_tokens = str(added).split(".") readded = added_tokens[0] + "." + added_tokens[1] added_float = float(readded) except ValueError as e: warnings.warn("Could not parse %s: %s" % (added, str(e))) return False return (added_float < TO_OLD_TO_BE_NOTABLE) def process_module(module, options, env, template, outputname, module_map, aliases): fname = module_map[module] if isinstance(fname, dict): return "SKIPPED" basename = os.path.basename(fname) deprecated = False # ignore files with extensions if not basename.endswith(".py"): return elif module.startswith("_"): if os.path.islink(fname): return # ignore, its an alias deprecated = True module = module.replace("_","",1) print("rendering: %s" % module) # use ansible core library to parse out doc metadata YAML and plaintext examples doc, examples, returndocs = module_docs.get_docstring(fname, verbose=options.verbose) # crash if module is missing documentation and not explicitly hidden from docs index if doc is None: sys.stderr.write("*** ERROR: MODULE MISSING DOCUMENTATION: %s, %s ***\n" % (fname, module)) sys.exit(1) if deprecated and 'deprecated' not in doc: sys.stderr.write("*** ERROR: DEPRECATED MODULE MISSING 'deprecated' DOCUMENTATION: %s, %s ***\n" % (fname, module)) sys.exit(1) if "/core/" in fname: doc['core'] = True else: doc['core'] = False if module in aliases: doc['aliases'] = aliases[module] all_keys = [] if not 'version_added' in doc: sys.stderr.write("*** ERROR: missing version_added in: %s ***\n" % module) sys.exit(1) added = 0 if doc['version_added'] == 'historical': del doc['version_added'] else: added = doc['version_added'] # don't show version added information if it's too old to be called out if too_old(added): del doc['version_added'] if 'options' in doc and doc['options']: for (k,v) in iteritems(doc['options']): # don't show version added information if it's too old to be called out if 'version_added' in doc['options'][k] and too_old(doc['options'][k]['version_added']): del doc['options'][k]['version_added'] if not 'description' in doc['options'][k]: raise AnsibleError("Missing required description for option %s in %s " % (k, module)) if not 'required' in doc['options'][k]: raise AnsibleError("Missing required 'required' for option %s in %s " % (k, module)) if not isinstance(doc['options'][k]['description'],list): doc['options'][k]['description'] = [doc['options'][k]['description']] all_keys.append(k) all_keys = sorted(all_keys) doc['option_keys'] = all_keys doc['filename'] = fname doc['docuri'] = doc['module'].replace('_', '-') doc['now_date'] = datetime.date.today().strftime('%Y-%m-%d') doc['ansible_version'] = options.ansible_version doc['plainexamples'] = examples #plain text if returndocs: try: doc['returndocs'] = yaml.safe_load(returndocs) except: print("could not load yaml: %s" % returndocs) raise else: doc['returndocs'] = None # here is where we build the table of contents... 
try: text = template.render(doc) except Exception as e: raise AnsibleError("Failed to render doc for %s: %s" % (fname, str(e))) write_data(text, options, outputname, module) return doc['short_description'] ##################################################################################### def print_modules(module, category_file, deprecated, core, options, env, template, outputname, module_map, aliases): modstring = module if modstring.startswith('_'): modstring = module[1:] modname = modstring if module in deprecated: modstring = modstring + DEPRECATED elif module not in core: modstring = modstring + NOTCORE category_file.write(" %s - %s <%s_module>\n" % (to_bytes(modstring), to_bytes(rst_ify(module_map[module][1])), to_bytes(modname))) def process_category(category, categories, options, env, template, outputname): ### FIXME: # We no longer conceptually deal with a mapping of category names to # modules to file paths. Instead we want several different records: # (1) Mapping of module names to file paths (what's presently used # as categories['all'] # (2) Mapping of category names to lists of module names (what you'd # presently get from categories[category_name][subcategory_name].keys() # (3) aliases (what's presently in categories['_aliases'] # # list_modules() now returns those. Need to refactor this function and # main to work with them. module_map = categories[category] module_info = categories['all'] aliases = {} if '_aliases' in categories: aliases = categories['_aliases'] category_file_path = os.path.join(options.output_dir, "list_of_%s_modules.rst" % category) category_file = open(category_file_path, "w") print("*** recording category %s in %s ***" % (category, category_file_path)) # start a new category file category = category.replace("_"," ") category = category.title() modules = [] deprecated = [] core = [] for module in module_map.keys(): if isinstance(module_map[module], dict): for mod in (m for m in module_map[module].keys() if m in module_info): if mod.startswith("_"): deprecated.append(mod) elif '/core/' in module_info[mod][0]: core.append(mod) else: if module not in module_info: continue if module.startswith("_"): deprecated.append(module) elif '/core/' in module_info[module][0]: core.append(module) modules.append(module) modules.sort(key=lambda k: k[1:] if k.startswith('_') else k) category_header = "%s Modules" % (category.title()) underscores = "`" * len(category_header) category_file.write("""\ %s %s .. toctree:: :maxdepth: 1 """ % (category_header, underscores)) sections = [] for module in modules: if module in module_map and isinstance(module_map[module], dict): sections.append(module) continue else: print_modules(module, category_file, deprecated, core, options, env, template, outputname, module_info, aliases) sections.sort() for section in sections: category_file.write("\n%s\n%s\n\n" % (section.replace("_"," ").title(),'-' * len(section))) category_file.write(".. toctree:: :maxdepth: 1\n\n") section_modules = module_map[section].keys() section_modules.sort(key=lambda k: k[1:] if k.startswith('_') else k) #for module in module_map[section]: for module in (m for m in section_modules if m in module_info): print_modules(module, category_file, deprecated, core, options, env, template, outputname, module_info, aliases) category_file.write("""\n\n .. note:: - %s: This marks a module as deprecated, which means a module is kept for backwards compatibility but usage is discouraged. The module documentation details page may explain more about this rationale. 
- %s: This marks a module as 'extras', which means it ships with ansible but may be a newer module and possibly (but not necessarily) less actively maintained than 'core' modules. - Tickets filed on modules are filed to different repos than those on the main open source project. Core module tickets should be filed at `ansible/ansible-modules-core on GitHub <http://github.com/ansible/ansible-modules-core>`_, extras tickets to `ansible/ansible-modules-extras on GitHub <http://github.com/ansible/ansible-modules-extras>`_ """ % (DEPRECATED, NOTCORE)) category_file.close() # TODO: end a new category file ##################################################################################### def validate_options(options): ''' validate option parser options ''' if not options.module_dir: print("--module-dir is required", file=sys.stderr) sys.exit(1) if not os.path.exists(options.module_dir): print("--module-dir does not exist: %s" % options.module_dir, file=sys.stderr) sys.exit(1) if not options.template_dir: print("--template-dir must be specified") sys.exit(1) ##################################################################################### def main(): p = generate_parser() (options, args) = p.parse_args() validate_options(options) env, template, outputname = jinja2_environment(options.template_dir, options.type) mod_info, categories, aliases = list_modules(options.module_dir) categories['all'] = mod_info categories['_aliases'] = aliases category_names = [c for c in categories.keys() if not c.startswith('_')] category_names.sort() # Write master category list category_list_path = os.path.join(options.output_dir, "modules_by_category.rst") with open(category_list_path, "w") as category_list_file: category_list_file.write("Module Index\n") category_list_file.write("============\n") category_list_file.write("\n\n") category_list_file.write(".. toctree::\n") category_list_file.write(" :maxdepth: 1\n\n") for category in category_names: category_list_file.write(" list_of_%s_modules\n" % category) # # Import all the docs into memory # module_map = mod_info.copy() skipped_modules = set() for modname in module_map: result = process_module(modname, options, env, template, outputname, module_map, aliases) if result == 'SKIPPED': del categories['all'][modname] else: categories['all'][modname] = (categories['all'][modname], result) # # Render all the docs to rst via category pages # for category in category_names: process_category(category, categories, options, env, template, outputname) if __name__ == '__main__': main()
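# A minimal, self-contained sketch of the markup conversion performed by
# rst_ify() above; the regexes mirror the _ITALIC/_BOLD/_CONST patterns
# defined in this file, and the sample text is made up for illustration.
import re

_ITALIC = re.compile(r"I\(([^)]+)\)")
_BOLD = re.compile(r"B\(([^)]+)\)")
_CONST = re.compile(r"C\(([^)]+)\)")

text = "Use I(force) with B(care); see C(/etc/hosts)."
t = _ITALIC.sub(r"*\1*", text)
t = _BOLD.sub(r"**\1**", t)
t = _CONST.sub(r"``\1``", t)
print(t)  # Use *force* with **care**; see ``/etc/hosts``.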
""" Drive without using the trained model. Just a simple algorithm. """ from sensor_client import SensorClient from rccar_client import RCCarClient from statistics import mean import time def get_area_means(ir_sweep): area_means = [] mean_size = 6 for i in range(0, len(ir_sweep), mean_size): area_means.append(mean(ir_sweep[i:i+mean_size])) return area_means def get_max_area(means): max_i = 0 max_mean = 0 for i, m in enumerate(means): if m > max_mean: max_mean = m max_i = i return max_i def get_proximity(ir_sweep): if min(ir_sweep[10:20]) < 22: print('Proximity alert!') return True else: return False def get_action(ir_sweep): area_means = get_area_means(ir_sweep) print(area_means) max_area = get_max_area(area_means) print(max_area) if max_area == 0 or max_area == 1: action = 1 # Turn left. elif max_area == 4 or max_area == 5: action = 0 # Turn right. else: action = 2 # Go straight return action if __name__ == '__main__': # Setup our two servers. sensor_host = '192.168.2.10' car_host = '192.168.2.9' try: sensors = SensorClient(host=sensor_host) car = RCCarClient(host=car_host) except: print("Issue setting up sensors or car.") raise input("Ready to roll! Press enter to go.") while True: # Get state. readings = sensors.get_readings() ir_sweep = readings['state'][:-1] print(ir_sweep) if get_proximity(ir_sweep): car.recover() time.sleep(4) continue # Get action. print("Getting action.") action = get_action(ir_sweep) print("Taking action %d" % action) # Take action. car.step(action) time.sleep(2) print("-"*80) car.cleanup_gpio()
import os import logging logger = logging.getLogger(__name__) from core.config import conf from passwords_handler import passwords_handler from PySide.QtGui import * from PySide.QtCore import * #Config parser OPTION_UNRAR_REMOVE_FILES = "unrar_remove_files" class Preferences(QVBoxLayout): """""" def __init__(self): """""" QVBoxLayout.__init__(self) # Options frame = QGroupBox(_('Options:')) label_remove_files = QLabel(_('Delete files after extract:')) self.remove_files_box = QCheckBox() vbox = QVBoxLayout() frame.setLayout(vbox) hbox_remove_files = QHBoxLayout() hbox_remove_files.addWidget(label_remove_files) hbox_remove_files.addWidget(self.remove_files_box) hbox_remove_files.addStretch() vbox.addLayout(hbox_remove_files) self.addWidget(frame) # Passwords frame2 = QGroupBox(_("Passwords (one per line):")) vbox2 = QVBoxLayout() frame2.setLayout(vbox2) self.text_view = QPlainTextEdit() vbox2.addWidget(self.text_view) self.addWidget(frame2) self.load_pwd() def load_pwd(self): """""" pwd_set = passwords_handler.get_passwords() lines = "\n".join(pwd_set) if lines: self.text_view.setPlainText(lines) def load(self): """""" if conf.get_addon_option(OPTION_UNRAR_REMOVE_FILES, default=True, is_bool=True): self.remove_files_box.toggle() #activate self.load_pwd() def save(self): """""" conf.set_addon_option(OPTION_UNRAR_REMOVE_FILES, self.remove_files_box.isChecked(), is_bool=True) # txt = self.text_view.toPlainText() passwords_handler.replace(txt.splitlines()) passwords_handler.save()
"""Helpers for dealing with software versioning.""" from distutils.version import StrictVersion def is_version_higher(version1, version2): """Check if a version is higher than another. This takes two software versions in the usual b"x.y" form and split them on the decimal character, converting both parts to ints, e.g. b"3.2" becomes (3, 2). It then does a comparison of the two tuples, and returns C{True} if C{version1} is greater than or equal to C{version2}. @param version1: The first version to compare as C{bytes}. @param version2: The second version to compare as C{bytes}. @return: C{True} if the first version is greater than or equal to the second. """ version1 = version1.decode("ascii") version2 = version2.decode("ascii") return StrictVersion(version1) >= StrictVersion(version2) def sort_versions(versions): """Sort a list of software versions in from the highest to the lowest. @param version: a C{list} of C{bytes} describing a version. """ strict_versions = sorted( [StrictVersion(version.decode("ascii")) for version in versions], reverse=True) return [str(strict_version).encode("ascii") for strict_version in strict_versions]
#!/usr/bin/python from pieva import * from palette import ColorPalette from screen import Screen from palette import ColorPalette import numpy as np import time import fastopc as opc import random from core import NoiseGenerator import OSC import threading import sys #from pythonosc import dispatcher #from pythonosc import osc_server greenPalette = ColorPalette(CSVfilename="palettes/green_grass") rainbowPalette = ColorPalette(CSVfilename="palettes/rainbow") pinkPalette = ColorPalette(CSVfilename="palettes/pink") mainPalette = greenPalette flash=0 def display_img(filename): import matplotlib.image as mpimg img = mpimg.imread(filename) if img.dtype == np.uint8: img = img.astype(np.uint32) elif img.dtype == np.float32: img = (img * 255).astype(np.uint32) bitmap = img[:,:,0] << 16 | img[:,:,1] << 8 | img[:,:,2] print "Sending", len(bitmap[0]), "X", len(bitmap), "bitmap", filename global screen global flash flash = 1 screen.send(bitmap) time.sleep(3) flash = 0 ########### pyosc stuff # define a message-handler function for the server to call. def printing_handler(addr, tags, stuff, source): msg_string = "%s [%s] %s" % (addr, tags, str(stuff)) print "OSCServer Got: '%s' from %s\n" % (msg_string, OSC.getUrlStr(source)) # send a reply to the client. msg = OSC.OSCMessage("/printed") msg.append(msg_string) return msg # define a message-handler function for the server to call. def pallete_handler(addr, tags, stuff, source): msg_string = "%s [%s] %s" % (addr, tags, str(stuff)) print "PHOSCServer Got: '%s' from %s\n" % (msg_string, OSC.getUrlStr(source)) global mainPalette global greenPalette global rainbowPalette global pinkPalette if (stuff[0] == 0): print "Switching palette to green" mainPalette = greenPalette display_img('palettes/test.png') if (stuff[0] == 1): print "Switching palette to rainbow" mainPalette = rainbowPalette if (stuff[0] == 2): print "Switching palette to pink" mainPalette = pinkPalette # return msg ################ class NoiseParams: octaves = 1 persistence = 0.5 lacunarity = 2.0 wavelength = 32 xScrollSpeed = 0 yScrollSpeed = 0 amplitude = 127 offset = 128 def __init__(self, octaves, persistence, lacunarity, wavelength, xScrollSpeed, yScrollSpeed, amplitude, offset): self.octaves = octaves self.persistence = persistence self.lacunarity = lacunarity self.wavelength = wavelength self.xScrollSpeed = xScrollSpeed self.yScrollSpeed = yScrollSpeed self.amplitude = amplitude self.offset = offset #paletteFileCSV="palettes/green_grass"# #paletteFileCSV="palettes/rainbow" #paletteFileCSV="palettes/pink" #mainPalette = ColorPalette(CSVfilename=paletteFileCSV) width = 140 height = 140 sun = NoiseParams( octaves = 1, persistence = 0.5, lacunarity = 2.0, wavelength = width * 8.0, xScrollSpeed = 1, yScrollSpeed = 0, amplitude = 95, offset = 140) grass = NoiseParams( octaves = 4, persistence = 0.702, lacunarity = 2.0, wavelength = width / 8, xScrollSpeed = 0, yScrollSpeed = 5, amplitude = 120, offset = 120) screen = Screen(sections, ['127.0.0.1:7891']) screen.dimm(0) targetFPS = 24 targetFrameTime = 1./targetFPS timeCounter = int(random.random() * 65535) #dispatcher = dispatcher.Dispatcher() #dispatcher.map("/MM_Remote/Control/objectPosition", set_pallete, "Set Pallete: " ) #dispatcher.map("/volume", set_pallete, "Pallete") #server = osc_server.ThreadingOSCUDPServer( ('127.0.0.1', 54321), dispatcher) #print("Serving on {}".format(server.server_address)) #server.serve_forever() ########################### # EXPERIMENT listen_address = ('localhost', 54321) send_address = ('localhost', 12345) try: 
#c = OSC.OSCClient() #c.connect(listen_address) s = OSC.ThreadingOSCServer(listen_address)#, c)#, return_port=54321) print s # Set Server to return errors as OSCMessages to "/error" s.setSrvErrorPrefix("/error") # Set Server to reply to server-info requests with OSCMessages to "/serverinfo" s.setSrvInfoPrefix("/serverinfo") # this registers a 'default' handler (for unmatched messages), # an /'error' handler, an '/info' handler. # And, if the client supports it, a '/subscribe' & '/unsubscribe' handler s.addDefaultHandlers() #s.addMsgHandler("/print", printing_handler) s.addMsgHandler("default", printing_handler) #s.addMsgHandler("/MM_Remote/Control/activeObjectsID", pallete_handler) s.addMsgHandler("/MM_Remote/Control/activeObjectsPosition", pallete_handler) # if client & server are bound to 'localhost', server replies return to itself! s.addMsgHandler("/printed", s.msgPrinter_handler) s.addMsgHandler("/serverinfo", s.msgPrinter_handler) print "Registered Callback-functions:" for addr in s.getOSCAddressSpace(): print addr print "\nStarting OSCServer. Use ctrl-C to quit." st = threading.Thread(target=s.serve_forever) st.start() c2 = OSC.OSCClient() c2.connect(send_address) #subreq = OSC.OSCMessage("/MashMachine/Control/getActiveObjectsPosition") paired = 0 while paired == 0: try: print "Pairing..." subreq = OSC.OSCMessage("/MashMachine/Global/makePairing") subreq.append("localhost") subreq.append(54321) c2.send(subreq) #time.sleep(0.5) print "Subscribing..." subreq = OSC.OSCMessage("/MashMachine/Global/subscribeObjectsID") subreq.append("localhost") c2.send(subreq) paired = 1 except(OSC.OSCClientError): print "Pairing or Subscribing failed.." time.sleep(1) except(KeyboardInterrupt): print "Continue without pairing.." paired = 1 #time.sleep(0.5) ## /MashMachine/Global/subscribeObjectsID ## /MashMachine/Global/subscribeObjectsPosition print("eina.. Control+C to stop") while True: startTime = time.time() global flash if not flash: screen.render(width, height, timeCounter/640., [grass, sun], mainPalette) endTime = time.time() timeToWait = targetFrameTime - (endTime - startTime) print"Frame time: ", (endTime - startTime) if timeToWait < 0: print("late!", timeToWait) timeToWait = 0 time.sleep(timeToWait) timeCounter +=1 except (KeyboardInterrupt): #, OSC.OSCClientError, SystemExit): print "\nClosing OSCServer." s.close() print "Waiting for Server-thread to finish" st.join() #print "Closing OSCClient" #c.close() print "Done" sys.exit(0)
""" Django settings for HappyDogs project. Generated by 'django-admin startproject' using Django 1.10.4. For more information on this file, see https://docs.djangoproject.com/en/1.10/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.10/ref/settings/ """ import os # Build paths inside the project like this: os.path.join(BASE_DIR, ...) BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = '_*zd(-(!idd79&exz8mn#=&g=pfno=^r)3p1mssm*d^=y9*1+t' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = False ALLOWED_HOSTS = ['*'] # Application definition INSTALLED_APPS = [ # 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'rest_framework', 'HappyDogs.apps.HappyDogs', ] MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'HappyDogs.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [ os.path.join(BASE_DIR, 'HappyDogs' , 'templates'), ], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 'HappyDogs.wsgi.application' # Database # https://docs.djangoproject.com/en/1.10/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.postgresql_psycopg2', 'NAME': 'happy_dogs_database', 'USER': 'happy_dogs_user', 'PASSWORD': 'Password1', 'HOST': 'localhost', 'PORT': '5432', } } # Password validation # https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/1.10/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'America/Monterrey' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.10/howto/static-files/ STATIC_ROOT = os.path.join(BASE_DIR, 'deployment/static') STATIC_URL = '/static/' STATICFILES_DIRS = ( os.path.join(BASE_DIR, "static"), ) MEDIA_ROOT = os.path.join(BASE_DIR, 'media') MEDIA_URL = '/media/'
#!/usr/bin/env python # (c) 2012, Jan-Piet Mens <jpmens () gmail.com> # (c) 2012-2014, Michael DeHaan <[email protected]> and others # (c) 2017 Ansible Project # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. from __future__ import absolute_import, division, print_function __metaclass__ = type import datetime import glob import optparse import os import re import sys import warnings from collections import defaultdict from copy import deepcopy from distutils.version import LooseVersion from functools import partial from pprint import PrettyPrinter try: from html import escape as html_escape except ImportError: # Python-3.2 or later import cgi def html_escape(text, quote=True): return cgi.escape(text, quote) import jinja2 import yaml from jinja2 import Environment, FileSystemLoader from six import iteritems, string_types from ansible.errors import AnsibleError from ansible.module_utils._text import to_bytes, to_text from ansible.module_utils.common.collections import is_sequence from ansible.module_utils.parsing.convert_bool import boolean from ansible.plugins.loader import fragment_loader from ansible.utils import plugin_docs from ansible.utils.display import Display from ansible.utils._build_helpers import update_file_if_different ##################################################################################### # constants and paths # if a module is added in a version of Ansible older than this, don't print the version added information # in the module documentation because everyone is assumed to be running something newer than this already. TOO_OLD_TO_BE_NOTABLE = 1.3 # Get parent directory of the directory this script lives in MODULEDIR = os.path.abspath(os.path.join( os.path.dirname(os.path.realpath(__file__)), os.pardir, 'lib', 'ansible', 'modules' )) # The name of the DOCUMENTATION template EXAMPLE_YAML = os.path.abspath(os.path.join( os.path.dirname(os.path.realpath(__file__)), os.pardir, 'examples', 'DOCUMENTATION.yml' )) _ITALIC = re.compile(r"I\(([^)]+)\)") _BOLD = re.compile(r"B\(([^)]+)\)") _MODULE = re.compile(r"M\(([^)]+)\)") _URL = re.compile(r"U\(([^)]+)\)") _LINK = re.compile(r"L\(([^)]+),([^)]+)\)") _CONST = re.compile(r"C\(([^)]+)\)") _RULER = re.compile(r"HORIZONTALLINE") DEPRECATED = b" (D)" pp = PrettyPrinter() display = Display() # kludge_ns gives us a kludgey way to set variables inside of loops that need to be visible outside # the loop. We can get rid of this when we no longer need to build docs with less than Jinja-2.10 # http://jinja.pocoo.org/docs/2.10/templates/#assignments # With Jinja-2.10 we can use jinja2's namespace feature, restoring the namespace template portion # of: fa5c0282a4816c4dd48e80b983ffc1e14506a1f5 NS_MAP = {} def to_kludge_ns(key, value): NS_MAP[key] = value return "" def from_kludge_ns(key): return NS_MAP[key] # The max filter was added in Jinja2-2.10. 
# Until we can require that version, use this
def do_max(seq):
    return max(seq)


def rst_ify(text):
    ''' convert symbols like I(this is in italics) to valid restructured text '''

    try:
        t = _ITALIC.sub(r"*\1*", text)
        t = _BOLD.sub(r"**\1**", t)
        t = _MODULE.sub(r":ref:`\1 <\1_module>`", t)
        t = _LINK.sub(r"`\1 <\2>`_", t)
        t = _URL.sub(r"\1", t)
        t = _CONST.sub(r"``\1``", t)
        t = _RULER.sub(r"------------", t)
    except Exception as e:
        raise AnsibleError("Could not process (%s) : %s" % (text, e))

    return t


def html_ify(text):
    ''' convert symbols like I(this is in italics) to valid HTML '''

    if not isinstance(text, string_types):
        text = to_text(text)

    t = html_escape(text)
    t = _ITALIC.sub(r"<em>\1</em>", t)
    t = _BOLD.sub(r"<b>\1</b>", t)
    t = _MODULE.sub(r"<span class='module'>\1</span>", t)
    t = _URL.sub(r"<a href='\1'>\1</a>", t)
    t = _LINK.sub(r"<a href='\2'>\1</a>", t)
    t = _CONST.sub(r"<code>\1</code>", t)
    t = _RULER.sub(r"<hr/>", t)

    return t.strip()


def rst_fmt(text, fmt):
    ''' helper for Jinja2 to do format strings '''

    return fmt % (text)


def rst_xline(width, char="="):
    ''' return a restructured text line of a given length '''

    return char * width


test_list = partial(is_sequence, include_strings=False)


def normalize_options(value):
    """Normalize boolean option value."""

    if value.get('type') == 'bool' and 'default' in value:
        try:
            value['default'] = boolean(value['default'], strict=True)
        except TypeError:
            pass
    return value


def write_data(text, output_dir, outputname, module=None):
    ''' dumps module output to a file or the screen, as requested '''

    if output_dir is not None:
        if module:
            outputname = outputname % module

        if not os.path.exists(output_dir):
            os.makedirs(output_dir)
        fname = os.path.join(output_dir, outputname)
        fname = fname.replace(".py", "")

        update_file_if_different(fname, to_bytes(text))
    else:
        print(text)


def get_plugin_info(module_dir, limit_to=None, verbose=False):
    '''
    Returns information about plugins and the categories that they belong to

    :arg module_dir: file system path to the top of the plugin directory
    :kwarg limit_to: If given, this is a list of plugin names to
        generate information for.  All other plugins will be ignored.
    :returns: Tuple of two dicts, module_info and categories:

        :module_info: mapping of module names to information about them.  The fields of the dict are:

            :path: filesystem path to the module
            :deprecated: boolean.  True means the module is deprecated otherwise not.
            :aliases: set of aliases to this module name
            :metadata: The module's metadata (as recorded in the module)
            :doc: The documentation structure for the module
            :seealso: The list of dictionaries with references to related subjects
            :examples: The module's examples
            :returndocs: The module's returndocs

        :categories: maps category names to a dict.  The dict contains at
            least one key, '_modules' which contains a list of module names in
            that category.  Any other keys in the dict are subcategories with
            the same structure.
    '''
    categories = dict()
    module_info = defaultdict(dict)

    # * windows powershell modules have documentation stubs in python docstring
    #   format (they are not executed) so skip the ps1 format files
    # * One glob level for every module level that we're going to traverse
    files = (
        glob.glob("%s/*.py" % module_dir) +
        glob.glob("%s/*/*.py" % module_dir) +
        glob.glob("%s/*/*/*.py" % module_dir) +
        glob.glob("%s/*/*/*/*.py" % module_dir)
    )

    for module_path in files:
        # Do not list __init__.py files
        if module_path.endswith('__init__.py'):
            continue

        # Do not list blacklisted modules
        module = os.path.splitext(os.path.basename(module_path))[0]
        if module in plugin_docs.BLACKLIST['MODULE'] or module == 'base':
            continue

        # If requested, limit module documentation building only to passed-in
        # modules.
        if limit_to is not None and module.lower() not in limit_to:
            continue

        deprecated = False
        if module.startswith("_"):
            if os.path.islink(module_path):
                # Handle aliases
                source = os.path.splitext(os.path.basename(os.path.realpath(module_path)))[0]
                module = module.replace("_", "", 1)
                aliases = module_info[source].get('aliases', set())
                aliases.add(module)
                # In case we just created this via get()'s fallback
                module_info[source]['aliases'] = aliases
                continue
            else:
                # Handle deprecations
                module = module.replace("_", "", 1)
                deprecated = True

        #
        # Regular module to process
        #

        # use ansible core library to parse out doc metadata YAML and plaintext examples
        doc, examples, returndocs, metadata = plugin_docs.get_docstring(module_path, fragment_loader, verbose=verbose)

        # Skip modules that have already been removed
        if metadata and 'removed' in metadata.get('status', []):
            continue

        category = categories

        # Start at the second directory because we don't want the "vendor"
        # directories (core, extras)
        mod_path_only = os.path.dirname(module_path[len(module_dir):])

        primary_category = ''
        module_categories = []
        # build up the categories that this module belongs to
        for new_cat in mod_path_only.split('/')[1:]:
            if new_cat not in category:
                category[new_cat] = dict()
                category[new_cat]['_modules'] = []
            module_categories.append(new_cat)
            category = category[new_cat]
        category['_modules'].append(module)

        # the category we will use in links (so list_of_all_plugins can point to plugins/action_plugins/*)
        if module_categories:
            primary_category = module_categories[0]

        if 'options' in doc and doc['options'] is None:
            display.error("*** ERROR: DOCUMENTATION.options must be a dictionary/hash when used. ***")
            pos = getattr(doc, "ansible_pos", None)
            if pos is not None:
                display.error("Module position: %s, %d, %d" % doc.ansible_pos)
            doc['options'] = dict()

        for key, opt in doc.get('options', {}).items():
            doc['options'][key] = normalize_options(opt)

        # save all the information
        module_info[module] = {'path': module_path,
                               'source': os.path.relpath(module_path, module_dir),
                               'deprecated': deprecated,
                               'aliases': module_info[module].get('aliases', set()),
                               'metadata': metadata,
                               'doc': doc,
                               'examples': examples,
                               'returndocs': returndocs,
                               'categories': module_categories,
                               'primary_category': primary_category,
                               }

    # keep module tests out of becoming module docs
    if 'test' in categories:
        del categories['test']

    return module_info, categories


def generate_parser():
    ''' generate an optparse parser '''

    p = optparse.OptionParser(
        version='%prog 1.0',
        usage='usage: %prog [options] arg1 arg2',
        description='Generate module documentation from metadata',
    )
    p.add_option("-A", "--ansible-version", action="store", dest="ansible_version", default="unknown", help="Ansible version number")
    p.add_option("-M", "--module-dir", action="store", dest="module_dir", default=MODULEDIR, help="Ansible library path")
    p.add_option("-P", "--plugin-type", action="store", dest="plugin_type", default='module', help="The type of plugin (module, lookup, etc)")
    p.add_option("-T", "--template-dir", action="append", dest="template_dir", help="directory containing Jinja2 templates")
    p.add_option("-t", "--type", action='store', dest='type', choices=['rst'], default='rst', help="Document type")
    p.add_option("-o", "--output-dir", action="store", dest="output_dir", default=None, help="Output directory for module files")
    p.add_option("-I", "--includes-file", action="store", dest="includes_file", default=None, help="Create a file containing list of processed modules")
    p.add_option("-l", "--limit-to-modules", '--limit-to', action="store", dest="limit_to", default=None,
                 help="Limit building module documentation to comma-separated list of plugins. Specify non-existing plugin name for no plugins.")
    p.add_option('-V', action='version', help='Show version number and exit')
    p.add_option('-v', '--verbose', dest='verbosity', default=0, action="count", help="verbose mode (increase number of 'v's for more)")

    return p


def jinja2_environment(template_dir, typ, plugin_type):

    env = Environment(loader=FileSystemLoader(template_dir),
                      variable_start_string="@{",
                      variable_end_string="}@",
                      trim_blocks=True)
    env.globals['xline'] = rst_xline
    # Can be removed (and template switched to use namespace) when we no longer need to build
    # with <Jinja-2.10
    env.globals['to_kludge_ns'] = to_kludge_ns
    env.globals['from_kludge_ns'] = from_kludge_ns
    if 'max' not in env.filters:
        # Jinja < 2.10
        env.filters['max'] = do_max

    templates = {}
    if typ == 'rst':
        env.filters['rst_ify'] = rst_ify
        env.filters['html_ify'] = html_ify
        env.filters['fmt'] = rst_fmt
        env.filters['xline'] = rst_xline
        env.tests['list'] = test_list
        templates['plugin'] = env.get_template('plugin.rst.j2')

        if plugin_type == 'module':
            name = 'modules'
        else:
            name = 'plugins'
        templates['category_list'] = env.get_template('%s_by_category.rst.j2' % name)
        templates['support_list'] = env.get_template('%s_by_support.rst.j2' % name)
        templates['list_of_CATEGORY_modules'] = env.get_template('list_of_CATEGORY_%s.rst.j2' % name)
    else:
        raise Exception("Unsupported format type: %s" % typ)

    return templates


def too_old(added):
    if not added:
        return False
    try:
        added_tokens = str(added).split(".")
        readded = added_tokens[0] + "." + added_tokens[1]
        added_float = float(readded)
    except ValueError as e:
        warnings.warn("Could not parse %s: %s" % (added, str(e)))
        return False

    return added_float < TOO_OLD_TO_BE_NOTABLE


def process_plugins(module_map, templates, outputname, output_dir, ansible_version, plugin_type):
    for module in module_map:
        display.display("rendering: %s" % module)

        fname = module_map[module]['path']
        display.vvvvv(pp.pformat(('process_plugins info: ', module_map[module])))

        # crash if module is missing documentation and not explicitly hidden from docs index
        if module_map[module]['doc'] is None:
            display.error("%s MISSING DOCUMENTATION" % (fname,))
            _doc = {plugin_type: module,
                    'version_added': '2.4',
                    'filename': fname}
            module_map[module]['doc'] = _doc
            # continue

        # Going to reference this heavily so make a short name to reference it by
        doc = module_map[module]['doc']
        display.vvvvv(pp.pformat(('process_plugins doc: ', doc)))

        # add some defaults for plugins that don't have most of the info
        doc['module'] = doc.get('module', module)
        doc['version_added'] = doc.get('version_added', 'historical')
        doc['plugin_type'] = plugin_type

        if module_map[module]['deprecated'] and 'deprecated' not in doc:
            display.warning("%s PLUGIN MISSING DEPRECATION DOCUMENTATION: %s" % (fname, 'deprecated'))

        required_fields = ('short_description',)
        for field in required_fields:
            if field not in doc:
                display.warning("%s PLUGIN MISSING field '%s'" % (fname, field))

        not_nullable_fields = ('short_description',)
        for field in not_nullable_fields:
            if field in doc and doc[field] in (None, ''):
                print("%s: WARNING: MODULE field '%s' DOCUMENTATION is null/empty value=%s" % (fname, field, doc[field]))

        if 'version_added' not in doc:
            display.error("*** ERROR: missing version_added in: %s ***\n" % module)

        #
        # The present template gets everything from doc so we spend most of this
        # function moving data into doc for the template to reference
        #

        if module_map[module]['aliases']:
            doc['aliases'] = module_map[module]['aliases']

        # don't show version added information if it's too old to be called out
        added = 0
        if doc['version_added'] == 'historical':
            del doc['version_added']
        else:
            added = doc['version_added']

        # Strip old version_added for the module
        if too_old(added):
            del doc['version_added']

        option_names = []

        if 'options' in doc and doc['options']:
            for (k, v) in iteritems(doc['options']):
                # Error out if there's no description
                if 'description' not in doc['options'][k]:
                    raise AnsibleError("Missing required description for option %s in %s " % (k, module))

                # Error out if required isn't a boolean (people have been putting
                # information on when something is required in here.  Those need
                # to go in the description instead).
required_value = doc['options'][k].get('required', False) if not isinstance(required_value, bool): raise AnsibleError("Invalid required value '%s' for option '%s' in '%s' (must be truthy)" % (required_value, k, module)) # Strip old version_added information for options if 'version_added' in doc['options'][k] and too_old(doc['options'][k]['version_added']): del doc['options'][k]['version_added'] # Make sure description is a list of lines for later formatting if not isinstance(doc['options'][k]['description'], list): doc['options'][k]['description'] = [doc['options'][k]['description']] option_names.append(k) option_names.sort() doc['option_keys'] = option_names doc['filename'] = fname doc['source'] = module_map[module]['source'] doc['docuri'] = doc['module'].replace('_', '-') doc['now_date'] = datetime.date.today().strftime('%Y-%m-%d') doc['ansible_version'] = ansible_version # check the 'deprecated' field in doc. We expect a dict potentially with 'why', 'version', and 'alternative' fields # examples = module_map[module]['examples'] # print('\n\n%s: type of examples: %s\n' % (module, type(examples))) # if examples and not isinstance(examples, (str, unicode, list)): # raise TypeError('module %s examples is wrong type (%s): %s' % (module, type(examples), examples)) # use 'examples' for 'plainexamples' if 'examples' is a string if isinstance(module_map[module]['examples'], string_types): doc['plainexamples'] = module_map[module]['examples'] # plain text else: doc['plainexamples'] = '' doc['metadata'] = module_map[module]['metadata'] display.vvvvv(pp.pformat(module_map[module])) if module_map[module]['returndocs']: try: doc['returndocs'] = yaml.safe_load(module_map[module]['returndocs']) except Exception as e: print("%s:%s:yaml error:%s:returndocs=%s" % (fname, module, e, module_map[module]['returndocs'])) doc['returndocs'] = None else: doc['returndocs'] = None doc['author'] = doc.get('author', ['UNKNOWN']) if isinstance(doc['author'], string_types): doc['author'] = [doc['author']] display.v('about to template %s' % module) display.vvvvv(pp.pformat(doc)) text = templates['plugin'].render(doc) if LooseVersion(jinja2.__version__) < LooseVersion('2.10'): # jinja2 < 2.10's indent filter indents blank lines. Cleanup text = re.sub(' +\n', '\n', text) write_data(text, output_dir, outputname, module) def process_categories(plugin_info, categories, templates, output_dir, output_name, plugin_type): # For some reason, this line is changing plugin_info: # text = templates['list_of_CATEGORY_modules'].render(template_data) # To avoid that, make a deepcopy of the data. # We should track that down and fix it at some point in the future. 
plugin_info = deepcopy(plugin_info) for category in sorted(categories.keys()): module_map = categories[category] category_filename = output_name % category display.display("*** recording category %s in %s ***" % (category, category_filename)) # start a new category file category_name = category.replace("_", " ") category_title = category_name.title() subcategories = dict((k, v) for k, v in module_map.items() if k != '_modules') template_data = {'title': category_title, 'category_name': category_name, 'category': module_map, 'subcategories': subcategories, 'module_info': plugin_info, 'plugin_type': plugin_type } text = templates['list_of_CATEGORY_modules'].render(template_data) write_data(text, output_dir, category_filename) def process_support_levels(plugin_info, templates, output_dir, plugin_type): supported_by = {'Ansible Core Team': {'slug': 'core_supported', 'modules': [], 'output': 'core_maintained.rst', 'blurb': "These are :doc:`modules maintained by the" " Ansible Core Team<core_maintained>` and will always ship" " with Ansible itself."}, 'Ansible Network Team': {'slug': 'network_supported', 'modules': [], 'output': 'network_maintained.rst', 'blurb': "These are :doc:`modules maintained by the" " Ansible Network Team<network_maintained>` in" " a relationship similar to how the Ansible Core Team" " maintains the Core modules."}, 'Ansible Partners': {'slug': 'certified_supported', 'modules': [], 'output': 'partner_maintained.rst', 'blurb': """ Some examples of :doc:`Certified Modules<partner_maintained>` are those submitted by other companies. Maintainers of these types of modules must watch for any issues reported or pull requests raised against the module. The Ansible Core Team will review all modules becoming certified. Core committers will review proposed changes to existing Certified Modules once the community maintainers of the module have approved the changes. Core committers will also ensure that any issues that arise due to Ansible engine changes will be remediated. Also, it is strongly recommended (but not presently required) for these types of modules to have unit tests. These modules are currently shipped with Ansible, but might be shipped separately in the future. """}, 'Ansible Community': {'slug': 'community_supported', 'modules': [], 'output': 'community_maintained.rst', 'blurb': """ These are :doc:`modules maintained by the Ansible Community<community_maintained>`. They **are not** supported by the Ansible Core Team or by companies/partners associated to the module. They are still fully usable, but the response rate to issues is purely up to the community. Best effort support will be provided but is not covered under any support contracts. These modules are currently shipped with Ansible, but will most likely be shipped separately in the future. 
"""}, } # only gen support pages for modules for now, need to split and namespace templates and generated docs if plugin_type == 'plugins': return # Separate the modules by support_level for module, info in plugin_info.items(): if not info.get('metadata', None): display.warning('no metadata for %s' % module) continue if info['metadata']['supported_by'] == 'core': supported_by['Ansible Core Team']['modules'].append(module) elif info['metadata']['supported_by'] == 'network': supported_by['Ansible Network Team']['modules'].append(module) elif info['metadata']['supported_by'] == 'certified': supported_by['Ansible Partners']['modules'].append(module) elif info['metadata']['supported_by'] == 'community': supported_by['Ansible Community']['modules'].append(module) else: raise AnsibleError('Unknown supported_by value: %s' % info['metadata']['supported_by']) # Render the module lists for maintainers, data in supported_by.items(): template_data = {'maintainers': maintainers, 'modules': data['modules'], 'slug': data['slug'], 'module_info': plugin_info, 'plugin_type': plugin_type } text = templates['support_list'].render(template_data) write_data(text, output_dir, data['output']) def validate_options(options): ''' validate option parser options ''' if not options.module_dir: sys.exit("--module-dir is required", file=sys.stderr) if not os.path.exists(options.module_dir): sys.exit("--module-dir does not exist: %s" % options.module_dir, file=sys.stderr) if not options.template_dir: sys.exit("--template-dir must be specified") def main(): # INIT p = generate_parser() (options, args) = p.parse_args() if not options.template_dir: options.template_dir = ["hacking/templates"] validate_options(options) display.verbosity = options.verbosity plugin_type = options.plugin_type # prep templating templates = jinja2_environment(options.template_dir, options.type, plugin_type) # set file/directory structure if plugin_type == 'module': # trim trailing s off of plugin_type for plugin_type=='modules'. ie 'copy_module.rst' outputname = '%s_' + '%s.rst' % plugin_type output_dir = options.output_dir else: # for plugins, just use 'ssh.rst' vs 'ssh_module.rst' outputname = '%s.rst' output_dir = '%s/plugins/%s' % (options.output_dir, plugin_type) display.vv('output name: %s' % outputname) display.vv('output dir: %s' % output_dir) # Convert passed-in limit_to to None or list of modules. 
    if options.limit_to is not None:
        options.limit_to = [s.lower() for s in options.limit_to.split(",")]

    plugin_info, categories = get_plugin_info(options.module_dir, limit_to=options.limit_to, verbose=(options.verbosity > 0))

    categories['all'] = {'_modules': plugin_info.keys()}
    display.vvv(pp.pformat(categories))
    display.vvvvv(pp.pformat(plugin_info))

    # Transform the data
    if options.type == 'rst':
        display.v('Generating rst')
        for key, record in plugin_info.items():
            display.vv(key)
            display.vvvvv(pp.pformat(('record', record)))
            if record.get('doc', None):
                # Guard against a null short_description before stripping it
                short_desc = record['doc']['short_description']
                if short_desc is None:
                    display.warning('short_description for %s is None' % key)
                    short_desc = ''
                record['doc']['short_description'] = rst_ify(short_desc.rstrip('.'))

    if plugin_type == 'module':
        display.v('Generating Categories')
        # Write module master category list
        category_list_text = templates['category_list'].render(categories=sorted(categories.keys()))
        category_index_name = '%ss_by_category.rst' % plugin_type
        write_data(category_list_text, output_dir, category_index_name)

    # Render all the individual plugin pages
    display.v('Generating plugin pages')
    process_plugins(plugin_info, templates, outputname, output_dir, options.ansible_version, plugin_type)

    # Render all the categories for modules
    if plugin_type == 'module':
        display.v('Generating Category lists')
        category_list_name_template = 'list_of_%s_' + '%ss.rst' % plugin_type
        process_categories(plugin_info, categories, templates, output_dir, category_list_name_template, plugin_type)

        # Render the per-support-level pages for modules
        process_support_levels(plugin_info, templates, output_dir, plugin_type)


if __name__ == '__main__':
    main()
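# A small standalone sketch of the version_added gating implemented by
# too_old() above: version strings are reduced to a "major.minor" float and
# anything below TOO_OLD_TO_BE_NOTABLE (1.3) is stripped from the rendered
# docs. The sample version strings are made up for illustration.
for added in ("1.1", "1.2", "1.3", "2.4"):
    major_minor = float(".".join(added.split(".")[:2]))
    print("version_added=%s -> called out: %s" % (added, major_minor >= 1.3))
# version_added=1.1 -> called out: False
# version_added=1.2 -> called out: False
# version_added=1.3 -> called out: True
# version_added=2.4 -> called out: True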
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
#     http://www.kamaelia.org/AUTHORS - please extend this file,
#     not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from Kamaelia.Chassis.Graphline import Graphline
from Kamaelia.Internet.TCPClient import TCPClient
from Kamaelia.Util.Console import ConsoleEchoer, ConsoleReader
from Kamaelia.Util.OneShot import OneShot

import sys

if len(sys.argv) != 3:
    print
    print "Usage is wrong; it should be:"
    print
    print "   ", sys.argv[0], "host", "port"
    sys.exit(1)

host = sys.argv[1]
port = int(sys.argv[2])

Graphline(
    MAKESSL = OneShot(" make ssl "),  # The actual message here is not necessary
    CONSOLE = ConsoleReader(),
    ECHO = ConsoleEchoer(),
    CONNECTION = TCPClient(host, port),
    linkages = {
        ("MAKESSL", "outbox"): ("CONNECTION", "makessl"),
        ("CONSOLE", "outbox"): ("CONNECTION", "inbox"),
        ("CONSOLE", "signal"): ("CONNECTION", "control"),
        ("CONNECTION", "outbox"): ("ECHO", "inbox"),
        ("CONNECTION", "signal"): ("ECHO", "control"),
    }
).run()
""" Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Ambari Agent """ import hashlib from kerberos_common import * from resource_management import * # The hash algorithm to use to generate digests/hashes HASH_ALGORITHM = hashlib.sha224 class KerberosServiceCheck(KerberosScript): def service_check(self, env): import params # If Ambari IS managing Kerberos identities (kerberos-env/manage_identities = true), it is # expected that a (smoke) test principal and its associated keytab file is available for use # ** If not available, this service check will fail # ** If available, this service check will execute # # If Ambari IS NOT managing Kerberos identities (kerberos-env/manage_identities = false), the # smoke test principal and its associated keytab file may not be available # ** If not available, this service check will execute # ** If available, this service check will execute if ((params.smoke_test_principal is not None) and (params.smoke_test_keytab_file is not None) and os.path.isfile(params.smoke_test_keytab_file)): print "Performing kinit using %s" % params.smoke_test_principal ccache_file_name = HASH_ALGORITHM("{0}|{1}".format(params.smoke_test_principal, params.smoke_test_keytab_file)).hexdigest() ccache_file_path = "{0}{1}kerberos_service_check_cc_{2}".format(params.tmp_dir, os.sep, ccache_file_name) kinit_path_local = functions.get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None)) kinit_command = "{0} -c {1} -kt {2} {3}".format(kinit_path_local, ccache_file_path, params.smoke_test_keytab_file, params.smoke_test_principal) try: # kinit Execute(kinit_command, user=params.smoke_user ) finally: File(ccache_file_path, # Since kinit might fail to write to the cache file for various reasons, an existence check should be done before cleanup action = "delete", ) elif params.manage_identities: err_msg = Logger.filter_text("Failed to execute kinit test due to principal or keytab not found or available") raise Fail(err_msg) else: # Ambari is not managing identities so if the smoke user does not exist, indicate why.... print "Skipping this service check since Ambari is not managing Kerberos identities and the smoke user " \ "credentials are not available. To execute this service check, the smoke user principal name " \ "and keytab file location must be set in the cluster_env and the smoke user's keytab file must" \ "exist in the configured location." if __name__ == "__main__": KerberosServiceCheck().execute()
#!/usr/bin/python # -*- coding: utf-8 -*- """ Python-nvd3 is a Python wrapper for NVD3 graph library. NVD3 is an attempt to build re-usable charts and chart components for d3.js without taking away the power that d3.js gives you. Project location : https://github.com/areski/python-nvd3 """ from .NVD3Chart import NVD3Chart, TemplateMixin class discreteBarChart(TemplateMixin, NVD3Chart): """ A discrete bar chart or bar graph is a chart with rectangular bars with lengths proportional to the values that they represent. Python example:: from nvd3 import discreteBarChart chart = discreteBarChart(name='discreteBarChart', height=400, width=400) xdata = ["A", "B", "C", "D", "E", "F"] ydata = [3, 4, 0, -3, 5, 7] chart.add_serie(y=ydata, x=xdata) chart.buildhtml() Javascript generated: .. raw:: html <div id="discreteBarChart"><svg style="height:450px; width:100%"></svg></div> <script> data_discreteBarChart=[{"values": [{"y": 3, "x": "A"}, {"y": 4, "x": "B"}, {"y": 0, "x": "C"}, {"y": -3, "x": "D"}, {"y": 5, "x": "E"}, {"y": 7, "x": "F"}], "key": "Serie 1", "yAxis": "1"}]; nv.addGraph(function() { var chart = nv.models.discreteBarChart(); chart.margin({top: 30, right: 60, bottom: 20, left: 60}); var datum = data_discreteBarChart; chart.yAxis .tickFormat(d3.format(',.0f')); chart.tooltipContent(function(key, y, e, graph) { var x = String(graph.point.x); var y = String(graph.point.y); var y = String(graph.point.y); tooltip_str = '<center><b>'+key+'</b></center>' + y + ' at ' + x; return tooltip_str; }); d3.select('#discreteBarChart svg') .datum(datum) .transition().duration(500) .attr('width', 400) .attr('height', 400) .call(chart); }); </script> """ CHART_FILENAME = "./discretebarchart.html" template_chart_nvd3 = NVD3Chart.template_environment.get_template(CHART_FILENAME) def __init__(self, **kwargs): super(discreteBarChart, self).__init__(**kwargs) self.model = 'discreteBarChart' height = kwargs.get('height', 450) width = kwargs.get('width', None) if kwargs.get('x_is_date', False): self.set_date_flag(True) self.create_x_axis('xAxis', format=kwargs.get('x_axis_format', "%d %b %Y %H %S"), date=True) else: self.create_x_axis('xAxis', format=None) self.create_y_axis('yAxis', format=kwargs.get('y_axis_format', ".0f")) self.set_custom_tooltip_flag(True) self.set_graph_height(height) if width: self.set_graph_width(width)
#!/usr/bin/env python3 import os, sys import json import requests bintray_auth = (os.environ.get('BINTRAY_USER'), os.environ.get('BINTRAY_PASS')) def create_version(version): url = "https://api.bintray.com/packages/kiwix/kiwix/kiwixlib/versions" payload = { 'name': version, 'desc': 'Release of libkiwix' } headers = { 'Content-Type': 'application/json' } r = requests.post(url, data=json.dumps(payload), headers=headers, auth=bintray_auth) rcode = r.status_code if rcode == 409: print("Bintray version %s already exists, skipping." % version) return True rcode_family = rcode // 100 if rcode_family in (2, 3): print("Bintray Version created!") return True print("ERROR : Bintray API response {}".format(rcode)) return False def upload(version, filepath, artefact): url_template = "https://api.bintray.com/content/kiwix/kiwix/kiwixlib/{version}/org/kiwix/kiwixlib/kiwixlib/{version}/{artefact}" parameters = { 'publish': 1, 'override': 1 } # Upload the main artefact url = url_template.format(version=version, artefact=artefact) with open(filepath, 'rb') as f: r = requests.put(url, data=f, auth=bintray_auth, params=parameters) rcode = r.status_code rcode_family = rcode // 100 if rcode_family not in (2, 3): print("ERROR: Fail to upload artefact") print(r.text) return False return True def upload_from_json(json_path): basedir = os.path.dirname(str(json_path)) with open(str(json_path)) as f: options = json.load(f) if not create_version(options['version']): raise RuntimeError("Cannot create version") for file_ in options['files']: path = os.path.join(basedir, file_) if not upload(options['version'], path, file_): raise RuntimeError("Cannot upload file {}".format(file_)) if __name__ == "__main__": try: info_file = sys.argv[1] except IndexError: print("Usage {} infofile".format(sys.argv[0])) sys.exit(-1) print("Use info file {}".format(info_file)) try: upload_from_json(info_file) except RuntimeError as e: sys.exit(str(e))
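# A sketch of the info-file layout that upload_from_json() above expects:
# a version plus file names resolved relative to the JSON file itself. The
# file names and version here are made up for illustration.
import json

info = {
    "version": "3.0.1",
    "files": ["kiwixlib-3.0.1.aar", "kiwixlib-3.0.1.pom"],
}
with open("kiwixlib_info.json", "w") as f:
    json.dump(info, f, indent=2)
# Running the script with this file as its argument would create (or reuse)
# the Bintray version 3.0.1 and upload both files listed above.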
""" Set up the plot figures, axes, and items to be done for each frame. This module is imported by the plotting routines and then the function setplot is called to set the plot parameters. """ import numpy as np #-------------------------- def setplot(plotdata): #-------------------------- """ Specify what is to be plotted at each frame. Input: plotdata, an instance of pyclaw.plotters.data.ClawPlotData. Output: a modified version of plotdata. """ from pyclaw.plotters import colormaps plotdata.clearfigures() # clear any old figures,axes,items data # Figure for pcolor plotfigure = plotdata.new_plotfigure(name='q[0]', figno=0) # Set up for axes in this figure: plotaxes = plotfigure.new_plotaxes() plotaxes.xlimits = 'auto' plotaxes.ylimits = 'auto' plotaxes.title = 'q[0]' plotaxes.afteraxes = "pylab.axis('scaled')" # Set up for item on these axes: plotitem = plotaxes.new_plotitem(plot_type='2d_pcolor') plotitem.plot_var = 0 plotitem.pcolor_cmap = colormaps.yellow_red_blue plotitem.add_colorbar = True plotitem.show = True # show on plot? # Figure for contour plotfigure = plotdata.new_plotfigure(name='contour', figno=1) # Set up for axes in this figure: plotaxes = plotfigure.new_plotaxes() plotaxes.xlimits = 'auto' plotaxes.ylimits = 'auto' plotaxes.title = 'q[0]' plotaxes.afteraxes = "pylab.axis('scaled')" # Set up for item on these axes: plotitem = plotaxes.new_plotitem(plot_type='2d_contour') plotitem.plot_var = 0 plotitem.contour_levels = np.linspace(0.2,0.9,8) plotitem.contour_colors = 'k' plotitem.show = True # show on plot? # Parameters used only when creating html and/or latex hardcopy # e.g., via pyclaw.plotters.frametools.printframes: plotdata.printfigs = True # print figures plotdata.print_format = 'png' # file format plotdata.print_framenos = 'all' # list of frames to print plotdata.print_fignos = 'all' # list of figures to print plotdata.html = True # create html files of plots? plotdata.html_homelink = '../README.html' # pointer for top of index plotdata.latex = True # create latex file of plots? plotdata.latex_figsperline = 2 # layout of plots plotdata.latex_framesperline = 1 # layout of plots plotdata.latex_makepdf = False # also run pdflatex? return plotdata
import os, sys, time
#import rrdtool

class RRDReporter:
    def __init__(self, rrddir):
        self.step = 5
        self.dir = rrddir
        try:
            os.makedirs(self.dir)
        except:
            pass
        self.now = int(time.time()) / self.step * self.step

    def create(self, filename):
        if os.path.exists(filename):
            return
        try:
            ret = rrdtool.create(filename, "--step", str(self.step),
                    "--start", str(self.now - 10),
                    "DS:sum:GAUGE:20:U:U",
                    "RRA:AVERAGE:0.5:1:181440",
                    "RRA:AVERAGE:0.5:12:181440")
        except:
            pass

    def startGroup(self):
        pass

    def report(self, name, timestamp, value, tags):
        filename = self.dir + name + ".rrd"
        self.create(filename)
        try:
            rrdtool.update(filename, str(timestamp / self.step * self.step) + ":" + str(value))
        except:
            pass

    def endGroup(self):
        pass

    def graph(self, name):
        filename = self.dir + name + ".rrd"
        rrdtool.graph("debug.png", "--start", "-3600",
                "DEF:ino=net.rrd:input:AVERAGE",
                "AREA:ino#00FF00:In traffic")

class GmetricReporter:
    def __init__(self, _cmdGmetric):
        self.cmdGmetric = _cmdGmetric

    def sendToGmetric(self, name, val, group="database", mtype="float", \
            tmax=20, dmax=10):
        cmdExec = self.cmdGmetric \
            + " --name=" + name \
            + " --value=" + str(val) \
            + " --group=" + group \
            + " --type=" + mtype \
            + " --tmax=" + str(tmax) \
            + " --dmax=" + str(dmax)
        try:
            os.system(cmdExec)
        except:
            sys.stderr.write("gmetric reporter failed to send result")

    def startGroup(self):
        pass

    def report(self, name, timestamp, val, tags):
        self.sendToGmetric(name, val)

    def endGroup(self):
        pass

class LoggerReporter:
    def __init__(self, _logFileDir):
        self.logFileDir = _logFileDir
        self.logger = None
        self.openLogFile(time.strftime("%Y-%m-%d", time.localtime()))
        self.logFlushTime = 0

    def openLogFile(self, newdate):
        self.date = newdate
        try:
            if self.logger is not None:
                self.logger.close()
            self.logger = open("%s-%s.log" % (self.logFileDir, newdate), "a")
        except IOError, e:
            sys.stderr.write(str(e))
            sys.stderr.write("logger reporter failed to open log file")
            self.logger = None

    def checkLogger(self, nowtime):
        if nowtime - self.logFlushTime > 60:
            self.logFlushTime = nowtime
            try:
                self.logger.flush()
            except IOError, e:
                sys.stderr.write(str(e))
                sys.stderr.write("logger reporter failed to flush logger")
            newdate = time.strftime("%Y-%m-%d", time.localtime())
            if self.date != newdate:
                self.openLogFile(newdate)

    def report(self, name, timestamp, val, tags):
        try:
            self.logger.write("%s %s\n" % (name, str(val)))
        except IOError, e:
            sys.stderr.write(str(e))
            sys.stderr.write("logger reporter failed to write report")

    def startGroup(self):
        nowtime = time.time()
        self.checkLogger(nowtime)
        try:
            self.logger.write("%d\n" % (nowtime))
        except IOError, e:
            sys.stderr.write(str(e))
            sys.stderr.write("logger reporter failed to start group")

    def endGroup(self):
        pass

class StdoutReporter:
    def startGroup(self):
        pass

    def hasType(self):
        return True

    def report(self, name, timestamp, val, tags):
        print "%s %d %s %s" % (name, timestamp, str(val), tags)

    def report_agg(self, timestamp, reportlist):
        #for i in reportlist: #for each pinfo result
        #    for key in i:
        #        print key, i[key]
        #print reportlist
        for i in reportlist:
            #if i['container_id'] != '':
            #    continue
            output = '%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s' % (i['job_id'],
                i['task_id'], i['container_id'], str(i['cpu_user']), str(i['vmrss']),
                str(i['ivmss']), str(i['vmsize']), str(i['Xmx']), i['proc'],
                str(int(i['readbytes'])/1024), str(int(i['writebytes'])/1024),
                str(float(i['map_p'])), str(float(i['red_p'])), str(int(i['ect'])))
            print output

    def endGroup(self):
        sys.stdout.flush()

class ReporterList:
    def __init__(self):
        self.list = []

    def init1(self, cmdGmetric, logFileDir):
        self.add(GmetricReporter(cmdGmetric))
        self.add(LoggerReporter(logFileDir))

    def init2(self, attr):
        self.add(GmetricReporter(attr.get("gmetriccmd", "gmetric")))
        self.add(StdoutReporter())

    def add(self, reporter):
        self.list.append(reporter)

    def startGroup(self):
        for rep in self.list:
            rep.startGroup()

    def reportLogger(self, name, timestamp, value, tags):
        self.list[1].report(name, timestamp, value, tags)

    def report(self, name, timestamp, value, tags):
        for rep in self.list:
            rep.report(name, timestamp, value, tags)

    def endGroup(self):
        for rep in self.list:
            rep.endGroup()
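
# Minimal usage sketch (values are illustrative): wire up a ReporterList via
# init2(), which registers a GmetricReporter plus a StdoutReporter, then emit
# one metric group. report() fans each sample out to every registered reporter.
#
#   reporters = ReporterList()
#   reporters.init2({"gmetriccmd": "gmetric"})
#   reporters.startGroup()
#   reporters.report("db.qps", int(time.time()), 1234, tags="host=db01")
#   reporters.endGroup()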
#!/usr/bin/env python3
# Copyright (c) 2015-2016 The Presidentielcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.

from test_framework.mininode import *
from test_framework.test_framework import PresidentielcoinTestFramework
from test_framework.util import *
import time

'''
Test behavior of -maxuploadtarget.

* Verify that getdata requests for old blocks (>1week) are dropped
if uploadtarget has been reached.
* Verify that getdata requests for recent blocks are served even
if uploadtarget has been reached.
* Verify that the upload counters are reset after 24 hours.
'''

# TestNode: bare-bones "peer".  Used mostly as a conduit for a test to send
# p2p messages to a node, generating the messages in the main testing logic.
class TestNode(NodeConnCB):
    def __init__(self):
        NodeConnCB.__init__(self)
        self.connection = None
        self.ping_counter = 1
        self.last_pong = msg_pong()
        self.block_receive_map = {}

    def add_connection(self, conn):
        self.connection = conn
        self.peer_disconnected = False

    def on_inv(self, conn, message):
        pass

    # Track the last getdata message we receive (used in the test)
    def on_getdata(self, conn, message):
        self.last_getdata = message

    def on_block(self, conn, message):
        message.block.calc_sha256()
        try:
            self.block_receive_map[message.block.sha256] += 1
        except KeyError as e:
            self.block_receive_map[message.block.sha256] = 1

    # Spin until verack message is received from the node.
    # We use this to signal that our test can begin. This
    # is called from the testing thread, so it needs to acquire
    # the global lock.
    def wait_for_verack(self):
        def veracked():
            return self.verack_received
        return wait_until(veracked, timeout=10)

    def wait_for_disconnect(self):
        def disconnected():
            return self.peer_disconnected
        return wait_until(disconnected, timeout=10)

    # Wrapper for the NodeConn's send_message function
    def send_message(self, message):
        self.connection.send_message(message)

    def on_pong(self, conn, message):
        self.last_pong = message

    def on_close(self, conn):
        self.peer_disconnected = True

    # Sync up with the node after delivery of a block
    def sync_with_ping(self, timeout=30):
        def received_pong():
            return (self.last_pong.nonce == self.ping_counter)
        self.connection.send_message(msg_ping(nonce=self.ping_counter))
        success = wait_until(received_pong, timeout=timeout)
        self.ping_counter += 1
        return success

class MaxUploadTest(PresidentielcoinTestFramework):
    def add_options(self, parser):
        parser.add_option("--testbinary", dest="testbinary",
                          default=os.getenv("PRESIDENTIELCOIND", "presidentielcoind"),
                          help="presidentielcoind binary to test")

    def __init__(self):
        super().__init__()
        self.setup_clean_chain = True
        self.num_nodes = 1

        self.utxo = []
        self.txouts = gen_return_txouts()

    def setup_network(self):
        # Start a node with maxuploadtarget of 800 MB (/24h)
        self.nodes = []
        self.nodes.append(start_node(0, self.options.tmpdir, ["-debug", "-maxuploadtarget=800", "-blockmaxsize=999000"]))

    def mine_full_block(self, node, address):
        # Want to create a full block
        # We'll generate a 66k transaction below, and 14 of them are close to the 1MB block limit
        for j in range(14):
            if len(self.utxo) < 14:
                self.utxo = node.listunspent()
            inputs = []
            outputs = {}
            t = self.utxo.pop()
            inputs.append({"txid": t["txid"], "vout": t["vout"]})
            remchange = t["amount"] - Decimal("0.001000")
            outputs[address] = remchange
            # Create a basic transaction that will send change back to ourselves after accounting for a fee,
            # and then insert the 128 generated 
transaction outs in the middle. rawtx[92] is where the
            # number of txouts is stored and is the only thing we overwrite from the original transaction
            rawtx = node.createrawtransaction(inputs, outputs)
            newtx = rawtx[0:92]
            newtx = newtx + self.txouts
            newtx = newtx + rawtx[94:]

            # Appears to be ever so slightly faster to sign with SIGHASH_NONE
            signresult = node.signrawtransaction(newtx, None, None, "NONE")
            txid = node.sendrawtransaction(signresult["hex"], True)
        # Mine a full sized block which will be these transactions we just created
        node.generate(1)

    def run_test(self):
        # Before we connect anything, we first set the time on the node
        # to be in the past, otherwise things break because the CNode
        # time counters can't be reset backward after initialization
        old_time = int(time.time() - 2*60*60*24*7)
        self.nodes[0].setmocktime(old_time)

        # Generate some old blocks
        self.nodes[0].generate(130)

        # test_nodes[0] will only request old blocks
        # test_nodes[1] will only request new blocks
        # test_nodes[2] will test resetting the counters
        test_nodes = []
        connections = []

        for i in range(3):
            test_nodes.append(TestNode())
            connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_nodes[i]))
            test_nodes[i].add_connection(connections[i])

        NetworkThread().start() # Start up network handling in another thread
        [x.wait_for_verack() for x in test_nodes]

        # Test logic begins here

        # Now mine a big block
        self.mine_full_block(self.nodes[0], self.nodes[0].getnewaddress())

        # Store the hash; we'll request this later
        big_old_block = self.nodes[0].getbestblockhash()
        old_block_size = self.nodes[0].getblock(big_old_block, True)['size']
        big_old_block = int(big_old_block, 16)

        # Advance to two days ago
        self.nodes[0].setmocktime(int(time.time()) - 2*60*60*24)

        # Mine one more block, so that the prior block looks old
        self.mine_full_block(self.nodes[0], self.nodes[0].getnewaddress())

        # We'll be requesting this new block too
        big_new_block = self.nodes[0].getbestblockhash()
        new_block_size = self.nodes[0].getblock(big_new_block)['size']
        big_new_block = int(big_new_block, 16)

        # test_nodes[0] will test what happens if we just keep requesting
        # the same big old block too many times (expect: disconnect)

        getdata_request = msg_getdata()
        getdata_request.inv.append(CInv(2, big_old_block))

        max_bytes_per_day = 800*1024*1024
        daily_buffer = 144 * 4000000
        max_bytes_available = max_bytes_per_day - daily_buffer
        success_count = max_bytes_available // old_block_size

        # 576MB will be reserved for relaying new blocks, so expect this to
        # succeed for ~235 tries.
        for i in range(success_count):
            test_nodes[0].send_message(getdata_request)
            test_nodes[0].sync_with_ping()
            assert_equal(test_nodes[0].block_receive_map[big_old_block], i+1)

        assert_equal(len(self.nodes[0].getpeerinfo()), 3)
        # At most a couple more tries should succeed (depending on how long
        # the test has been running so far).
        for i in range(3):
            test_nodes[0].send_message(getdata_request)
        test_nodes[0].wait_for_disconnect()
        assert_equal(len(self.nodes[0].getpeerinfo()), 2)
        print("Peer 0 disconnected after downloading old block too many times")

        # Requesting the current block on test_nodes[1] should succeed indefinitely,
        # even when over the max upload target.
        
# We'll try 800 times
        getdata_request.inv = [CInv(2, big_new_block)]
        for i in range(800):
            test_nodes[1].send_message(getdata_request)
            test_nodes[1].sync_with_ping()
            assert_equal(test_nodes[1].block_receive_map[big_new_block], i+1)

        print("Peer 1 able to repeatedly download new block")

        # But if test_nodes[1] tries for an old block, it gets disconnected too.
        getdata_request.inv = [CInv(2, big_old_block)]
        test_nodes[1].send_message(getdata_request)
        test_nodes[1].wait_for_disconnect()
        assert_equal(len(self.nodes[0].getpeerinfo()), 1)

        print("Peer 1 disconnected after trying to download old block")

        print("Advancing system time on node to clear counters...")

        # If we advance the time by 24 hours, then the counters should reset,
        # and test_nodes[2] should be able to retrieve the old block.
        self.nodes[0].setmocktime(int(time.time()))
        test_nodes[2].sync_with_ping()
        test_nodes[2].send_message(getdata_request)
        test_nodes[2].sync_with_ping()
        assert_equal(test_nodes[2].block_receive_map[big_old_block], 1)

        print("Peer 2 able to download old block")

        [c.disconnect_node() for c in connections]

        # Stop and restart node 0 with 1MB maxuploadtarget, whitelist 127.0.0.1
        print("Restarting nodes with -whitelist=127.0.0.1")
        stop_node(self.nodes[0], 0)
        self.nodes[0] = start_node(0, self.options.tmpdir, ["-debug", "-whitelist=127.0.0.1", "-maxuploadtarget=1", "-blockmaxsize=999000"])

        # Recreate/reconnect 3 test nodes
        test_nodes = []
        connections = []

        for i in range(3):
            test_nodes.append(TestNode())
            connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_nodes[i]))
            test_nodes[i].add_connection(connections[i])

        NetworkThread().start() # Start up network handling in another thread
        [x.wait_for_verack() for x in test_nodes]

        # Retrieve 20 blocks which should be enough to break the 1MB limit
        getdata_request.inv = [CInv(2, big_new_block)]
        for i in range(20):
            test_nodes[1].send_message(getdata_request)
            test_nodes[1].sync_with_ping()
            assert_equal(test_nodes[1].block_receive_map[big_new_block], i+1)

        getdata_request.inv = [CInv(2, big_old_block)]
        test_nodes[1].send_message(getdata_request)
        test_nodes[1].wait_for_disconnect()
        assert_equal(len(self.nodes[0].getpeerinfo()), 3) # node is still connected because of the whitelist

        print("Peer 1 still connected after trying to download old block (whitelisted)")

        [c.disconnect_node() for c in connections]

if __name__ == '__main__':
    MaxUploadTest().main()
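
# Standalone invocation sketch (the file name is assumed to follow the test
# framework's convention; substitute whatever this test is saved as):
#   PRESIDENTIELCOIND=/path/to/presidentielcoind ./maxuploadtarget.py
# or, equivalently, via the option defined in add_options():
#   ./maxuploadtarget.py --testbinary=/path/to/presidentielcoind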
# Sources used within this script:
# 1. (26.03.2016 @ 15:40) -
#    https://stackoverflow.com/questions/14693646/writing-to-csv-file-python
# 2. (26.03.2016 @ 18:03) -
#    https://stackoverflow.com/questions/12400256/python-converting-epoch-time-into-the-datetime
# 3. (26.03.2016 @ 18:43) -
#    http://effbot.org/pyfaq/how-do-i-copy-an-object-in-python.htm
# 4. (01.04.2016 @ 15:45) -
#    http://stackoverflow.com/questions/7301110/why-does-return-list-sort-return-none-not-the-list

import copy      # Necessary to copy the value of the starting year - needed for the correct csv file name
import csv       # Necessary to write data to csv files
import datetime  # Necessary for calculating time differences

import numpy as np  # Necessary for calculating the arithmetic median of values

import os   # Necessary to get the name of the currently processed file
import sys  # Necessary to use script arguments

from pymongo import MongoClient  # Necessary to make use of MongoDB


def check_script_arguments():
    """Checks if enough and correct arguments have been given to run this script adequately

    1. It checks in the first instance if enough arguments have been given
    2. Then necessary variables will be filled with appropriate values

    Args:
        -
    Returns:
        -
    """
    global argument_year_beginning, argument_year_ending

    # Whenever not enough arguments were given (the script name plus two years are required)
    if len(sys.argv) < 3:
        print("Not enough arguments were given...")
        print("Terminating script now!")
        sys.exit()
    else:
        # Writes necessary values into the variables
        argument_year_beginning = int(sys.argv[1])
        argument_year_ending = int(sys.argv[2])


def initialize_mongo_db_parameters(actually_processed_year):
    """Instantiates all necessary variables for the correct usage of the mongoDB-Client

    Args:
        actually_processed_year (int) : The year with which parameters the database should be accessed
    Returns:
        -
    """
    global mongo_DB_Client_Instance
    global mongo_DB_Threads_Instance
    global mongo_DB_Thread_Collection
    global mongo_DB_Comments_Instance

    mongo_DB_Client_Instance = MongoClient('localhost', 27017)
    mongo_DB_Threads_Instance = mongo_DB_Client_Instance['iAMA_Reddit_Threads_' + str(actually_processed_year)]
    mongo_DB_Thread_Collection = mongo_DB_Threads_Instance.collection_names()
    mongo_DB_Comments_Instance = mongo_DB_Client_Instance['iAMA_Reddit_Comments_' + str(actually_processed_year)]


def start_data_generation_for_analysis():
    """Starts the whole combination of generating data, checking data and writing them into csv files

    1. Triggers the data generation process and moves forward within the years
        - by moving through the years a csv file will be created for every year
    Args:
        -
    Returns:
        -
    """
    global year_actually_in_progress

    # Copies the value of the beginning year, because it will be changed due to moving forward within the years
    year_actually_in_progress = copy.copy(argument_year_beginning)

    while year_actually_in_progress != argument_year_ending:
        generate_data()
        add_actual_year_list_to_global_list(list_current_year)
        write_csv_data(list_current_year)

        year_actually_in_progress += 1

        # Reinitializes the mongodb with the new year parameter here
        # noinspection PyTypeChecker
        initialize_mongo_db_parameters(year_actually_in_progress)

    if year_actually_in_progress == argument_year_ending:
        generate_data()
        add_actual_year_list_to_global_list(list_current_year)
        write_csv_data(list_current_year)

        # Value setting is necessary for correct file writing
        year_actually_in_progress = "ALL"

        # Writes a csv file containing information for all years..
        
# This is very useful, so we do not have to merge all those .csv-files by hand
        write_csv_data(list_global_year)


def generate_data():
    """Calculates various information about thread and iAMA behaviour for the year which is currently being processed

    After the calculations of every iteration the results will be appended to a list, which will contain all that
    information for the current year... That list will be written to csv and appended to a global list in other
    methods

    Args:
        -
    Returns:
        -
    """
    global list_current_year

    # Empty that list for correct processing
    list_current_year = []

    print("Generating data for year " + str(year_actually_in_progress) + " now...")

    # noinspection PyTypeChecker
    for j, val in enumerate(mongo_DB_Thread_Collection):

        # Skips the system.indexes-table which is automatically created by mongodb itself
        if not val == "system.indexes":

            # Temporary value assignments for better code understanding
            temp_thread = mongo_DB_Threads_Instance[val]
            temp_thread_creation_time = temp_thread.find()[0].get("created_utc")
            temp_thread_title = temp_thread.find()[0].get("title")
            temp_thread_downs = temp_thread.find()[0].get("downs")
            temp_thread_ups = temp_thread.find()[0].get("ups")
            temp_thread_author = temp_thread.find()[0].get("author")
            temp_thread_num_comments_skewed = temp_thread.find()[0].get("num_Comments")

            # Removes iAMA-Requests out of our selection
            if "request" in temp_thread_title.lower() \
                    and "as requested" not in temp_thread_title.lower() \
                    and "by request" not in temp_thread_title.lower() \
                    and "per request" not in temp_thread_title.lower() \
                    and "request response" not in temp_thread_title.lower():

                # Continue skips processing of those elements which are requests here
                continue

            # Will contain information about different thread calculations
            values_for_analysis = process_specific_thread(val, temp_thread_creation_time, temp_thread_author)

            # Whenever the thread calculation is not None
            # It is only None whenever only 1 (from "AutoModerator") or 0 comments / reactions have been posted
            if values_for_analysis is not None:
                dict_to_append = {
                    'Year': year_actually_in_progress,
                    'Thread_id': str(val),
                    'Thread_author': temp_thread_author,
                    'Thread_ups': temp_thread_ups,
                    'Thread_downs': temp_thread_downs,
                    'Thread_creation_time_stamp': temp_thread_creation_time,
                    'Thread_average_comment_vote_score_total': values_for_analysis["comment_total_vote_average"],
                    'Thread_average_comment_vote_score_tier_1': values_for_analysis["comment_tier_1_vote_average"],
                    'Thread_average_comment_vote_score_tier_x': values_for_analysis["comment_tier_x_vote_average"],
                    'Thread_average_question_vote_score_total': values_for_analysis["question_total_vote_average"],
                    'Thread_average_question_vote_score_tier_1': values_for_analysis["question_tier_1_vote_average"],
                    'Thread_average_question_vote_score_tier_x': values_for_analysis["question_tier_x_vote_average"],
                    'Thread_num_comments_total_skewed': temp_thread_num_comments_skewed,
                    'Thread_num_comments_total': values_for_analysis["comments_total"],
                    'Thread_num_comments_tier_1': values_for_analysis["comments_tier_1"],
                    'Thread_num_comments_tier_x': values_for_analysis["comments_tier_x"],
                    'Thread_num_questions_total': values_for_analysis["questions_total"],
                    'Thread_num_questions_tier_1': values_for_analysis["questions_tier_1"],
                    'Thread_num_questions_tier_x': values_for_analysis["questions_tier_x"],
                    'Thread_num_questions_answered_by_iama_host_total':
                        values_for_analysis["questions_answered_by_iama_host_total"],
                    'Thread_num_questions_answered_by_iama_host_tier_1':
values_for_analysis["questions_answered_by_iama_host_tier_1"], 'Thread_num_questions_answered_by_iama_host_tier_x': values_for_analysis["questions_answered_by_iama_host_tier_x"], 'Thread_num_comments_answered_by_iama_host_total': values_for_analysis["comments_answered_by_iama_host_total"], 'Thread_num_comments_answered_by_iama_host_tier_1': values_for_analysis["comments_answered_by_iama_host_tier_1"], 'Thread_num_comments_answered_by_iama_host_tier_x': values_for_analysis["comments_answered_by_iama_host_tier_x"], 'Thread_average_reaction_time_between_comments_total': values_for_analysis["reaction_time_between_comments_total_average"], 'Thread_average_reaction_time_between_comments_tier_1': values_for_analysis["reaction_time_between_comments_tier_1_average"], 'Thread_average_reaction_time_between_comments_tier_x': values_for_analysis["reaction_time_between_comments_tier_x_average"], 'Thread_average_reaction_time_between_questions_total': values_for_analysis["reaction_time_between_questions_total_average"], 'Thread_average_reaction_time_between_questions_tier_1': values_for_analysis["reaction_time_between_questions_tier_1_average"], 'Thread_average_reaction_time_between_questions_tier_x': values_for_analysis["reaction_time_between_questions_tier_x_average"], 'Thread_average_iama_host_response_to_question_time_total': values_for_analysis["iama_host_response_to_question_time_total_average"], 'Thread_average_iama_host_response_to_question_time_tier_1': values_for_analysis["iama_host_response_to_question_time_tier_1_average"], 'Thread_average_iama_host_response_to_question_time_tier_x': values_for_analysis["iama_host_response_to_question_time_tier_x_average"], 'Thread_average_iama_host_response_to_comment_time_total': values_for_analysis["iama_host_response_to_comment_time_total_average"], 'Thread_average_iama_host_response_to_comment_time_tier_1': values_for_analysis["iama_host_response_to_comment_time_tier_1_average"], 'Thread_average_iama_host_response_to_comment_time_tier_x': values_for_analysis["iama_host_response_to_comment_time_tier_x_average"], 'Thread_life_span_question': values_for_analysis["time_value_of_last_question"], 'Thread_life_span_comment': values_for_analysis["time_value_of_last_comment"], 'Thread_amount_of_questioners_total': values_for_analysis["amount_of_questioners_total"], 'Thread_amount_of_questioners_tier_1': values_for_analysis["amount_of_questioners_tier_1"], 'Thread_amount_of_questioners_tier_x': values_for_analysis["amount_of_questioners_tier_x"], 'Thread_amount_of_commentators_total': values_for_analysis["amount_of_commentators_total"], 'Thread_amount_of_commentators_tier_1': values_for_analysis["amount_of_commentators_tier_1"], 'Thread_amount_of_commentators_tier_x': values_for_analysis["amount_of_commentators_tier_x"] } list_current_year.append(dict_to_append) def process_specific_thread(thread_id, thread_creation_time_stamp, thread_author): """Does the needed operations, for gaining information / knowledge about threads on the given thread id After the caluclations have every iteration the results will ber appended to a list, which will contain all that information for the current year... That list will be writtend to csv and appended to a global list in other methods Args: thread_id (str) : The id, needed for operating (i.E. 
comparison of parent-child relations)
        thread_creation_time_stamp (int) : Creation time stamp of the thread, needed for time difference calculation
        thread_author (str): The name of the thread's author, needed for answer checking of a post
    Returns:
        -
    """

    # Makes the global comments instance locally available here
    global mongo_DB_Comments_Instance
    comments_collection = mongo_DB_Comments_Instance[thread_id]

    # Generating a list out of the cursor is absolutely necessary, because the cursor can be exhausted..
    # During the calculations we have to do many iterations on a thread, but by using only one cursor for many
    # iterations it will be depleted very fast...
    # Instead of generating a new cursor for every iteration, this is a far more performant way to do it
    comments_cursor = list(comments_collection.find())

    # Comments and questions are separated from one another !!
    dict_with_values_to_be_returned = {
        "comment_total_vote_average": [],
        "comment_tier_1_vote_average": [],
        "comment_tier_x_vote_average": [],
        "question_total_vote_average": [],
        "question_tier_1_vote_average": [],
        "question_tier_x_vote_average": [],
        "reaction_time_between_comments_total_average": [],
        "reaction_time_between_comments_tier_1_average": [],
        "reaction_time_between_comments_tier_x_average": [],
        "reaction_time_between_questions_total_average": [],
        "reaction_time_between_questions_tier_1_average": [],
        "reaction_time_between_questions_tier_x_average": [],
        "iama_host_response_to_comment_time_total_average": [],
        "iama_host_response_to_comment_time_tier_1_average": [],
        "iama_host_response_to_comment_time_tier_x_average": [],
        "iama_host_response_to_question_time_total_average": [],
        "iama_host_response_to_question_time_tier_1_average": [],
        "iama_host_response_to_question_time_tier_x_average": [],
        # Comments from the iama host are excluded
        "comments_total": 0,
        "comments_tier_1": 0,
        "comments_tier_x": 0,
        # Questions from the iama host are excluded
        "questions_total": 0,
        "questions_tier_1": 0,
        "questions_tier_x": 0,
        "questions_answered_by_iama_host_total": 0,
        "questions_answered_by_iama_host_tier_1": 0,
        "questions_answered_by_iama_host_tier_x": 0,
        "comments_answered_by_iama_host_total": 0,
        "comments_answered_by_iama_host_tier_1": 0,
        "comments_answered_by_iama_host_tier_x": 0,
        "time_value_of_last_comment": 0,
        "time_value_of_last_question": 0,
        # Questioners do not include the iama host itself
        "amount_of_questioners_total": 0,
        "amount_of_questioners_tier_1": [],
        "amount_of_questioners_tier_x": [],
        # Commentators do not include the iama host itself
        "amount_of_commentators_total": 0,
        "amount_of_commentators_tier_1": [],
        "amount_of_commentators_tier_x": []
    }

    # Every comment
    for i, val in enumerate(comments_cursor):

        # Whenever the iterated comment was created by user "AutoModerator" skip it
        if val.get("author") != "AutoModerator":
            comment_text = val.get("body")
            comment_author = val.get("author")
            comment_parent_id = val.get("parent_id")
            comment_actual_id = val.get("name")

            # Check whether that iterated comment is answered by the host
            comment_has_been_answered_by_thread_author = check_if_comment_has_been_answered_by_thread_author(
                thread_author, comment_actual_id, comments_cursor)

            if comment_text is not None and comment_author is not None and comment_parent_id is not None:
                comment_creation_time = float(val.get("created_utc"))
                bool_comment_is_question = check_if_comment_is_a_question(comment_text)
                bool_comment_is_on_tier_1 = check_if_comment_is_on_tier_1(comment_parent_id)
                bool_comment_is_not_from_thread_author = 
check_if_comment_is_not_from_thread_author( thread_author, comment_author) # Whenever the iterated "reaction" is a question if bool_comment_is_question is True and bool_comment_is_not_from_thread_author is True: dict_with_values_to_be_returned["questions_total"] += 1 dict_with_values_to_be_returned["question_total_vote_average"].append(val.get("ups")) dict_with_values_to_be_returned["reaction_time_between_questions_total_average"]. \ append(comment_creation_time) if comment_creation_time > dict_with_values_to_be_returned["time_value_of_last_question"]: dict_with_values_to_be_returned["time_value_of_last_question"] = comment_creation_time else: pass if comment_has_been_answered_by_thread_author["question_Answered_From_Host"] is True: dict_with_values_to_be_returned["questions_answered_by_iama_host_total"] += 1 answer_time_stamp_iama_host = comment_has_been_answered_by_thread_author["time_Stamp_Answer"] # Adds the calculated answer time to a local list for TOTAL # noinspection PyTypeChecker answer_time_iama_host_in_seconds = calculate_time_difference( comment_creation_time, answer_time_stamp_iama_host) dict_with_values_to_be_returned["iama_host_response_to_question_time_total_average"] \ .append(answer_time_iama_host_in_seconds) else: pass # Whenever we are on tier 1 if bool_comment_is_on_tier_1 is True: dict_with_values_to_be_returned["questions_tier_1"] += 1 dict_with_values_to_be_returned["question_tier_1_vote_average"].append(val.get("ups")) dict_with_values_to_be_returned["reaction_time_between_questions_tier_1_average"]. \ append(comment_creation_time) if comment_author not in dict_with_values_to_be_returned["amount_of_questioners_tier_1"]: dict_with_values_to_be_returned["amount_of_questioners_tier_1"].append(comment_author) else: pass if comment_has_been_answered_by_thread_author["question_Answered_From_Host"] is True: dict_with_values_to_be_returned["questions_answered_by_iama_host_tier_1"] += 1 answer_time_stamp_iama_host = \ comment_has_been_answered_by_thread_author["time_Stamp_Answer"] # Adds the calculated answer time to a local list for TIER 1 # noinspection PyTypeChecker answer_time_iama_host_in_seconds = calculate_time_difference( comment_creation_time, answer_time_stamp_iama_host) dict_with_values_to_be_returned["iama_host_response_to_question_time_tier_1_average"] \ .append(answer_time_iama_host_in_seconds) else: pass # Whenever we are NOT on tier 1 but on any other tier else: dict_with_values_to_be_returned["questions_tier_x"] += 1 dict_with_values_to_be_returned["question_tier_x_vote_average"].append(val.get("ups")) dict_with_values_to_be_returned["reaction_time_between_questions_tier_x_average"]. 
\ append(comment_creation_time) if comment_author not in dict_with_values_to_be_returned["amount_of_questioners_tier_x"]: dict_with_values_to_be_returned["amount_of_questioners_tier_x"].append(comment_author) else: pass if comment_has_been_answered_by_thread_author["question_Answered_From_Host"] is True: dict_with_values_to_be_returned["questions_answered_by_iama_host_tier_x"] += 1 answer_time_stamp_iama_host = \ comment_has_been_answered_by_thread_author["time_Stamp_Answer"] # Adds the calculated answer time to a local list for TIER X # noinspection PyTypeChecker answer_time_iama_host_in_seconds = calculate_time_difference( comment_creation_time, answer_time_stamp_iama_host) dict_with_values_to_be_returned["iama_host_response_to_question_time_tier_x_average"] \ .append(answer_time_iama_host_in_seconds) else: pass # Whenever the iterated "reaction" is just a comment and no question elif bool_comment_is_question is False and bool_comment_is_not_from_thread_author is True: dict_with_values_to_be_returned["comments_total"] += 1 dict_with_values_to_be_returned["comment_total_vote_average"].append(val.get("ups")) dict_with_values_to_be_returned["reaction_time_between_comments_total_average"]. \ append(comment_creation_time) if comment_creation_time > dict_with_values_to_be_returned["time_value_of_last_comment"]: dict_with_values_to_be_returned["time_value_of_last_comment"] = comment_creation_time else: pass if comment_has_been_answered_by_thread_author["question_Answered_From_Host"] is True: dict_with_values_to_be_returned["comments_answered_by_iama_host_total"] += 1 answer_time_stamp_iama_host = comment_has_been_answered_by_thread_author["time_Stamp_Answer"] # Adds the calculated answer time to a local list for TOTAL # noinspection PyTypeChecker answer_time_iama_host_in_seconds = calculate_time_difference( comment_creation_time, answer_time_stamp_iama_host) dict_with_values_to_be_returned["iama_host_response_to_comment_time_total_average"] \ .append(answer_time_iama_host_in_seconds) else: pass # Whenever that reaction is a comment and on tier 1 if bool_comment_is_on_tier_1 is True: dict_with_values_to_be_returned["comments_tier_1"] += 1 dict_with_values_to_be_returned["comment_tier_1_vote_average"].append(val.get("ups")) dict_with_values_to_be_returned["reaction_time_between_comments_tier_1_average"]. \ append(comment_creation_time) if comment_author not in dict_with_values_to_be_returned["amount_of_commentators_tier_1"]: dict_with_values_to_be_returned["amount_of_commentators_tier_1"].append(comment_author) else: pass if comment_has_been_answered_by_thread_author["question_Answered_From_Host"] is True: dict_with_values_to_be_returned["comments_answered_by_iama_host_tier_1"] += 1 answer_time_stamp_iama_host = \ comment_has_been_answered_by_thread_author["time_Stamp_Answer"] # Adds the calculated answer time to a local list for TIER 1 # noinspection PyTypeChecker answer_time_iama_host_in_seconds = calculate_time_difference( comment_creation_time, answer_time_stamp_iama_host) dict_with_values_to_be_returned["iama_host_response_to_comment_time_tier_1_average"] \ .append(answer_time_iama_host_in_seconds) else: pass # Whenever this reaction is a comment and on tier x else: dict_with_values_to_be_returned["comments_tier_x"] += 1 dict_with_values_to_be_returned["comment_tier_x_vote_average"].append(val.get("ups")) dict_with_values_to_be_returned["reaction_time_between_comments_tier_x_average"]. 
\
                            append(comment_creation_time)

                        if comment_author not in dict_with_values_to_be_returned["amount_of_commentators_tier_x"]:
                            dict_with_values_to_be_returned["amount_of_commentators_tier_x"].append(comment_author)
                        else:
                            pass

                        if comment_has_been_answered_by_thread_author["question_Answered_From_Host"] is True:
                            dict_with_values_to_be_returned["comments_answered_by_iama_host_tier_x"] += 1
                            answer_time_stamp_iama_host = \
                                comment_has_been_answered_by_thread_author["time_Stamp_Answer"]

                            # Adds the calculated answer time to a local list for TIER X
                            # noinspection PyTypeChecker
                            answer_time_iama_host_in_seconds = calculate_time_difference(
                                comment_creation_time, answer_time_stamp_iama_host)

                            dict_with_values_to_be_returned["iama_host_response_to_comment_time_tier_x_average"] \
                                .append(answer_time_iama_host_in_seconds)
                        else:
                            pass

            # Whenever that iterated "reaction" is a comment or question FROM the thread author (!)
            else:
                pass
        else:
            pass

    # noinspection PyNoneFunctionAssignment,PyTypeChecker
    average_reaction_time_comments_total = calculate_reaction_time_average(sorted(
        dict_with_values_to_be_returned["reaction_time_between_comments_total_average"]), thread_creation_time_stamp)

    # noinspection PyNoneFunctionAssignment,PyTypeChecker
    average_reaction_time_comments_tier_1 = calculate_reaction_time_average(sorted(
        dict_with_values_to_be_returned["reaction_time_between_comments_tier_1_average"]), thread_creation_time_stamp)

    # noinspection PyNoneFunctionAssignment,PyTypeChecker
    average_reaction_time_comments_tier_x = calculate_reaction_time_average(sorted(
        dict_with_values_to_be_returned["reaction_time_between_comments_tier_x_average"]), thread_creation_time_stamp)

    # noinspection PyNoneFunctionAssignment,PyTypeChecker
    average_reaction_time_questions_total = calculate_reaction_time_average(sorted(
        dict_with_values_to_be_returned["reaction_time_between_questions_total_average"]), thread_creation_time_stamp)

    # noinspection PyNoneFunctionAssignment,PyTypeChecker
    average_reaction_time_questions_tier_1 = calculate_reaction_time_average(sorted(
        dict_with_values_to_be_returned["reaction_time_between_questions_tier_1_average"]), thread_creation_time_stamp)

    # noinspection PyNoneFunctionAssignment,PyTypeChecker
    average_reaction_time_questions_tier_x = calculate_reaction_time_average(sorted(
        dict_with_values_to_be_returned["reaction_time_between_questions_tier_x_average"]), thread_creation_time_stamp)

    dict_life_span_values = calculate_life_span(thread_creation_time_stamp,
                                                dict_with_values_to_be_returned["time_value_of_last_comment"],
                                                dict_with_values_to_be_returned["time_value_of_last_question"])

    # Filling the dict, which we want to return, with values here
    if dict_with_values_to_be_returned["comment_total_vote_average"]:
        dict_with_values_to_be_returned["comment_total_vote_average"] = \
            np.median(dict_with_values_to_be_returned["comment_total_vote_average"])

    if dict_with_values_to_be_returned["comment_tier_1_vote_average"]:
        dict_with_values_to_be_returned["comment_tier_1_vote_average"] = \
            np.median(dict_with_values_to_be_returned["comment_tier_1_vote_average"])

    if dict_with_values_to_be_returned["comment_tier_x_vote_average"]:
        dict_with_values_to_be_returned["comment_tier_x_vote_average"] = \
            np.median(dict_with_values_to_be_returned["comment_tier_x_vote_average"])

    if dict_with_values_to_be_returned["question_total_vote_average"]:
        dict_with_values_to_be_returned["question_total_vote_average"] = \
            np.median(dict_with_values_to_be_returned["question_total_vote_average"])

    if 
dict_with_values_to_be_returned["question_tier_1_vote_average"]: dict_with_values_to_be_returned["question_tier_1_vote_average"] = \ np.median(dict_with_values_to_be_returned["question_tier_1_vote_average"]) if dict_with_values_to_be_returned["question_tier_x_vote_average"]: dict_with_values_to_be_returned["question_tier_x_vote_average"] = \ np.median(dict_with_values_to_be_returned["question_tier_x_vote_average"]) dict_with_values_to_be_returned["reaction_time_between_comments_total_average"] = \ average_reaction_time_comments_total dict_with_values_to_be_returned["reaction_time_between_comments_tier_1_average"] = \ average_reaction_time_comments_tier_1 dict_with_values_to_be_returned["reaction_time_between_comments_tier_x_average"] = \ average_reaction_time_comments_tier_x dict_with_values_to_be_returned["reaction_time_between_questions_total_average"] = \ average_reaction_time_questions_total dict_with_values_to_be_returned["reaction_time_between_questions_tier_1_average"] = \ average_reaction_time_questions_tier_1 dict_with_values_to_be_returned["reaction_time_between_questions_tier_x_average"] = \ average_reaction_time_questions_tier_x dict_with_values_to_be_returned["amount_of_questioners_total"] = ( len(dict_with_values_to_be_returned["amount_of_questioners_tier_1"]) + len(dict_with_values_to_be_returned["amount_of_questioners_tier_x"]) ) dict_with_values_to_be_returned["amount_of_commentators_total"] = ( len(dict_with_values_to_be_returned["amount_of_commentators_tier_1"]) + len(dict_with_values_to_be_returned["amount_of_commentators_tier_x"]) ) dict_with_values_to_be_returned["time_value_of_last_question"] = \ dict_life_span_values["lifespan_thread_last_question"] dict_with_values_to_be_returned["time_value_of_last_comment"] = \ dict_life_span_values["lifespan_thread_last_comment"] # Checks all list if they are empty or not.. Whenever they are empty set its value to None.. 
Because setting it # to 0 or some other datatype affects the statistical calculations in a bad way if not dict_with_values_to_be_returned["comment_total_vote_average"]: dict_with_values_to_be_returned["comment_total_vote_average"] = None if not dict_with_values_to_be_returned["comment_tier_1_vote_average"]: dict_with_values_to_be_returned["comment_tier_1_vote_average"] = None if not dict_with_values_to_be_returned["comment_tier_x_vote_average"]: dict_with_values_to_be_returned["comment_tier_x_vote_average"] = None if not dict_with_values_to_be_returned["question_total_vote_average"]: dict_with_values_to_be_returned["question_total_vote_average"] = None if not dict_with_values_to_be_returned["question_tier_1_vote_average"]: dict_with_values_to_be_returned["question_tier_1_vote_average"] = None if not dict_with_values_to_be_returned["question_tier_x_vote_average"]: dict_with_values_to_be_returned["question_tier_x_vote_average"] = None if dict_with_values_to_be_returned["iama_host_response_to_comment_time_total_average"]: dict_with_values_to_be_returned["iama_host_response_to_comment_time_total_average"] = \ np.median(dict_with_values_to_be_returned["iama_host_response_to_comment_time_total_average"]) else: dict_with_values_to_be_returned["iama_host_response_to_comment_time_total_average"] = None if dict_with_values_to_be_returned["iama_host_response_to_comment_time_tier_1_average"]: dict_with_values_to_be_returned["iama_host_response_to_comment_time_tier_1_average"] = \ np.median(dict_with_values_to_be_returned["iama_host_response_to_comment_time_tier_1_average"]) else: dict_with_values_to_be_returned["iama_host_response_to_comment_time_tier_1_average"] = None if dict_with_values_to_be_returned["iama_host_response_to_comment_time_tier_x_average"]: dict_with_values_to_be_returned["iama_host_response_to_comment_time_tier_x_average"] = \ np.median(dict_with_values_to_be_returned["iama_host_response_to_comment_time_tier_x_average"]) else: dict_with_values_to_be_returned["iama_host_response_to_comment_time_tier_x_average"] = None if dict_with_values_to_be_returned["iama_host_response_to_question_time_total_average"]: dict_with_values_to_be_returned["iama_host_response_to_question_time_total_average"] = \ np.median(dict_with_values_to_be_returned["iama_host_response_to_question_time_total_average"]) else: dict_with_values_to_be_returned["iama_host_response_to_question_time_total_average"] = None if dict_with_values_to_be_returned["iama_host_response_to_question_time_tier_1_average"]: dict_with_values_to_be_returned["iama_host_response_to_question_time_tier_1_average"] = \ np.median(dict_with_values_to_be_returned["iama_host_response_to_question_time_tier_1_average"]) else: dict_with_values_to_be_returned["iama_host_response_to_question_time_tier_1_average"] = None if dict_with_values_to_be_returned["iama_host_response_to_question_time_tier_x_average"]: dict_with_values_to_be_returned["iama_host_response_to_question_time_tier_x_average"] = \ np.median(dict_with_values_to_be_returned["iama_host_response_to_question_time_tier_x_average"]) else: dict_with_values_to_be_returned["iama_host_response_to_question_time_tier_x_average"] = None if dict_with_values_to_be_returned["amount_of_questioners_total"] == 0: dict_with_values_to_be_returned["amount_of_questioners_total"] = None if not dict_with_values_to_be_returned["amount_of_questioners_tier_1"]: dict_with_values_to_be_returned["amount_of_questioners_tier_1"] = None else: dict_with_values_to_be_returned["amount_of_questioners_tier_1"] = 
len(dict_with_values_to_be_returned["amount_of_questioners_tier_1"])

    if not dict_with_values_to_be_returned["amount_of_questioners_tier_x"]:
        dict_with_values_to_be_returned["amount_of_questioners_tier_x"] = None
    else:
        dict_with_values_to_be_returned["amount_of_questioners_tier_x"] = \
            len(dict_with_values_to_be_returned["amount_of_questioners_tier_x"])

    if dict_with_values_to_be_returned["amount_of_commentators_total"] == 0:
        dict_with_values_to_be_returned["amount_of_commentators_total"] = None

    if not dict_with_values_to_be_returned["amount_of_commentators_tier_1"]:
        dict_with_values_to_be_returned["amount_of_commentators_tier_1"] = None
    else:
        dict_with_values_to_be_returned["amount_of_commentators_tier_1"] = \
            len(dict_with_values_to_be_returned["amount_of_commentators_tier_1"])

    if not dict_with_values_to_be_returned["amount_of_commentators_tier_x"]:
        dict_with_values_to_be_returned["amount_of_commentators_tier_x"] = None
    else:
        dict_with_values_to_be_returned["amount_of_commentators_tier_x"] = \
            len(dict_with_values_to_be_returned["amount_of_commentators_tier_x"])

    # Final checking for return here!
    # Whenever one of the two time values is negative (which means nobody or only "AutoModerator" has responded),
    # return nothing; otherwise return the dict

    # Whenever only comments were made but no questions have been asked
    if (dict_with_values_to_be_returned["time_value_of_last_comment"] > 0) and \
            (dict_with_values_to_be_returned["time_value_of_last_question"] < 0):
        dict_with_values_to_be_returned["time_value_of_last_question"] = None
    else:
        pass

    # Whenever only questions have been asked but no comments have been made
    if (dict_with_values_to_be_returned["time_value_of_last_comment"] < 0) and \
            (dict_with_values_to_be_returned["time_value_of_last_question"] > 0):
        dict_with_values_to_be_returned["time_value_of_last_comment"] = None
    else:
        pass

    # Whenever no questions and no comments have been made (and even the AutoModerator didn't do anything)
    if (dict_with_values_to_be_returned["time_value_of_last_comment"] is None or
            dict_with_values_to_be_returned["time_value_of_last_comment"] < 0) and \
            (dict_with_values_to_be_returned["time_value_of_last_question"] is None or
             dict_with_values_to_be_returned["time_value_of_last_question"] < 0):
        dict_with_values_to_be_returned["time_value_of_last_comment"] = None
        dict_with_values_to_be_returned["time_value_of_last_question"] = None
    else:
        pass

    return dict_with_values_to_be_returned


def check_if_comment_is_a_question(given_string):
    """Simply checks whether a given string is a question or not

    This method simply checks whether a question mark exists within that string or not.. It is kept that simple,
    because messing around with natural language processing kits to determine the semantic sense would exceed the
    scope of this bachelor thesis...

    Args:
        given_string (str) : The string which will be checked for a question mark
    Returns:
        True (bool): Whenever the given string is a question
        False (bool): Whenever the given string is not a question
    """
    if "?" 
in given_string:
        return True
    else:
        return False


def check_if_comment_is_on_tier_1(comment_parent_id):
    """Checks whether a comment lies on the first tier or any other tier

    Args:
        comment_parent_id (str) : The name id of the comment's parent
    Returns:
        True (bool): Whenever the comment lies on tier 1
        False (bool): Whenever the comment lies on any other tier
    """
    if "t3_" in comment_parent_id:
        return True
    else:
        return False


def check_if_comment_is_not_from_thread_author(author_of_thread, comment_author):
    """Checks whether the comment author differs from the thread author

    1. This method simply checks whether both strings match each other or not.
        I have built this extra method to have a better overview in the main code..
    Args:
        author_of_thread (str) : The name of the thread author (iAMA-Host)
        comment_author (str) : The name of the comment's author
    Returns:
        True (bool): Whenever the strings do not match
        False (bool): Whenever the strings do match
    """
    if author_of_thread != comment_author:
        return True
    else:
        return False


def check_if_comment_has_been_answered_by_thread_author(author_of_thread, comment_actual_id, comments_cursor):
    """Checks whether a given comment has been answered by the thread author (iAMA host)

    1. A dictionary containing a flag whether that question is answered by the host, plus the appropriate
        timestamp, will be created in the beginning.
    2. Then the method iterates over every comment within that thread
        1.1. Whenever an answer is from the iAMA host and the processed comment's 'parent_id' matches the
            id of the comment in question, the returned dict will contain appropriate values
        1.2. If this is not the case, it will be returned in its default condition

    Note:
        We take a list as 'comments_cursor' and not a real cursor, because real cursors can be exhausted, which
        could lead to not all comments being iterated.. This is especially critical when you have to do many
        iterations with only one cursor... [took me 8 hours to figure this "bug" out...]

    Args:
        author_of_thread (str) : The name of the thread author (iAMA-Host)
        comment_actual_id (str) : The id of the actually processed comment
        comments_cursor (list) : The list containing all comments
    Returns:
        dict_to_be_returned (dict) : Contains a flag whether the comment has been answered by the host and,
            if so, the timestamp of that answer
    """
    dict_to_be_returned = {
        "question_Answered_From_Host": False,
        "time_Stamp_Answer": 0
    }

    # Iterates over every comment
    for i, val in enumerate(comments_cursor):
        check_comment_parent_id = val.get("parent_id")
        actual_comment_author = val.get("author")

        # Whenever the iterated comment is from the iAMA-Host and that
        # comment has the question as parent_id
        if (check_if_comment_is_not_from_thread_author(author_of_thread, actual_comment_author) is False) and \
                (check_comment_parent_id == comment_actual_id):
            dict_to_be_returned["question_Answered_From_Host"] = True
            dict_to_be_returned["time_Stamp_Answer"] = val.get("created_utc")
            return dict_to_be_returned

    # This is the case whenever a comment has no replies at all or the comment / question has not been answered
    return dict_to_be_returned


def calculate_time_difference(comment_time_stamp, answer_time_stamp_iama_host):
    """Calculates the time difference in seconds between a comment and its answer from the iama host

    1. The time stamps will be converted from epoch into float and afterwards into str again
        (necessary for correct subtraction)
    2. Then the time stamps will be subtracted from each other
    3. 
The resulting time difference will be converted into seconds (int)

    Args:
        comment_time_stamp (str): The time stamp of the comment
        answer_time_stamp_iama_host (str): The time stamp of the iAMA host's answer
    Returns:
        time_difference_in_seconds (int) : The time difference between the comment and its answer by the
            iAMA host, in seconds
    """
    # Converts the time value into float, otherwise it could not be processed any further...
    comment_time_value = float(comment_time_stamp)
    comment_time_converted = datetime.datetime.fromtimestamp(
        comment_time_value).strftime('%d-%m-%Y %H:%M:%S')
    comment_time_converted_for_subtraction = datetime.datetime.strptime(
        comment_time_converted, '%d-%m-%Y %H:%M:%S')

    # Converts the time value into float, otherwise it could not be processed any further...
    answer_time_iama_host = float(answer_time_stamp_iama_host)
    answer_time_iama_host_converted = datetime.datetime.fromtimestamp(
        answer_time_iama_host).strftime('%d-%m-%Y %H:%M:%S')
    answer_time_iama_host_converted_for_subtraction = datetime.datetime.strptime(
        answer_time_iama_host_converted, '%d-%m-%Y %H:%M:%S')

    # Calculates the time difference between the comment and the iAMA host's answer
    time_difference_in_seconds = (
        answer_time_iama_host_converted_for_subtraction -
        comment_time_converted_for_subtraction
    ).total_seconds()

    return time_difference_in_seconds


def calculate_reaction_time_average(list_to_be_processed, thread_creation_time_stamp):
    """Calculates the reaction time between the time values of a given list

    Args:
        list_to_be_processed (list) : The list which contains time values (utc epoch)
        thread_creation_time_stamp (str) : The string which contains the creation date of the thread (utc epoch)
    Returns:
        None : Whenever there were no time values given
        np.median(time_difference) (float) : Median of the reaction times in seconds
    """

    # Will contain the time difference between each pair of "reactions" in seconds
    time_difference = []

    for i, val in enumerate(list_to_be_processed):

        # Converts the time value into float, otherwise it could not be converted... 
time_value_current = float(val)
        current_time_converted = datetime.datetime.fromtimestamp(
            time_value_current).strftime('%d-%m-%Y %H:%M:%S')
        current_time_converted_for_subtraction = datetime.datetime.strptime(
            current_time_converted, '%d-%m-%Y %H:%M:%S')

        # Whenever a thread only has one single comment which is not null
        if len(list_to_be_processed) == 1:

            # Converts the thread creation date into a comparable time format
            temp_creation_date_of_thread = float(thread_creation_time_stamp)
            temp_creation_date_of_thread_converted = datetime.datetime.fromtimestamp(
                temp_creation_date_of_thread).strftime('%d-%m-%Y %H:%M:%S')

            # Subtracts the comment creation time from the thread creation time
            temp_thread_time = datetime.datetime.strptime(
                temp_creation_date_of_thread_converted, '%d-%m-%Y %H:%M:%S')

            # Add the difference between those two times, in seconds, to that list
            time_difference.append(
                (current_time_converted_for_subtraction - temp_thread_time).total_seconds())

        # Whenever the last list object is iterated over, skip it, because there is no following object
        elif i != len(list_to_be_processed) - 1:

            # Converts the next time value into float
            time_value_next = float(list_to_be_processed[i + 1])
            next_time_converted = datetime.datetime.fromtimestamp(
                time_value_next).strftime('%d-%m-%Y %H:%M:%S')
            next_time_converted_for_subtraction = datetime.datetime.strptime(
                next_time_converted, '%d-%m-%Y %H:%M:%S')

            # Whenever the first comment gets iterated over, build the
            # difference between thread and 1st comment creation date
            if i == 0:

                # Converts the thread creation date into a comparable time format
                temp_creation_date_of_thread = float(thread_creation_time_stamp)
                temp_creation_date_of_thread_converted = datetime.datetime.fromtimestamp(
                    temp_creation_date_of_thread).strftime('%d-%m-%Y %H:%M:%S')

                # Subtracts the comment creation time from the thread creation time
                temp_thread_time = datetime.datetime.strptime(
                    temp_creation_date_of_thread_converted, '%d-%m-%Y %H:%M:%S')

                # Add the difference between those two times, in seconds, to that list
                time_difference.append(
                    (current_time_converted_for_subtraction - temp_thread_time).total_seconds())

            else:
                # Appends the difference between the time of the current and next comment into the time_difference
                # variable
                time_difference.append(
                    (next_time_converted_for_subtraction - current_time_converted_for_subtraction).total_seconds())
        else:
            pass

    if len(time_difference) == 0:
        return None
    else:
        return np.median(time_difference)


def calculate_life_span(thread_creation_time_stamp, time_value_of_last_comment, time_value_of_last_question):
    """Calculates the life span between two time stamps

    1. The creation date of the thread gets converted into a comparable time format
    2. The time stamps of the last comment and the last question get converted the same way
    3. The differences between the thread creation time and those two time stamps get calculated
    4. 
The resulting dictionary, containing both life spans in seconds, gets returned

    Args:
        thread_creation_time_stamp (float) : The time stamp (utc epoch) of the thread creation
        time_value_of_last_comment (float) : The time stamp (utc epoch) of the thread's last comment
        time_value_of_last_question (float) : The time stamp (utc epoch) of the thread's last question
    Returns:
        dict_to_be_returned (dict) : Containing information about the time differences:
            Thread creation timestamp <-> Last question time stamp
            Thread creation timestamp <-> Last comment time stamp
    """

    # Conversion of the thread's creation timestamp
    temp_creation_date_of_thread = datetime.datetime.fromtimestamp(float(thread_creation_time_stamp)) \
        .strftime('%d-%m-%Y %H:%M:%S')
    temp_creation_date_of_thread_converted = datetime.datetime.strptime(
        temp_creation_date_of_thread, '%d-%m-%Y %H:%M:%S')

    # Conversion of the thread's last comment time stamp
    temp_time_stamp_last_comment = datetime.datetime.fromtimestamp(float(time_value_of_last_comment)) \
        .strftime('%d-%m-%Y %H:%M:%S')
    temp_time_stamp_last_comment_converted = datetime.datetime.strptime(
        temp_time_stamp_last_comment, '%d-%m-%Y %H:%M:%S')

    # Conversion of the thread's last question time stamp
    temp_time_stamp_last_question = datetime.datetime.fromtimestamp(float(time_value_of_last_question)) \
        .strftime('%d-%m-%Y %H:%M:%S')
    temp_time_stamp_last_question_converted = datetime.datetime.strptime(
        temp_time_stamp_last_question, '%d-%m-%Y %H:%M:%S')

    # The dictionary containing information about both life spans
    dict_to_be_returned = {
        "lifespan_thread_last_comment": (temp_time_stamp_last_comment_converted -
                                         temp_creation_date_of_thread_converted).total_seconds(),
        "lifespan_thread_last_question": (temp_time_stamp_last_question_converted -
                                          temp_creation_date_of_thread_converted).total_seconds()
    }

    return dict_to_be_returned


def add_actual_year_list_to_global_list(list_to_append):
    """Iterates over a given list with thread information and adds every single element to a global list

    The global list will be printed to csv in the end

    Args:
        list_to_append (list) : List with thread information which will be appended to a global list
    Returns:
        -
    """
    global list_global_year

    for item in list_to_append:
        list_global_year.append(item)


def write_csv_data(list_with_information):
    """Creates a csv file containing all necessary information about the threads and their mannerisms

    Args:
        list_with_information (list) : Contains various information about thread mannerisms
    Returns:
        -
    """
    print("---- Writing csv containing all thread information for year " +
          str(year_actually_in_progress) + " now")

    # Empty print line here for a more beautiful console output
    print("")

    file_name_csv = str(os.path.basename(__file__))[0:len(os.path.basename(__file__)) - 3] + \
        '_' + \
        str(argument_year_beginning) + \
        '_until_' + \
        str(argument_year_ending) + \
        '_' + \
        "BIGDATA" + \
        '_' + \
        str(year_actually_in_progress) + \
        '.csv'

    with open(file_name_csv, 'w', newline='') as fp:
        csv_writer = csv.writer(fp, delimiter=',')

        # The heading of the csv file
        
data = [['Year',
                 'Thread id',
                 'Thread author',
                 'Thread ups',
                 'Thread downs',
                 'Thread creation time stamp',
                 'Thread average comment vote score total',
                 'Thread average comment vote score tier 1',
                 'Thread average comment vote score tier x',
                 'Thread average question vote score total',
                 'Thread average question vote score tier 1',
                 'Thread average question vote score tier x',
                 'Thread num comments total skewed',
                 'Thread num comments total',
                 'Thread num comments tier 1',
                 'Thread num comments tier x',
                 'Thread num questions total',
                 'Thread num questions tier 1',
                 'Thread num questions tier x',
                 'Thread num questions answered by iama host total',
                 'Thread num questions answered by iama host tier 1',
                 'Thread num questions answered by iama host tier x',
                 'Thread num comments answered by iama host total',
                 'Thread num comments answered by iama host tier 1',
                 'Thread num comments answered by iama host tier x',
                 'Thread average reaction time between comments total',
                 'Thread average reaction time between comments tier 1',
                 'Thread average reaction time between comments tier x',
                 'Thread average reaction time between questions total',
                 'Thread average reaction time between questions tier 1',
                 'Thread average reaction time between questions tier x',
                 'Thread average response to question time iama host total',
                 'Thread average response to question time iama host tier 1',
                 'Thread average response to question time iama host tier x',
                 'Thread average response to comment time iama host total',
                 'Thread average response to comment time iama host tier 1',
                 'Thread average response to comment time iama host tier x',
                 'Thread amount of questioners total',
                 'Thread amount of questioners tier 1',
                 'Thread amount of questioners tier x',
                 'Thread amount of commentators total',
                 'Thread amount of commentators tier 1',
                 'Thread amount of commentators tier x',
                 'Thread life span until last comment',
                 'Thread life span until last question']]

        # Iterates over the collected thread information and builds one csv row per thread
        for item in list_with_information:
            temp_list = [str(item.get("Year")),
                         str(item.get("Thread_id")),
                         str(item.get("Thread_author")),
                         str(item.get("Thread_ups")),
                         str(item.get("Thread_downs")),
                         str(item.get("Thread_creation_time_stamp")),
                         str(item.get("Thread_average_comment_vote_score_total")),
                         str(item.get("Thread_average_comment_vote_score_tier_1")),
                         str(item.get("Thread_average_comment_vote_score_tier_x")),
                         str(item.get("Thread_average_question_vote_score_total")),
                         str(item.get("Thread_average_question_vote_score_tier_1")),
                         str(item.get("Thread_average_question_vote_score_tier_x")),
                         str(item.get("Thread_num_comments_total_skewed")),
                         str(item.get("Thread_num_comments_total")),
                         str(item.get("Thread_num_comments_tier_1")),
                         str(item.get("Thread_num_comments_tier_x")),
                         str(item.get("Thread_num_questions_total")),
                         str(item.get("Thread_num_questions_tier_1")),
                         str(item.get("Thread_num_questions_tier_x")),
                         str(item.get("Thread_num_questions_answered_by_iama_host_total")),
                         str(item.get("Thread_num_questions_answered_by_iama_host_tier_1")),
                         str(item.get("Thread_num_questions_answered_by_iama_host_tier_x")),
                         str(item.get("Thread_num_comments_answered_by_iama_host_total")),
                         str(item.get("Thread_num_comments_answered_by_iama_host_tier_1")),
                         str(item.get("Thread_num_comments_answered_by_iama_host_tier_x")),
                         str(item.get("Thread_average_reaction_time_between_comments_total")),
                         str(item.get("Thread_average_reaction_time_between_comments_tier_1")),
                         str(item.get("Thread_average_reaction_time_between_comments_tier_x")),
                         
str(item.get("Thread_average_reaction_time_between_questions_total")), str(item.get("Thread_average_reaction_time_between_questions_tier_1")), str(item.get("Thread_average_reaction_time_between_questions_tier_x")), str(item.get("Thread_average_iama_host_response_to_question_time_total")), str(item.get("Thread_average_iama_host_response_to_question_time_tier_1")), str(item.get("Thread_average_iama_host_response_to_question_time_tier_x")), str(item.get("Thread_average_iama_host_response_to_comment_time_total")), str(item.get("Thread_average_iama_host_response_to_comment_time_tier_1")), str(item.get("Thread_average_iama_host_response_to_comment_time_tier_x")), str(item.get("Thread_amount_of_questioners_total")), str(item.get("Thread_amount_of_questioners_tier_1")), str(item.get("Thread_amount_of_questioners_tier_x")), str(item.get("Thread_amount_of_commentators_total")), str(item.get("Thread_amount_of_commentators_tier_1")), str(item.get("Thread_amount_of_commentators_tier_x")), str(item.get("Thread_life_span_comment")), str(item.get("Thread_life_span_question"))] data.append(temp_list) # Writes data into the csv file csv_writer.writerows(data) # >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> Necessary variables and scripts are here # Contains the year which is given as an argument argument_year_beginning = 0 # Contains the year which is given as an argument argument_year_ending = 0 # Contains the year which will be processed at the moment year_actually_in_progress = 0 # Contains information for the current year list_current_year = [] # Contains information for all years about all threads within reddit list_global_year = [] # >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> Methods which are to be called are here # Executes necessary checks check_script_arguments() # Initializes the mongoDB with the arguments given via command line initialize_mongo_db_parameters(argument_year_beginning) # Starts the data generation and writes csv files containg these information start_data_generation_for_analysis()
""" Mozilla CSS extensions. """ import textwrap # Auto generated from: # 'src/codeintel/support/gencix/css/gen_moz_css_properties.py' ### START: Auto generated CSS_MOZ_DATA = { '-moz-appearance': {'description': "The -moz-appearance CSS property is used in Gecko (Firefox) to display an element using a platform-native styling based on the operating system's theme.", 'values': {'-moz-mac-unified-toolbar': 'New in Firefox 3.5. Mac OS X only. This causes the toolbar and title bar to render using the unified toolbar style common to Mac OS X 10.4 and later applications.', '-moz-win-browsertabbar-toolbox': 'New in Firefox 3. Windows Vista and later. This toolbox style is meant to be used for the tab bar in a browser.', '-moz-win-communications-toolbox': 'New in Firefox 3. Windows Vista and later. This toolbox style is meant to be used in communications and productivity applications. Corresponding foreground color is -moz-win-communicationstext .', '-moz-win-glass': 'New in Firefox 3.5. Windows Vista and later. This style applies the Aero Glass effect to the element.', '-moz-win-media-toolbox': 'New in Firefox 3. Windows Vista and later. This toolbox style is meant to be used in applications that manage media objects. Corresponding foreground color is -moz-win-mediatext .', 'button': 'The element is drawn like a button.', 'checkbox': 'The element is drawn like a checkbox, including only the actual "checkbox" portion.', 'checkbox-container': 'The element is drawn like a container for a checkbox, which may include a prelighting background effect under certain platforms. Normally a would contain a label and a checkbox.', 'checkbox-small': '', 'dialog': 'The element is styled like a dialog box, which includes background color and other properties.', 'listbox': '', 'menuitem': 'The element is styled as menu item, item is highlighted when hovered.', 'menulist': '', 'menulist-button': 'The element is styled as a button that would indicate a menulist can be opened.', 'menulist-textfield': 'The element is styled as the text field for a menulist.', 'menupopup': '', 'none': 'No special styling is applied. (Default)', 'progressbar': 'The element is styled like a progress bar.', 'radio': 'The element is drawn like a radio button, including only the actual "radio button" portion.', 'radio-container': 'The element is drawn like a container for a radio button, which may include a prelighting background effect under certain platforms. Normally would contain a label and a radio button.', 'radio-small': '', 'resizer': '', 'scrollbar': '', 'scrollbarbutton-down': '', 'scrollbarbutton-left': '', 'scrollbarbutton-right': '', 'scrollbarbutton-up': '', 'scrollbartrack-horizontal': '', 'scrollbartrack-vertical': '', 'separator': '', 'statusbar': '', 'tab': '', 'tab-left-edge': 'Obsolete. ', 'tabpanels': '', 'textfield': '', 'toolbar': '', 'toolbarbutton': '', 'toolbox': '', 'tooltip': '', 'treeheadercell': '', 'treeheadersortarrow': '', 'treeitem': '', 'treetwisty': '', 'treetwistyopen': '', 'treeview': '', 'window': ''}, 'version': 'Firefox 1.0 (1.0)'}, '-moz-background-clip': {'description': "The background-clip CSS property specifies whether an element's background, either the color or image, extends underneath its border. -moz-background-clip is supported up to Gecko version 1.9.2 (Firefox 3.6). Warning: To support both, older and newer versions of Gecko (Firefox), you have to add both properties in the stylesheet. See examples.", 'values': {'border': '(Firefox 1.0-3.6). 
The background extends to the outside edge of the border (but underneath the border in z-ordering). Default value, but see Browser compatibility section below for special case Internet Explorer 7.', 'border-box': '(Requires Gecko 1.9.3). The background extends to the outside edge of the border (but underneath the border in z-ordering). Default value, but see Browser compatibility section below for special case Internet Explorer 7.', 'content-box': 'Requires Gecko 1.9.3. The background is painted within (clipped to) the content box.', 'padding': '(Firefox 1.0-3.6). No background is drawn below the border (background extends to the outside edge of the padding).', 'padding-box': '(Requires Gecko 1.9.3). No background is drawn below the border (background extends to the outside edge of the padding).'}, 'version': 'Firefox (Gecko) 1.0-3.6 (1.2-1.9.2)'}, '-moz-background-inline-policy': {'description': 'In Gecko -based applications like Firefox, the -moz-background-inline-policy CSS property specifies how the background image of an inline element is determined when the content of the inline element wraps onto multiple lines. The choice of position has significant effects on repetition.', 'values': {'bounding-box': 'The background image is positioned (and repeated) in the smallest rectangle that contains all of the inline boxes for the element. It is then clipped to be visible only within those boxes, according to the -moz-background-clip property.', 'continuous': 'The background image is positioned (and repeated) as if the inline box were not broken across lines, and then this long rectangle is sliced into pieces for each line.', 'each-box': 'The background image is positioned (and repeated) separately for each box of the inline element. This means that an image with background-repeat : no-repeat may be repeated multiple times.'}}, '-moz-background-origin': {'description': 'The background-origin CSS property determines the background positioning area (the origin of a background-image). background-origin does not apply when background-attachment is fixed . -moz-background-origin is supported up to Gecko version 1.9.2 (Firefox 3.6). Warning: To support both, older and newer versions of Gecko (Firefox), you have to add both properties in the stylesheet. See examples.', 'values': {'border': '(Firefox 1.0-3.6). The background position is relative to the border, so the image can go behind the border.', 'border-box': '(New in Firefox 4). The background position is relative to the border, so the image can go behind the border.', 'content': '(Firefox 1.0-3.6). The background position is relative to the content.', 'content-box': '(New in Firefox 4). The background position is relative to the content.', 'padding': '(Firefox 1.0-3.6). Default value. The background position is relative to the padding. (For single boxes " 0 0 " is the upper left corner of the padding edge, " 100% 100% " is the lower right corner.)', 'padding-box': '(New in Firefox 4). Default value. The background position is relative to the padding. (For single boxes " 0 0 " is the upper left corner of the padding edge, " 100% 100% " is the lower right corner.)'}, 'version': 'Firefox (Gecko) 1.0-3.6 (1.2-1.9.2)'}, '-moz-background-size': {'description': 'The background-size CSS property specifies the size of the background images. -moz-background-size is supported by Gecko version 1.9.2 (Firefox 3.6). Warning: To support both, Firefox 3.6 and newer versions, you have to include both properties in the stylesheet. 
See examples.', 'values': {'<length>': 'Scales the background image to the specified length in the desired dimension.', '<percentage>': "Scales the background image in the desired dimension to the specified percentage of the background positioning area, which is determined by the value of -moz-background-origin . The background positioning area is, by default, the area containing the content of the box and its padding; the area may also be changed to just the content or to the area containing borders, padding, and content. If the background's attachment is fixed , the background positioning area is instead the entire area of the browser window, not including the area covered by scrollbars if they are present.", 'auto': 'Scales the background image in the relevant direction such that its intrinsic proportions are maintained.', 'contain': 'Specifies that the background image should be scaled to be as large as possible while ensuring both its dimensions are less than or equal to the corresponding dimensions of the background positioning area.', 'cover': 'Specifies that the background image should be scaled to be as small as possible while ensuring both its dimensions are greater than or equal to the corresponding dimensions of the background positioning area.'}, 'version': 'Firefox (Gecko) 3.6 (1.9.2)'}, '-moz-binding': {'description': 'The -moz-binding CSS property is used by Mozilla-based applications to attach an XBL binding to a DOM element.', 'values': {'<uri>': 'The URI for the XBL binding (including the fragment identifier).', 'none': 'no XBL binding is applied to the element.'}}, '-moz-border-bottom-colors': {'description': 'In Mozilla applications like Firefox, -moz-border-bottom-colors sets a list of colors for the bottom border.'}, '-moz-border-end': {}, '-moz-border-end-color': {}, '-moz-border-end-style': {}, '-moz-border-end-width': {}, '-moz-border-image': {'description': 'The border-image CSS property allows drawing an image on the borders of elements. This makes drawing complex looking widgets much simpler than it has been and removes the need for nine boxes in some cases.', 'values': {'<border-width>': '(optional). If the slash / is present in the property value, the one, two, three or four values after it are used for the width of the border instead of the border-width properties. The order of the values is the same as for border-width .', '<image>': '(required). The image value is a <uri> , e.g. url(http://example.org/image.png)', '<number>': '| <percentage> (required). One, two, three or four values represent inward offsets from the top, right, bottom, and left edges of the image (respectively), dividing it into nine regions: four corners, four edges and a middle. One value belongs to all four sides of the image. Two values belong 1. to top and bottom and 2. to right and left side. Three values belong 1. to top, 2. to the right and left side and 3. to bottom. Four values belong to the top, right, bottom and left edge of the image in that order. In Gecko 1.9.1 (Firefox 3.5) the middle part of the image is drawn like a background-image of the element. This may change in future versions. Percentages are relative to the width/height of the image. Numbers represent pixels in the image (if the image is a raster image) or vector coordinates (if the image is an SVG image).', 'none': 'No image displayed, other border styles are used.', 'stretch': '| round | repeat (optional). One or two keywords, that specify how the images for the sides and the middle part are scaled and tiled. 
stretch (default value) will cause images to be scaled to fit their box. round will tile the images, but also scale them so that a whole number fit in the box. repeat simply tiles the images inside the box. The first keyword describes how to draw the top, middle, and bottom images, while the second describes the left and right borders. If the second is absent, it is assumed to be the same as the first. If both are absent, the default value stretch is used.'}, 'version': 'Firefox (Gecko) 3.5 (1.9.1)'}, '-moz-border-left-colors': {'description': 'In Mozilla applications like Firefox, the -moz-border-left-colors sets a list of colors for the left border.'}, '-moz-border-radius': {'description': 'In Mozilla applications like Firefox, the -moz-border-radius CSS property can be used to give borders rounded corners. The radius applies also to the background even if the element has no border.', 'values': {'<length>': 'See <length> for possible units.', '<percentage>': 'In Gecko (Firefox) Non-standard : A percentage, relative to the width of the box (the percentage is relative to the width even when specifying the radius for a height). In CSS 3: Percentages for the horizontal radius refer to the width of the box, whereas percentages for the vertical radius refer to the height of the box.'}, 'version': 'Firefox (Gecko) 1.0 (1.0)'}, '-moz-border-radius-bottomleft': {'description': 'In Mozilla applications, -moz-border-radius-bottomleft sets the rounding of the bottom-left corner of the border.'}, '-moz-border-radius-bottomright': {'description': 'In Mozilla applications, -moz-border-radius-bottomright sets the rounding of the bottom-right corner of the border.'}, '-moz-border-radius-topleft': {'description': 'In Mozilla applications like Firefox, the -moz-border-radius-topleft CSS property sets the rounding of the top-left corner of the element.', 'values': {'<length>': 'See <length> for possible units.', '<percentage>': 'In Gecko (Firefox) Non-standard : Relative to the width of the box (the percentage is relative to the width even when specifying the radius for a height). In CSS 3: Percentages for the horizontal radius refer to the width of the box, whereas percentages for the vertical radius refer to the height of the box.'}, 'version': 'Firefox 1.0 (Gecko 1.0)'}, '-moz-border-radius-topright': {'description': 'In Mozilla applications like Firefox, the -moz-border-radius-topright CSS property sets the rounding of the top-right corner of the border.'}, '-moz-border-right-colors': {'description': 'In Mozilla applications like Firefox, -moz-border-right-colors sets a list of colors for the right border.'}, '-moz-border-start': {}, '-moz-border-start-color': {}, '-moz-border-start-style': {}, '-moz-border-start-width': {}, '-moz-border-top-colors': {'description': 'In Mozilla applications like Firefox, the -moz-border-top-colors CSS property sets a list of colors for the top border.', 'values': {'<color>': 'Specifies the color of a line of pixels in the bottom border. transparent is valid. See <color> values for possible units.', 'none': 'Default, no colors are drawn or border-color is used, if specified.'}}, '-moz-box-align': {'description': 'In Mozilla applications, -moz-box-align specifies how a XUL box\naligns its contents across (perpendicular to) the direction of its layout. The effect of this is only\nvisible if there is extra space in the box.', 'values': {'baseline': "The box aligns the baselines of the contents (lining up the text). 
This only applies if the box'sorientation is horizontal.", 'center': 'The box aligns contents in the center, dividing any extra space equally between the start and the end.', 'end': 'The box aligns contents at the end, leaving any extra space at the start.', 'start': 'The box aligns contents at the start, leaving any extra space at the end.', 'stretch': 'The box stretches the contents so that there is no extra space in the box.'}}, '-moz-box-direction': {'description': 'In Mozilla applications, -moz-box-direction specifies whether a box lays out its contents normally (from the top or left edge), or in reverse (from the bottom or right edge).', 'values': {'normal': 'The box lays out its contents from the start (the left or top edge).', 'reverse': 'The box lays out its contents from the end (the right or bottom edge).'}}, '-moz-box-flex': {'description': "In Mozilla applications, -moz-box-flex specifies how a box grows\nto fill the box that contains it, in the direction of the containing box's layout.", 'values': {'0': 'The box does not grow.'}}, '-moz-box-flexgroup': {}, '-moz-box-ordinal-group': {'description': 'Indicates the ordinal group the element belongs to. Elements with a lower ordinal group are displayed before those with a higher ordinal group.', 'values': {}}, '-moz-box-orient': {'description': 'In Mozilla applications, -moz-box-orient specifies whether a box\nlays out its contents horizontally or vertically.', 'values': {'horizontal': 'The box lays out its contents horizontally.', 'vertical': 'The box lays out its contents vertically.'}}, '-moz-box-pack': {'description': 'In Mozilla applications, -moz-box-pack specifies how a box\npacks its contents in the direction of its layout. The effect of this is only\nvisible if there is extra space in the box.', 'values': {'center': 'The box packs contents in the center, dividing any extra space equally between the start and the end.', 'end': 'The box packs contents at the end, leaving any extra space at the start.', 'justify': '?', 'start': 'The box packs contents at the start, leaving any extra space at the end.'}}, '-moz-box-shadow': {'description': 'The box-shadow CSS property accepts one or more shadow effects as a comma-separated list. It allows to cast a drop shadow from the frame of almost any arbitrary element. If a border-radius is specified on the element with a box shadow, the box shadow will take on the same rounded corners. The z-ordering of multiple box shadows is the same as multiple text-shadows (the first specified shadow is on top).', 'values': {'<blur-radius>': "(optional). This is a third <length> value. The larger this value, the bigger the blur, so the shadow becomes bigger and lighter. Negative values are not allowed. If not specified, it will be 0 (the shadow's edge is sharp).", '<color>': "(optional). See <color> values for possible keywords and notations. If not specified, the color depends on the browser. In Gecko (Firefox), the value of the color property is used. WebKit's shadow is transparent and therefore useless if <color> is omitted.", '<offset-x>': '<offset-y> (required). This are two <length> values to set the shadow offset. <offset-x> specifies the horizontal distance. Negative values place the shadow to the left of the element. <offset-y> specifies the vertical distance. Negative values place the shadow above the element. See <length> for possible units. 
If both values are 0 , the shadow is placed behind the element (and may generate a blur effect if <blur-radius> and/or <spread-radius> is set).', '<spread-radius>': '(optional). This is a fourth <length> value. Positive values will cause the shadow to expand and grow bigger, negative values will cause the shadow to shrink. If not specified, it will be 0 (the shadow will be the same size as the element).', 'inset': '(optional). If not specified (default), the shadow is assumed to be a drop shadow (as if the box were raised above the content). The presence of the inset keyword changes the shadow to one inside the frame (as if the content was depressed inside the box). Inset shadows are drawn above background, but below border and content.'}, 'version': 'Firefox (Gecko) 3.5 (1.9.1)'}, '-moz-box-sizing': {'description': '< CSS < CSS Reference < CSS Reference:Mozilla Extensions', 'values': {}}, '-moz-column-count': {'description': 'In Mozilla applications like Firefox, the -moz-column-count CSS property can be used to set the ideal number of columns into which the content of the element will be flowed.', 'values': {'<integer>': 'Describes the ideal number of columns into which the content of the element will be flowed.'}, 'version': 'Firefox (Gecko) 1.5 (1.8)'}, '-moz-column-gap': {'description': 'In Mozilla applications like Firefox, the -moz-column-gap CSS property sets the gap between columns for block elements which are specified to display as a multi-column element.', 'values': {'<length>': 'A non-negative value in any of the CSS <length> units to specify the gap between columns.', 'normal': 'Default value, depends on the user agent. In desktop browsers like Firefox this is 1em. In Gecko 1.8.1 (Firefox 2.0) and before the default value was 0 .'}, 'version': 'Firefox (Gecko) 1.5 (1.8)'}, '-moz-column-rule': {'description': 'In multi-column layouts, the -moz-column-rule CSS property specifies a straight line, or "rule", to be drawn between each column. -moz-column-rule is a convenient shorthand to avoid setting each of the individual -moz-column-rule-* properties separately: -moz-column-rule-width , -moz-column-rule-style and -moz-column-rule-color .', 'values': {'<border-style>': 'Required , default value none is used if absent. See border-style for possible values and details.', '<border-width>': 'Optional, is one value or keyword of: <length> | thin | medium | thick Default value medium is used if absent. See border-width for details.', '<color>': "Optional , see <color> value. 
Default value if absent: currentColor , the value of the element's color property (foreground color)."}, 'version': 'Firefox (Gecko) 3.5 (1.9.1)'}, '-moz-column-rule-color': {'description': 'The -moz-column-rule-color CSS property lets you set the color of the rule drawn between columns in multi-column layouts.', 'values': {'<color>': 'See <color> values.'}, 'version': 'Firefox (Gecko) 3.0 (1.9.1)'}, '-moz-column-rule-style': {'description': 'The -moz-column-rule-style CSS property lets you set the style of the rule drawn between columns in multi-column layouts.', 'values': {'<border-style>': 'See border-style'}, 'version': 'Firefox (Gecko) 3.0 (1.9.1)'}, '-moz-column-rule-width': {'description': 'The -moz-column-rule-width CSS property lets you set the width of the rule drawn between columns in multi-column layouts.', 'values': {'<border-width>': 'See border-width .'}, 'version': 'Firefox (Gecko) 3.0 (1.9.1)'}, '-moz-column-width': {'description': 'In Mozilla applications like Firefox, the -moz-column-width CSS property suggests an optimal column width. The actual column width may be wider (to fill the available space), or narrower (only if the available space is smaller than the specified column width).', 'values': {'<length>': 'See <length> value for possible units.'}, 'version': 'Firefox (Gecko) 1.5 (1.8)'}, '-moz-float-edge': {'description': 'bug 432891', 'values': {}}, '-moz-force-broken-image-icon': {'description': '-moz-force-broken-image-icon is an extended CSS property, for more info see bug 58646 . The value 1 forces a broken image icon even if the image has alt text', 'values': {'<integer>': ''}}, '-moz-image-region': {'description': 'For certain XUL elements and pseudo-elements that use an image from the list-style-image property, this property specifies a region of the image that is used in place of the whole image. This allows elements to use different pieces of the same image to improve performance.'}, '-moz-margin-end': {'description': 'In left to right (LTR) situations, the -moz-margin-end CSS property specifies the right margin and is synonymous with margin-right . In RTL cases it sets the left margin (same as margin-left ).', 'values': {'<length>': 'Specifies a fixed width.', '<percentage>': 'A percentage with respect to the width of the containing block.'}, 'version': 'Firefox (Gecko) 1.0 (1.7)'}, '-moz-margin-start': {'description': 'In left to right (LTR) situations the -moz-margin-start CSS property specifies the left margin and is synonymous with margin-left . In RTL cases it sets the right margin (same as margin-right ).', 'values': {'<length>': 'Specifies a fixed width.', '<percentage>': 'a percentage with respect to the width of the containing block.'}, 'version': 'Firefox (Gecko) 1.0 (1.7)'}, '-moz-opacity': {'description': 'The opacity CSS property specifies the transparency of an element, i.e. the degree to which the background behind the element is overlaid.', 'values': {'0': '< number < 1. The element is translucent (background can be seen).', '1': 'The element is fully opaque (solid).'}, 'version': 'Firefox (Gecko) 0.9 (1.7) opacity'}, '-moz-outline': {'description': '(OBSOLETE) Starting with Gecko 1.8 (Firefox 1.5), the standard CSS 2.1 outline property is supported as well. Use of outline is preferred to -moz-outline .', 'values': {}}, '-moz-outline-color': {'description': '(OBSOLETE) Starting with Gecko 1.8 / Firefox 1.5, the standard CSS 2.1 outline-color property is supported as well. 
Use of outline-color is preferred to -moz-outline-color .', 'values': {}}, '-moz-outline-offset': {'description': '(OBSOLETE) Support since Gecko 1.8 (Firefox 1.5) contemporary with the standard CSS 3 outline-offset property. Use only outline-offset .', 'values': {}}, '-moz-outline-radius': {'description': 'In Mozilla applications like Firefox, the -moz-outline-radius CSS property can be used to give outlines rounded corners. An outline is a line that is drawn around elements, outside the border edge, to make the element stand out.', 'values': {'<length>': 'See <length> for possible values', '<percentage>': 'A <percentage> , relative to the width of the box'}}, '-moz-outline-radius-bottomleft': {'description': 'In Mozilla applications, -moz-outline-radius-bottomleft sets the rounding of the bottom-left corner of the outline.'}, '-moz-outline-radius-bottomright': {'description': 'In Mozilla applications, -moz-outline-radius-bottomright sets the rounding of the bottom-right corner of the outline.'}, '-moz-outline-radius-topleft': {'description': 'In Mozilla applications, -moz-outline-radius-topleft sets the rounding of the top-left corner of the outline.'}, '-moz-outline-radius-topright': {'description': 'In Mozilla applications, -moz-outline-radius-topright sets the rounding of the top-right corner of the outline.'}, '-moz-outline-style': {'description': '(OBSOLETE) Starting with Gecko 1.8 / Firefox 1.5, the standard CSS 2.1 outline-style property is supported as well. Use of outline-style is preferred to -moz-outline-style .', 'values': {}}, '-moz-outline-width': {'description': '(OBSOLETE) Starting with Gecko 1.8 / Firefox 1.5, the standard CSS 2.1 outline-width property is supported as well. Use of outline-width is preferred to -moz-outline-width .', 'values': {}}, '-moz-padding-end': {'description': "When rendering right-to-left text, -moz-padding-end flips the element's padding without having to specify absolute left or right. In a left-to-right text display -moz-padding-end is treated as a right sided padding, while in a right-to-left display it is padded on the left.", 'values': {'<length>': 'Specifies a fixed width.', '<percentage>': 'a percentage with respect to the width of the containing block.'}}, '-moz-padding-start': {'description': 'In Right to Left situations -moz-padding-start flips the elements padding without having to specify absolute left or right. In a Left to Right display -moz-padding-start would be treated as a left sided padding, and alternately in a Right to Left display it would become the right.', 'values': {'<length>': 'Specifies a fixed width.', '<percentage>': 'a percentage with respect to the width of the containing block.'}}, '-moz-stack-sizing': {'description': '-moz-stack-sizing is an extended CSS property. Normally, a stack will change its size so that all of its child elements are completely visible. 
For example, moving a child of the stack far to the right will widen the stack so the child remains visible.', 'values': {'ignore': "The stack won't consider this child when calculating the its size.", 'stretch-to-fit': "The child will influence the stack's size."}}, '-moz-transform': {'description': '< CSS < CSS Reference < CSS Reference:Mozilla Extensions Introduced in Gecko 1.9.1 (Firefox 3.5 / Thunderbird 3 / SeaMonkey 2)', 'values': {'none': 'Specifies that no transform should be applied.', 'transform-function': 'One or more of the CSS transform functions to be applied, see below.'}}, '-moz-transform-origin': {'description': "The -moz-transform-origin CSS property lets you modify the origin for transformations of an element. For example, the transform-origin of the rotate() function is the centre of rotation. (This property is applied by first translating the element by the negated value of the property, then applying the element's transform, then translating by the property value.)", 'values': {'<length>': 'With a value pair of e.g. 2cm 1cm , the transform-origin is placed 2cm to the right and 1cm below the upper left corner of the element.', '<percentage>': 'With a value pair of 0% 0% , (or just 0 0 ) the transform-origin is the upper left corner of the box. A value pair of 100% 100% places the transform-origin to the lower right corner. With a value pair of 14% 84% , the point 14% across and 84% down the box is the transform-origin.', 'bottom': 'right | right bottom. Same as 100% 100%', 'center': '| center center. Same as 50% 50% (default value)', 'left': '| left center | center left. Same as 0 50%', 'right': '| right center | center right. Same as 100% 50%', 'top': '| top center | center top. Same as 50% 0'}, 'version': 'Firefox (Gecko) 3.5 (1.9.1)'}, '-moz-transition': {'description': 'The -moz-transition CSS property is a shorthand property for -moz-transition-property , -moz-transition-duration , -moz-transition-timing-function , and -moz-transition-delay .', 'version': 'Firefox (Gecko) 3.7? (Gecko 1.9.3)'}, '-moz-transition-delay': {'description': 'The -moz-transition-delay CSS property specifies the number of seconds to wait between a change being requested to a property that is to be transitioned and the start of the transition effect .', 'values': {'time': "The number of seconds to wait between a property's value changing and the start of the animation effect."}, 'version': 'Firefox (Gecko) 3.7? (Gecko 1.9.3)'}, '-moz-transition-duration': {'description': 'The -moz-transition-duration CSS property specifies the number of seconds a transition animation should take to complete. By default, the value is 0, meaning that no animation will occur.', 'values': {'time': 'The number of seconds the transition from the old value of a property to the new value should take.'}, 'version': 'Firefox (Gecko) 3.7? (Gecko 1.9.3)'}, '-moz-transition-property': {'description': 'The -moz-transition-property CSS property is used to specify the names of CSS properties to which a transition effect should be applied.', 'values': {'all': 'All properties that can have an animated transition will do so.', 'none': 'No properties will transition.', 'property-name': 'A property to which a transition effect should be applied when its value changes.'}, 'version': 'Firefox (Gecko) 3.7? 
(Gecko 1.9.3)'}, '-moz-transition-timing-function': {'description': 'The -moz-transition-timing-function CSS property is used to describe how the intermediate values of the CSS properties being affected by a transition effect are calculated. This in essence lets you establish an acceleration curve, so that the speed of the transition can vary over its duration.', 'values': {'cubic-bezier': 'Specifies a cubic bezier curve to use as the easing function. The four number values specify the P 1 and P 2 points of the curve as (x 1 , y 1 , x 2 , y 2 ). All values must be in the range [0.0, 1.0] inclusive.', 'ease': 'This keyword sets the easing function to cubic-bezier(0.25, 0.1, 0.25, 1.0) .', 'ease-in': 'This keyword sets the easing function to cubic-bezier(0.42, 0.0, 1.0, 1.0) .', 'ease-in-out': 'This keyword sets the easing function to cubic-bezier(0.42, 0.0, 0.58, 1.0) .', 'ease-out': 'This keyword sets the easing function to cubic-bezier(0.0, 0.0, 0.58, 1.0) .', 'linear': 'This keyword sets the easing function to cubic-bezier(0.0, 0.0, 1.0, 1.0) .'}, 'version': 'Firefox (Gecko) 3.7? (Gecko 1.9.3)'}, '-moz-user-focus': {'description': "Used to indicate whether the element can have the focus. By setting this to 'ignore', you can disable focusing the element, which means that the user will not be able to activate the element. The element will be skipped in the tab sequence. A similar property 'user-focus' has been proposed for CSS3.", 'values': {'ignore': 'The element does not accept the keyboard focus and will be skipped in the tab order.', 'normal': 'The element can accept the keyboard focus.'}}, '-moz-user-input': {'description': 'In Mozilla applications, -moz-user-input determines if an element will accept user input.', 'values': {'disabled': 'The element does not accept user input. However, this is not the same as setting disabled to true, in that the element is drawn normally.', 'enabled': 'The element accepts user input. For textboxes, this is the default behavior.', 'none': 'The element does not respond to user input, and it does not become :active .'}}, '-moz-user-modify': {}, '-moz-user-select': {'description': "Controls the appearance (only) of selection. This does not have any affect on actual selection operation. This doesn't have any effect on content loaded as chrome, except in textboxes.", 'values': {'-moz-none': 'The text of the element and sub-elements cannot be selected, but selection can be enabled on sub-elements using -moz-user-select:text .', 'all': 'In HTML editor, if double-click or context-click occurred in sub-elements, the highest ancestor with this value will be selected.', 'none': 'The text of the element and sub-elements will appear as if they cannot be selected. Any use of Selection however will contain these elements.', 'text': 'The text can be selected by the user.'}}, '-moz-window-shadow': {'description': '-moz-window-shadow specifies whether a window will have a shadow. 
Currently it only works on Mac OS X.', 'values': {'default': 'The window will have a shadow with the default window shadow style.', 'none': "The window won't have a shadow."}}, } ### END: Auto generated CSS_MOZ_SPECIFIC_ATTRS_DICT = {} CSS_MOZ_SPECIFIC_CALLTIP_DICT = {} for attr, details in list(CSS_MOZ_DATA.items()): values = details.get("values", {}) attr_completions = sorted(values.keys()) if attr_completions: CSS_MOZ_SPECIFIC_ATTRS_DICT[attr] = attr_completions else: CSS_MOZ_SPECIFIC_ATTRS_DICT[attr] = None description = details.get("description") if description: desc_lines = textwrap.wrap(description, width=60) if values: desc_lines.append("") for value, attr_desc in list(values.items()): attr_desc = " %r: %s" % (value, attr_desc) attr_desc_lines = textwrap.wrap(attr_desc, width=50) for i in range(len(attr_desc_lines)): attr_line = attr_desc_lines[i] if i > 0: attr_line = " " + attr_line desc_lines.append(attr_line) CSS_MOZ_SPECIFIC_CALLTIP_DICT[attr] = "\n".join( desc_lines).encode("ascii", 'replace')
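# Illustrative lookups (not part of the auto-generated block): after the
# loop above, an attribute with documented values maps to a sorted list of
# completions, a value-less attribute maps to None, and the calltip dict
# holds the wrapped, ASCII-encoded description text.
#
#     CSS_MOZ_SPECIFIC_ATTRS_DICT['-moz-box-orient']
#     # -> ['horizontal', 'vertical']
#     CSS_MOZ_SPECIFIC_ATTRS_DICT['-moz-border-end']
#     # -> None
#     CSS_MOZ_SPECIFIC_CALLTIP_DICT['-moz-box-orient']
#     # -> encoded, line-wrapped calltip describing the property and values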
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
    OMERO.fs Abstract Monitor module.

    Copyright 2009 University of Dundee. All rights reserved.
    Use is subject to license terms supplied in LICENSE.txt
"""
import logging
import threading


class AbstractPlatformMonitor(threading.Thread):
    """
        A Thread to monitor a path.

        :group Constructor: __init__
        :group Other methods: run, stop
    """

    def __init__(self, eventTypes, pathMode, pathString, whitelist,
                 blacklist, ignoreSysFiles, ignoreDirEvents, proxy):
        """
            Set-up Monitor thread.
        """
        threading.Thread.__init__(self)
        self.eTypes = []
        for eT in eventTypes:
            self.eTypes.append(str(eT))
        self.pathMode = str(pathMode)
        self.pathsToMonitor = pathString
        self.whitelist = whitelist
        self.blacklist = blacklist
        self.ignoreSysFiles = ignoreSysFiles
        self.ignoreDirEvents = ignoreDirEvents
        self.proxy = proxy
        # Logger used by propagateEvents(); subclasses may replace it.
        self.log = logging.getLogger(__name__)

    def run(self):
        """
            Start monitoring. Platform-specific subclasses override this.

            :return: No explicit return value.
        """
        pass

    def stop(self):
        """
            Stop monitoring. Platform-specific subclasses override this.

            :return: No explicit return value.
        """
        pass

    def propagateEvents(self, eventList):
        """
            Propagate events to proxy.

            :Parameters:
                eventList : list
                    The events to notify the proxy about.

            :return: No explicit return value.
        """
        if len(eventList) > 0:
            try:
                self.log.info('Event notification : %s', str(eventList))
                self.proxy.callback(eventList)
            except Exception:
                self.log.exception("Notification failed : ")
        else:
            self.log.info('No notifications propagated')
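# A minimal sketch (not part of OMERO) of a platform-specific subclass: it
# polls a single directory and forwards newly seen names through
# propagateEvents(). 'PollingMonitor' and the (path, 'Creation') event
# tuples are illustrative assumptions; the real proxy callback may expect a
# different event shape.
import os
import time


class PollingMonitor(AbstractPlatformMonitor):

    def __init__(self, *args, **kwargs):
        AbstractPlatformMonitor.__init__(self, *args, **kwargs)
        self.done = threading.Event()
        self.seen = set()

    def run(self):
        # Poll once a second until stop() is called.
        while not self.done.is_set():
            current = set(os.listdir(self.pathsToMonitor))
            new = current - self.seen
            self.seen = current
            if new:
                self.propagateEvents([(p, 'Creation') for p in sorted(new)])
            time.sleep(1)

    def stop(self):
        self.done.set()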
#!/usr/bin/env python # # Copyright (C) 2016, the ximpol team. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. import os from ximpol import XIMPOL_CONFIG, XIMPOL_DATA from ximpol.core.pipeline import xPipeline from ximpol.evt.binning import xBinnedStokesCube from ximpol.utils.matplotlib_ import pyplot as plt """Script-wide simulation and analysis settings. """ CFG_FILE = os.path.join(XIMPOL_CONFIG, 'uniform_disk.py') OUT_FILE_PATH_BASE = os.path.join(XIMPOL_DATA, 'uniform_disk') EVT_FILE_PATH = '%s.fits' % OUT_FILE_PATH_BASE SIM_DURATION = 10000. EMIN = 2. EMAX = 8. EBINS = 1 NXPIX = 64 NYPIX = 64 BINSZ = 10 """Main pipeline object. """ PIPELINE = xPipeline(clobber=False) def run(): PIPELINE.xpobssim(configfile=CFG_FILE, duration=SIM_DURATION, outfile=EVT_FILE_PATH) stokes_file = PIPELINE.xpbin(EVT_FILE_PATH, algorithm='SCUBE', nxpix=NXPIX, nypix=NYPIX, binsz=BINSZ, emin=EMIN, emax=EMAX, ebins=EBINS) stokes = xBinnedStokesCube(stokes_file) stokes.plot(ebin=0, slice=0, show=False) pol_list = stokes.polarization_degree_angle(ebin=0, smooth=1, sigma=3) fig_list = stokes.plot_polarization_degree_angle(pol_list, show=False) plt.show() if __name__ == '__main__': run()
# Copyright (c) 2016 Hitachi Data Systems # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from webob import exc from manila.api.openstack import wsgi from manila.api.views import share_snapshot_export_locations from manila.db import api as db_api from manila import exception from manila.i18n import _ from manila import policy class ShareSnapshotExportLocationController(wsgi.Controller): def __init__(self): self._view_builder_class = ( share_snapshot_export_locations.ViewBuilder) self.resource_name = 'share_snapshot_export_location' super(self.__class__, self).__init__() @wsgi.Controller.api_version('2.32') @wsgi.Controller.authorize def index(self, req, snapshot_id): context = req.environ['manila.context'] snapshot = self._verify_snapshot(context, snapshot_id) return self._view_builder.list_export_locations( req, snapshot['export_locations']) @wsgi.Controller.api_version('2.32') @wsgi.Controller.authorize def show(self, req, snapshot_id, export_location_id): context = req.environ['manila.context'] self._verify_snapshot(context, snapshot_id) export_location = db_api.share_snapshot_instance_export_location_get( context, export_location_id) return self._view_builder.detail_export_location(req, export_location) def _verify_snapshot(self, context, snapshot_id): try: snapshot = db_api.share_snapshot_get(context, snapshot_id) share = db_api.share_get(context, snapshot['share_id']) if not share['is_public']: policy.check_policy(context, 'share', 'get', share) except exception.NotFound: msg = _("Snapshot '%s' not found.") % snapshot_id raise exc.HTTPNotFound(explanation=msg) return snapshot def create_resource(): return wsgi.Resource(ShareSnapshotExportLocationController())
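# Hedged request sketch (tenant and snapshot IDs are placeholders; the path
# follows the manila routing for this controller, available from API
# microversion 2.32):
#
#     GET /v2/{tenant_id}/snapshots/{snapshot_id}/export-locations
#     X-OpenStack-Manila-API-Version: 2.32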
from auth import auth
from app import app
from flask import jsonify
from peewee import *
from functools import wraps
from flask import g, request, redirect, url_for, Response
from models import Card, Logrefill, Logpayment, Charge
import datetime


def response_auth_failed():
    return Response('Authentication failed', 401, {
        'WWW-Authenticate': 'Basic realm="Login Required"'
    })


def custom_login_required(f):
    @wraps(f)
    def decorated_function(*args, **kwargs):
        basic_auth = request.authorization
        if not basic_auth:
            return response_auth_failed()
        g.user = auth.authenticate(basic_auth.username, basic_auth.password)
        if not g.user:
            return response_auth_failed()
        return f(*args, **kwargs)
    return decorated_function


@app.route('/')
def homepage():
    return 'Welcome to A2B Restful API!'


@app.route('/private/')
@auth.login_required
def private_view():
    return 'This is private!'


@app.route('/custom_api/refill/<int:card_id>', methods=['POST'])
@custom_login_required
def refill(card_id):
    if not request.json or 'credit' not in request.json:
        return Response('Missing credit parameter.', 400)

    # Get Card; both credit and vat are used below, so select both fields
    card = Card.select(Card.credit, Card.vat).where(Card.id == card_id)
    if not card:
        return Response('Card not found.', 400)

    vat = card[0].vat
    credit = float(request.json['credit'])
    prev_credit = card[0].credit
    new_balance = prev_credit + credit
    Card.update(credit=new_balance).where(Card.id == card_id).execute()
    credit_without_vat = credit / (1 + vat / 100)

    # add logrefill (note: datetime.datetime.now() must be called, not passed)
    logrefill = Logrefill(card=card_id, date=datetime.datetime.now(),
                          credit=credit, refill_type=0)
    logrefill.save()

    # add logpayment
    logpayment = Logpayment(card=card_id, date=datetime.datetime.now(),
                            payment=credit, payment_type=0,
                            id_logrefill=logrefill.id)
    logpayment.save()

    # prepare dictionary for JSON return
    data = {
        'card_id': card_id,
        'current_balance': new_balance,
        'credit_without_vat': credit_without_vat,
        'credited': credit,
        'vat': vat,
        'logrefill_id': logrefill.id,
        'logpayment_id': logpayment.id
    }
    return jsonify(data)


@app.route('/custom_api/extra_charge/<int:card_id>', methods=['POST'])
@custom_login_required
def extra_charge(card_id):
    if not request.json or 'amount' not in request.json:
        return Response('Missing amount parameter.', 400)

    # Get Card
    card = Card.select(Card.credit).where(Card.id == card_id)
    if not card:
        return Response('Card not found.', 400)

    amount = float(request.json['amount'])
    prev_credit = card[0].credit
    new_balance = prev_credit - amount
    Card.update(credit=new_balance).where(Card.id == card_id).execute()

    # add charge
    charge = Charge(id_cc_card=card_id, amount=amount, chargetype=4)
    charge.save()

    # prepare dictionary for JSON return
    data = {
        'card_id': card_id,
        'current_balance': new_balance,
        'amount': amount,
        'charge_id': charge.id
    }
    return jsonify(data)
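# Illustrative client call (hypothetical host, card id and credentials; the
# endpoint and JSON payload match the refill() view above):
#
#     import requests
#     resp = requests.post('http://localhost:5000/custom_api/refill/1',
#                          json={'credit': 10.0},
#                          auth=('user', 'password'))
#     print(resp.json())  # card_id, current_balance, credited, vat, ...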
"""Run a Schelling (1969, :cite:`Schelling69`) segregation model and store a list with locations by type at each cycle. The scripts expects that a model name is passed as an argument. The model name must correspond to a file called ``[model_name].json`` in the "IN_MODEL_SPECS" directory. """ import json import logging import pickle import sys import numpy as np from bld.project_paths import project_paths_join as ppj from src.model_code.agent import Agent def setup_agents(model): """Load the simulated initial locations and return a list that holds all agents. """ initial_locations = np.loadtxt( ppj("OUT_DATA", "initial_locations.csv"), delimiter="," ) initial_locations = initial_locations.reshape(2, model["n_types"], 30000) agents = [] for typ in range(model["n_types"]): for i in range(model["n_agents_by_type"][typ]): agents.append( Agent( typ=typ, initial_location=initial_locations[typ, :, i], n_neighbours=model["n_neighbours"], require_same_type=model["require_same_type"], max_moves=model["max_moves"], ) ) return agents def _get_locations_by_round_dict(model): """Return a dictionary with arrays to store locations for each type.""" return { typ: np.zeros((model["n_agents_by_type"][typ], 2)) * np.nan for typ in range(model["n_types"]) } def _store_locations_by_round(loc, agents): """Update the dictionary *loc* with the locations of each agent. Doing so is a bit tedious because we do so by type. """ counter = {0: 0, 1: 0} for agent in agents: typ = agent.type loc[typ][counter[typ], :] = agent.location counter[typ] += 1 def run_analysis(agents, model): """Given an initial set of *agents* and the *model*'s parameters, return a list of dictionaries with *type: N x 2* items. """ locations_by_round = [_get_locations_by_round_dict(model)] _store_locations_by_round(locations_by_round[-1], agents) for loop_counter in range(model["max_iterations"]): logging.info(f"Entering loop {loop_counter}") # Make room for locations. locations_by_round.append(_get_locations_by_round_dict(model)) # Update locations as necessary someone_moved = False for agent in agents: old_location = agent.location # If necessary, move around until happy agent.move_until_happy(agents) if not (agent.location == old_location).all(): someone_moved = True _store_locations_by_round(locations_by_round[-1], agents) # We are done if everybody is happy. if not someone_moved: break if someone_moved: logging.info( "No convergence achieved after {} iterations".format( model["max_iterations"] ) ) return locations_by_round if __name__ == "__main__": model_name = sys.argv[1] model = json.load( open(ppj("IN_MODEL_SPECS", model_name + ".json"), encoding="utf-8") ) logging.basicConfig( filename=ppj("OUT_ANALYSIS", "log", f"schelling_{model_name}.log"), filemode="w", level=logging.INFO, ) np.random.seed(model["rng_seed"]) logging.info(model["rng_seed"]) # Load initial locations and setup agents agents = setup_agents(model) # Run the main analysis locations_by_round = run_analysis(agents, model) # Store list with locations after each round with open(ppj("OUT_ANALYSIS", f"schelling_{model_name}.pickle"), "wb") as out_file: pickle.dump(locations_by_round, out_file)
"""Mixin class for handling connection state changes.""" import logging from homeassistant.helpers.event import async_call_later _LOGGER = logging.getLogger(__name__) TIME_MARK_DISCONNECTED = 10 class ConnectionStateMixin: """Base implementation for connection state handling.""" def __init__(self): """Initialize this mixin instance.""" super().__init__() self._unsub_mark_disconnected = None async def async_got_connected(self, _=None): """Notification that we're connected to the HUB.""" _LOGGER.debug("%s: connected to the HUB", self._name) self.async_write_ha_state() self._clear_disconnection_delay() async def async_got_disconnected(self, _=None): """Notification that we're disconnected from the HUB.""" _LOGGER.debug("%s: disconnected from the HUB", self._name) # We're going to wait for 10 seconds before announcing we're # unavailable, this to allow a reconnection to happen. self._unsub_mark_disconnected = async_call_later( self.hass, TIME_MARK_DISCONNECTED, self._mark_disconnected_if_unavailable ) def _clear_disconnection_delay(self): if self._unsub_mark_disconnected: self._unsub_mark_disconnected() self._unsub_mark_disconnected = None def _mark_disconnected_if_unavailable(self, _): self._unsub_mark_disconnected = None if not self.available: # Still disconnected. Let the state engine know. self.async_write_ha_state()
# Copyright (c) 2013 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import collections import datetime import json import operator import time import flask from dashboard import decorators from dashboard import helpers from dashboard import vault from stackalytics.processor import utils blueprint = flask.Blueprint('reports', __name__, url_prefix='/report') @blueprint.route('/blueprint/<module>/<blueprint_name>') @decorators.templated() @decorators.exception_handler() def blueprint_summary(module, blueprint_name): blueprint_id = utils.get_blueprint_id(module, blueprint_name) bpd = vault.get_memory_storage().get_record_by_primary_key( 'bpd:' + blueprint_id) if not bpd: flask.abort(404) return bpd = helpers.extend_record(bpd) record_ids = vault.get_memory_storage().get_record_ids_by_blueprint_ids( [blueprint_id]) activity = [helpers.extend_record(record) for record in vault.get_memory_storage().get_records(record_ids)] activity.sort(key=lambda x: x['date'], reverse=True) return {'blueprint': bpd, 'activity': activity} def _get_day(timestamp, time_now): return int((time_now - timestamp) / 60 / 60 / 24) def _process_stat(data, key, time_now): if not data: return None data = sorted(data, key=operator.itemgetter(key)) days = _get_day(data[0][key], time_now) chart_data = [0] * (days + 1) sum_ages = 0 for review in data: age = time_now - review[key] sum_ages += age review[key + '_age'] = utils.make_age_string(age) chart_data[_get_day(review[key], time_now)] += 1 return { 'reviews': data, 'average': utils.make_age_string(sum_ages / len(data)), 'max': data[0][key + '_age'], 'chart_data': json.dumps(chart_data), } @blueprint.route('/reviews/<module>/open') @decorators.templated() @decorators.exception_handler() def open_reviews(module): memory_storage_inst = vault.get_memory_storage() time_now = int(time.time()) module_id_index = vault.get_vault()['module_id_index'] module = module.lower() if module in module_id_index: modules = module_id_index[module]['modules'] else: modules = [module] review_ids = (memory_storage_inst.get_record_ids_by_modules(modules) & memory_storage_inst.get_record_ids_by_type('review')) waiting_on_reviewer = [] total_open = 0 for review in memory_storage_inst.get_records(review_ids): if review['status'] == 'NEW': total_open += 1 if review['value'] in [1, 2]: waiting_on_reviewer.append(review) return { 'module': module, 'total_open': total_open, 'waiting_on_reviewer': len(waiting_on_reviewer), 'waiting_on_submitter': total_open - len(waiting_on_reviewer), 'latest_revision': _process_stat( waiting_on_reviewer, 'updated_on', time_now), 'first_revision': _process_stat(waiting_on_reviewer, 'date', time_now), } @blueprint.route('/contribution/<module>/<days>') @decorators.templated() @decorators.exception_handler() def contribution(module, days): return { 'module': module, 'days': days, 'start_date': int(time.time()) - int(days) * 24 * 60 * 60 } def _get_punch_card_data(records): punch_card_raw = [] # matrix days x hours for wday in xrange(0, 7): punch_card_raw.append([0] * 24) for 
record in records: tt = datetime.datetime.fromtimestamp(record['date']).timetuple() punch_card_raw[tt.tm_wday][tt.tm_hour] += 1 punch_card_data = [] # format for jqplot bubble renderer for wday in xrange(0, 7): for hour in xrange(0, 24): v = punch_card_raw[wday][hour] if v: punch_card_data.append([hour, wday, v, v]) # add corner point, otherwise chart doesn't know the bounds if punch_card_raw[0][0] == 0: punch_card_data.append([0, 0, 0, 0]) if punch_card_raw[6][23] == 0: punch_card_data.append([23, 6, 0, 0]) return json.dumps(punch_card_data) @blueprint.route('/users/<user_id>') @decorators.templated() @decorators.exception_handler() def user_activity(user_id): user = vault.get_user_from_runtime_storage(user_id) if not user: flask.abort(404) user = helpers.extend_user(user) memory_storage_inst = vault.get_memory_storage() records = memory_storage_inst.get_records( memory_storage_inst.get_record_ids_by_user_ids([user_id])) records = sorted(records, key=operator.itemgetter('date'), reverse=True) return { 'user': user, 'total_records': len(records), 'contribution': helpers.get_contribution_summary(records), 'punch_card_data': _get_punch_card_data(records), } @blueprint.route('/companies/<company>') @decorators.templated() @decorators.exception_handler() def company_activity(company): memory_storage_inst = vault.get_memory_storage() original_name = memory_storage_inst.get_original_company_name(company) memory_storage_inst = vault.get_memory_storage() records = memory_storage_inst.get_records( memory_storage_inst.get_record_ids_by_companies([original_name])) records = sorted(records, key=operator.itemgetter('date'), reverse=True) return { 'company_name': original_name, 'total_records': len(records), 'contribution': helpers.get_contribution_summary(records), 'punch_card_data': _get_punch_card_data(records), } @blueprint.route('/large_commits') @decorators.jsonify('commits') @decorators.exception_handler() @decorators.record_filter() def get_commit_report(records): loc_threshold = int(flask.request.args.get('loc_threshold') or 0) response = [] for record in records: if ('loc' in record) and (record['loc'] > loc_threshold): nr = dict([(k, record[k]) for k in ['loc', 'subject', 'module', 'primary_key', 'change_id']]) response.append(nr) return response @blueprint.route('/single_plus_two_reviews') @decorators.jsonify() @decorators.exception_handler() @decorators.record_filter(ignore='metric') def get_single_plus_two_reviews_report(records): memory_storage_inst = vault.get_memory_storage() plus_twos = collections.defaultdict(list) for record in records: if record['record_type'] != 'mark': continue if (record['branch'] == 'master' and record['type'] == 'CRVW' and record['value'] == +2): review_id = record['review_id'] review = memory_storage_inst.get_record_by_primary_key(review_id) if review and review['status'] == 'MERGED': plus_twos[review_id].append(record) response = [] for review_id in plus_twos.keys(): if len(plus_twos[review_id]) < 2: mark = plus_twos[review_id][0] review = memory_storage_inst.get_record_by_primary_key( mark['review_id']) response.append({'review_by': review['user_id'], 'mark_by': mark['user_id'], 'subject': review['subject'], 'url': review['url'], 'project': review['project']}) return response
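# Shape of the punch-card payload built by _get_punch_card_data() above
# (values are illustrative): a JSON string holding [hour, weekday, count,
# radius] tuples for the jqplot bubble renderer, e.g.
#
#     '[[9, 0, 4, 4], [15, 2, 1, 1], [23, 6, 0, 0]]'
#
# The zero-count corner points at [0, 0] and [23, 6] are only appended when
# those cells are empty, to pin the chart bounds.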
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

# Copyright (c) 2018 - 2020 Mario Mlačak, [email protected]
# Licensed under 3-clause (modified) BSD license. See LICENSE for details.

from consts import DEFAULT_LINE_WIDTH
from board import Board
from colors import ColorsItem
from def_mark import MarkDefItem
from board_view import BoardView
from draw_mark import DrawMark
from scene import Scene


class DrawScene(DrawMark):
    def __init__(self, scene, max_width_pix, max_height_pix,
                 line_width=DEFAULT_LINE_WIDTH, color_str="#FFFFFF"):
        assert isinstance(scene, Scene)
        assert isinstance(scene.board, Board)
        assert isinstance(scene.board_view, BoardView)

        # Forward the caller's line_width and color_str instead of always
        # falling back to the defaults.
        super(DrawScene, self).__init__(scene.board, max_width_pix,
                                        max_height_pix,
                                        line_width=line_width,
                                        color_str=color_str,
                                        board_view=scene.board_view)
        self.scene = scene

    def draw_scene(self, colors_item, mark_def_item=None):
        assert isinstance(colors_item, ColorsItem)
        assert isinstance(mark_def_item, (MarkDefItem, type(None)))

        self.draw_board(colors_item)

        fmdef = mark_def_item.field_mark_def if isinstance(mark_def_item, MarkDefItem) else None
        self.draw_all_field_markers(self.scene.field_markers, fmdef=fmdef, cmark=colors_item.marker)

        adef = mark_def_item.arrow_def if isinstance(mark_def_item, MarkDefItem) else None
        self.draw_all_arrows(self.scene.arrows, adef=adef, cmark=colors_item.arrow)

        fdef = mark_def_item.font_def if isinstance(mark_def_item, MarkDefItem) else None
        self.draw_all_texts(self.scene.texts, fdef=fdef, cmark=colors_item.text)


TEST_BOARD_SIZE_PIX = 1200
TEST_FIELD_SIZE_PIX = 200  # 100 # 400


def test_scene(func_name, board_desc=None, name='', include_odd=False, *args, **kwargs):
    sc = SceneCommon()
    func = getattr(sc, func_name)

    for bt in BoardType.iter(include_none=False, include_even=True, include_odd=include_odd):
        scene = func(bt, *args, **kwargs)

        if func_name == 'intro_board':
            w = TEST_BOARD_SIZE_PIX
            h = TEST_BOARD_SIZE_PIX
        else:
            w = scene.board_view.width * TEST_FIELD_SIZE_PIX
            h = scene.board_view.height * TEST_FIELD_SIZE_PIX

        ds = DrawScene(scene, w, h)

        ci = Colors[bt]
        ds.draw_scene(ci)

        fn = func_name[6:] if func_name.startswith('intro_') else func_name
        btn = bt.get_symbol()
        file_path = 'temp/%s.%s.%s.IGNORE.png' % (fn, btn, name)
        ds.save_image(file_path)


if __name__ == '__main__':
    from board import BoardType
    from colors import Colors
    from scene_common import SceneCommon

    test_scene('intro_piece')

    test_scene('intro_castling', name='K')
    test_scene('intro_castling', move_king=-2, name='-2')
    test_scene('intro_castling', move_king=2, name='2')

    test_scene('intro_en_passant')
    test_scene('intro_rush')

    test_scene('intro_board')
#!/usr/bin/env python3
# Copyright 2019 IBM Corp.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import sys
import re

from orderedattrdict import AttrDict
from pysnmp.hlapi import *

from lib.utilities import *

SNMP_PORT = 161
PUBLIC = 'public'

BRIDGE_MIB = 'BRIDGE-MIB'
DOT_1D_TP_FDB_PORT = 'dot1dTpFdbPort'


class GetMgmtSwitchConfig(object):
    def __init__(self, log):
        self.log = log

    def get_port_mac(self, rack, switch_mgmt_ipv4):
        self.mac_port = []
        for (
                error_indication,
                error_status,
                error_index,
                var_binds) in nextCmd(
                SnmpEngine(),
                CommunityData(PUBLIC),
                UdpTransportTarget((switch_mgmt_ipv4, SNMP_PORT)),
                ContextData(),
                ObjectType(ObjectIdentity(BRIDGE_MIB, DOT_1D_TP_FDB_PORT)),
                lexicographicMode=False):
            if error_indication:
                self.log.error(error_indication)
                sys.exit(1)
            elif error_status:
                self.log.error('%s at %s' % (
                    error_status.prettyPrint(),
                    error_index and var_binds[int(error_index) - 1][0] or '?'))
                sys.exit(1)
            else:
                _dict = AttrDict()
                for var_bind in var_binds:
                    # Assumes the var_bind renders as e.g.
                    #   BRIDGE-MIB::dot1dTpFdbPort.'0:11:22:33:44:55' = 20
                    # and that PATTERN_MAC (from lib.utilities) contains one
                    # capture group of its own, so the MAC is group 1 and the
                    # port is group 3.
                    match = re.search(
                        (r"^%s::%s\.'" +
                         '(%s)' +
                         r"' = " +
                         r'(\d+)$') % (
                            BRIDGE_MIB, DOT_1D_TP_FDB_PORT, PATTERN_MAC),
                        str(var_bind))
                    mac = match.group(1)
                    port = str(match.group(3))
                    _dict[port] = mac
                    self.log.info(
                        'Rack: %s - MAC: %s - port: %s' %
                        (rack, mac, port))
                self.mac_port.append(_dict)
        return self.mac_port
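# Illustration of the expected var_bind parsing (assumptions: the rendered
# form below, and a stand-in MAC pattern that carries one capture group of
# its own, which is why the port is group 3 in get_port_mac above).
if __name__ == '__main__':
    _PATTERN_MAC_EXAMPLE = r'([\da-fA-F]{1,2}:){5}[\da-fA-F]{1,2}'
    _line = "BRIDGE-MIB::dot1dTpFdbPort.'0:11:22:33:44:55' = 20"
    _m = re.search(r"^%s::%s\.'(%s)' = (\d+)$"
                   % (BRIDGE_MIB, DOT_1D_TP_FDB_PORT, _PATTERN_MAC_EXAMPLE),
                   _line)
    print(_m.group(1), _m.group(3))  # 0:11:22:33:44:55 20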
# This code updates the projects json with languages until it reaches the maximum requests limit.
# GitHub limits authenticated clients to 5,000 requests per hour.
import json
import sys

from github import Github


# finishes our session for now and updates the file
def finalize():
    print('finished for now... writing data and exiting.')
    with open('processed_data/2015-01.json', 'w') as outfile:
        json.dump(projects, outfile)
    sys.exit()

with open('github/config', 'r') as config_file:
    # strip the trailing newline, otherwise it ends up in the password
    user, password = config_file.readline().strip().split(' ')

g = Github(user, password)  # authenticate with the GitHub user from the config

# read the projects file
with open('processed_data/2015-01.json', 'r') as data_file:
    projects = json.load(data_file)

for repo in projects.keys():
    try:
        print("getting language for {}".format(repo))
        if 'languages' not in projects[repo]:
            # get the languages for a project
            projects[repo]['languages'] = g.get_repo(int(repo)).get_languages()
    except Exception as e:
        print(e)
        if 'Maximum number of login attempts exceeded. Please try again later.' in str(e) or 'API rate limit exceeded' in str(e):
            finalize()  # we are out of requests
        else:
            projects[repo]['languages'] = {}  # no languages for this project

finalize()
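# Illustration (hypothetical ids/values): after a pass, each entry of
# processed_data/2015-01.json is expected to look like
#
#   {"3544424": {"languages": {"Python": 51234, "Shell": 802}, ...}, ...}
#
# where the language mapping is PyGithub's get_languages() result (bytes of
# code per language); projects that fail for non-rate-limit reasons get {}.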
# -*- coding: utf-8 -*-
"""Calculates the current version number.

If possible, uses output of “git describe” modified to conform to the
versioning scheme that setuptools uses (see PEP 386).  Releases must be
labelled with annotated tags (signed tags are annotated) of the following
format:

   v<num>(.<num>)+ [ {a|b|c|rc} <num> (.<num>)* ]

If “git describe” returns an error (likely because we're in an unpacked copy
of a release tarball, rather than a git working copy), or returns a tag that
does not match the above format, version is read from RELEASE-VERSION file.

To use this script, simply import it in your setup.py file, and use the
results of getVersion() as your package version:

    import version

    setup(
        version=version.getVersion(),
        .
        .
        .
    )

This will automatically update the RELEASE-VERSION file.  The RELEASE-VERSION
file should *not* be checked into git but it *should* be included in sdist
tarballs (as should version.py file).  To do this, run:

    echo include RELEASE-VERSION version.py >>MANIFEST.in
    echo RELEASE-VERSION >>.gitignore

With that setup, a new release can be labelled by simply invoking:

    git tag -s v1.0

Taken from: https://gist.github.com/mina86/8782771
"""

__author__ = ('Douglas Creager <dcreager@dcreager.net>',
              'Michal Nazarewicz <mina86@mina86.com>')
__license__ = 'This file is placed into the public domain.'
__maintainer__ = 'Michal Nazarewicz'
__email__ = 'mina86@mina86.com'

__all__ = ('getVersion',)


import re
import subprocess
import sys


RELEASE_VERSION_FILE = 'RELEASE-VERSION'

# http://www.python.org/dev/peps/pep-0386/
_PEP386_SHORT_VERSION_RE = r'\d+(?:\.\d+)+(?:(?:[abc]|rc)\d+(?:\.\d+)*)?'
_PEP386_VERSION_RE = r'^%s(?:\.post\d+)?(?:\.dev\d+)?$' % (
    _PEP386_SHORT_VERSION_RE)
_GIT_DESCRIPTION_RE = r'^v(?P<ver>%s)-(?P<commits>\d+)-g(?P<sha>[\da-f]+)$' % (
    _PEP386_SHORT_VERSION_RE)


def readGitVersion():
    try:
        proc = subprocess.Popen(('git', 'describe', '--long',
                                 '--match', 'v[0-9]*.*'),
                                stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        data, _ = proc.communicate()
        if proc.returncode:
            return None
        ver = data.splitlines()[0].strip()
    except:
        return None

    if not ver:
        return None
    m = re.search(_GIT_DESCRIPTION_RE, ver)
    if not m:
        sys.stderr.write('version: git description (%s) is invalid, '
                         'ignoring\n' % ver)
        return None

    commits = int(m.group('commits'))

    if not commits:
        return m.group('ver')
    else:
        return '%s.post%d.dev%d' % (
            m.group('ver'), commits, int(m.group('sha'), 16))


def readReleaseVersion():
    try:
        fd = open(RELEASE_VERSION_FILE)
        try:
            ver = fd.readline().strip()
        finally:
            fd.close()
        if not re.search(_PEP386_VERSION_RE, ver):
            sys.stderr.write('version: release version (%s) is invalid, '
                             'will use it anyway\n' % ver)
        return ver
    except:
        return None


def writeReleaseVersion(version):
    fd = open(RELEASE_VERSION_FILE, 'w')
    fd.write('%s\n' % version)
    fd.close()


def getVersion():
    release_version = readReleaseVersion()
    version = readGitVersion() or release_version
    if not version:
        raise ValueError('Cannot find the version number')
    if version != release_version:
        writeReleaseVersion(version)
    return version


if __name__ == '__main__':
    print getVersion()
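# Illustration of the scheme above (not part of the module): an exact tag
# passes through unchanged, while extra commits yield a post/dev suffix:
#
#   git describe           ->  version
#   v1.2-0-g1af9a5e        ->  1.2
#   v1.2-5-g1af9a5e        ->  1.2.post5.dev28285534
#
# (the dev number is the abbreviated commit sha read as a hex integer)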
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.api.definitions import segment as segment_def from neutron_lib.callbacks import events from neutron_lib.callbacks import registry from neutron_lib.callbacks import resources from oslo_log import log as logging from oslo_utils import uuidutils from neutron.db import api as db_api from neutron.objects import base as base_obj from neutron.objects import network as network_obj LOG = logging.getLogger(__name__) NETWORK_TYPE = segment_def.NETWORK_TYPE PHYSICAL_NETWORK = segment_def.PHYSICAL_NETWORK SEGMENTATION_ID = segment_def.SEGMENTATION_ID NETWORK_ID = 'network_id' def _make_segment_dict(obj): """Make a segment dictionary out of an object.""" return {'id': obj.id, NETWORK_TYPE: obj.network_type, PHYSICAL_NETWORK: obj.physical_network, SEGMENTATION_ID: obj.segmentation_id, NETWORK_ID: obj.network_id} def add_network_segment(context, network_id, segment, segment_index=0, is_dynamic=False): with db_api.context_manager.writer.using(context): netseg_obj = network_obj.NetworkSegment( context, id=uuidutils.generate_uuid(), network_id=network_id, network_type=segment.get(NETWORK_TYPE), physical_network=segment.get(PHYSICAL_NETWORK), segmentation_id=segment.get(SEGMENTATION_ID), segment_index=segment_index, is_dynamic=is_dynamic) netseg_obj.create() registry.notify(resources.SEGMENT, events.PRECOMMIT_CREATE, trigger=add_network_segment, context=context, segment=netseg_obj) segment['id'] = netseg_obj.id LOG.info("Added segment %(id)s of type %(network_type)s for network " "%(network_id)s", {'id': netseg_obj.id, 'network_type': netseg_obj.network_type, 'network_id': netseg_obj.network_id}) def get_network_segments(context, network_id, filter_dynamic=False): return get_networks_segments( context, [network_id], filter_dynamic)[network_id] def get_networks_segments(context, network_ids, filter_dynamic=False): if not network_ids: return {} with db_api.context_manager.reader.using(context): filters = { 'network_id': network_ids, } if filter_dynamic is not None: filters['is_dynamic'] = filter_dynamic objs = network_obj.NetworkSegment.get_objects(context, **filters) result = {net_id: [] for net_id in network_ids} for record in objs: result[record.network_id].append(_make_segment_dict(record)) return result def get_segment_by_id(context, segment_id): with db_api.context_manager.reader.using(context): net_obj = network_obj.NetworkSegment.get_object(context, id=segment_id) if net_obj: return _make_segment_dict(net_obj) def get_dynamic_segment(context, network_id, physical_network=None, segmentation_id=None): """Return a dynamic segment for the filters provided if one exists.""" with db_api.context_manager.reader.using(context): filters = { 'network_id': network_id, 'is_dynamic': True, } if physical_network: filters['physical_network'] = physical_network if segmentation_id: filters['segmentation_id'] = segmentation_id pager = base_obj.Pager(limit=1) objs = network_obj.NetworkSegment.get_objects( context, _pager=pager, **filters) if objs: return 
_make_segment_dict(objs[0])
        else:
            LOG.debug("No dynamic segment found for "
                      "Network:%(network_id)s, "
                      "Physical network:%(physnet)s, "
                      "segmentation_id:%(segmentation_id)s",
                      {'network_id': network_id,
                       'physnet': physical_network,
                       'segmentation_id': segmentation_id})


def delete_network_segment(context, segment_id):
    """Delete the network segment with the given segment id, if one exists."""
    with db_api.context_manager.writer.using(context):
        network_obj.NetworkSegment.delete_objects(context, id=segment_id)
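# Illustration (hypothetical ids/values): the shape returned by
# get_networks_segments() above -- a dict keyed by network id whose values
# are lists of _make_segment_dict() results:
#
#   {'net-1': [{'id': 'seg-1', 'network_type': 'vlan',
#               'physical_network': 'physnet1', 'segmentation_id': 100,
#               'network_id': 'net-1'}],
#    'net-2': []}
#
# Networks with no matching segments map to an empty list rather than being
# omitted, so callers can index the result without a KeyError.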
# -*- coding: utf-8; -*- # # This file is part of Superdesk. # # Copyright 2013 - 2018 Sourcefabric z.u. and contributors. # # For the full copyright and license information, please see the # AUTHORS and LICENSE files distributed with this source code, or # at https://www.sourcefabric.org/superdesk/license from superdesk.io.feed_parsers.stt_newsml import STTNewsMLFeedParser from superdesk.io.registry import register_feed_parser from superdesk.errors import ParserError from . import utils import logging logger = logging.getLogger(__name__) IPTC_NS = 'http://iptc.org/std/nar/2006-10-01/' class NTBSTTNewsMLFeedParser(STTNewsMLFeedParser): """ Feed Parser which can parse STT variant of NewsML """ NAME = 'ntb_sttnewsml' label = "NTB STT NewsML" def can_parse(self, xml): return xml.tag.endswith('newsItem') def parse(self, xml, provider=None): try: item = super().parse(xml, provider)[0] # SDNTB-462 requires that slugline is removed del item['slugline'] sport = bool(self.root.xpath('//iptc:subject[@type="cpnat:abstract" and @qcode="sttsubj:15000000"]', namespaces={'iptc': IPTC_NS})) cat = utils.SPORT_CATEGORY if sport else utils.DEFAULT_CATEGORY category = {'qcode': cat, 'name': cat, 'scheme': 'category'} item['subject'] = utils.filter_missing_subjects(item.get('subject')) item['subject'].append(category) utils.set_default_service(item) return [item] except Exception as ex: raise ParserError.newsmlTwoParserError(ex, provider) register_feed_parser(NTBSTTNewsMLFeedParser.NAME, NTBSTTNewsMLFeedParser())
# -*- coding: utf-8 -*-
"""Functional tests using WebTest.

See: http://webtest.readthedocs.org/
"""
from flask import url_for

from ceraon.user.models import User

from .factories import UserFactory


class TestLoggingIn:
    """Login."""

    def test_can_log_in_returns_200(self, user, testapp):
        """Login successful."""
        # Goes to homepage
        res = testapp.get('/login')
        # Fills out login form in navbar
        form = res.forms['loginForm']
        form['email'] = user.email
        form['password'] = 'example'
        # Submits
        res = form.submit().follow()
        assert res.status_code == 200

    def test_sees_error_message_if_password_is_incorrect(self, user, testapp):
        """Show error if password is incorrect."""
        # Goes to homepage
        res = testapp.get('/login')
        # Fills out login form, password incorrect
        form = res.forms['loginForm']
        form['email'] = user.email
        form['password'] = 'wrong'
        # Submits
        res = form.submit()
        # sees error
        assert 'Invalid password' in res

    def test_sees_error_message_if_email_doesnt_exist(self, user, testapp):
        """Show error if email doesn't exist."""
        # Goes to homepage
        res = testapp.get('/login')
        # Fills out login form, but the email is unknown
        form = res.forms['loginForm']
        form['email'] = '[email protected]'
        form['password'] = 'myprecious'
        # Submits
        res = form.submit()
        # sees error
        assert 'Unknown email' in res


class TestRegistering:
    """Register a user."""

    def test_can_register(self, user, testapp):
        """Register a new user."""
        old_count = len(User.query.all())
        # Goes to registration page
        res = testapp.get('/register/')
        # Fills out the form
        form = res.forms['registerForm']
        form['first_name'] = 'foo'
        form['last_name'] = 'foo'
        form['email'] = '[email protected]'
        form['password'] = 'secret'
        form['confirm'] = 'secret'
        # Submits
        res = form.submit().follow()
        assert res.status_code == 200
        # A new user was created
        assert len(User.query.all()) == old_count + 1

    def test_sees_error_message_if_passwords_dont_match(self, user, testapp):
        """Show error if passwords don't match."""
        # Goes to registration page
        res = testapp.get(url_for('public.register'))
        # Fills out form, but passwords don't match
        form = res.forms['registerForm']
        form['first_name'] = 'foo'
        form['last_name'] = 'foo'
        form['email'] = '[email protected]'
        form['password'] = 'secret'
        form['confirm'] = 'secrets'
        # Submits
        res = form.submit()
        # sees error message
        assert 'Passwords must match' in res

    def test_sees_error_message_if_user_already_registered(self, user, testapp):
        """Show error if user already registered."""
        user = UserFactory(active=True)  # A registered user
        user.save()
        # Goes to registration page
        res = testapp.get(url_for('public.register'))
        # Fills out form, but email is already registered
        form = res.forms['registerForm']
        form['first_name'] = 'foo'
        form['last_name'] = 'foo'
        form['email'] = user.email
        form['password'] = 'secret'
        form['confirm'] = 'secret'
        # Submits
        res = form.submit()
        # sees error
        assert 'Email already registered' in res
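# Illustration (assumed fixtures, not part of this test module): the tests
# above rely on pytest fixtures along these lines in conftest.py -- `testapp`
# wrapping the Flask app in a webtest TestApp, and `user` providing a saved
# account whose password is 'example':
#
#   import pytest
#   from webtest import TestApp
#
#   @pytest.fixture
#   def testapp(app):
#       return TestApp(app)
#
#   @pytest.fixture
#   def user(db):
#       user = UserFactory(password='example')
#       db.session.commit()
#       return user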
""" The prime 41, can be written as the sum of six consecutive primes: 41 = 2 + 3 + 5 + 7 + 11 + 13 This is the longest sum of consecutive primes that adds to a prime below one-hundred. The longest sum of consecutive primes below one-thousand that adds to a prime, contains 21 terms, and is equal to 953. Which prime, below one-million, can be written as the sum of the most consecutive primes? """ from utils import get_prime_list def run(): """ Solution: Brute force by first computing the cumulative sum of the first n primes. Any consecutive prime between the ith and jth prime can be expressed as cum_sum[j] - cum_sum[i]. """ N = 1000000 primes = get_prime_list(N) prime_set = set(primes) cum_sum = [0 for i in xrange(1, len(primes) + 1)] for i in xrange(1, len(primes)): cum_sum[i] = primes[i] + cum_sum[i - 1] longest, best = 21, 953 for i in xrange(len(primes)): for j in xrange(i - longest): l = i - j n = cum_sum[i] - cum_sum[j] if l <= longest or n > N: break if n in prime_set: longest = l best = n return best
# Copyright 2008 by Jens Andersson and Wade Brainerd. # This file is part of Colors! XO. # # Colors is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Colors is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Colors. If not, see <http://www.gnu.org/licenses/>. #!/usr/bin/env python """Colors! XO painting activity. Based on Colors! by Collecting Smiles.""" # Note- I had to rename the self.canvas object *self.easel* for now, because the stupid sugar.window.Window # super class of Activity has its own "canvas" member. If anyone has a better name, feel free to change it. # Reference links: # # Colors! - http://wiki.laptop.org/go/Colors! # PyGTK - http://www.pygtk.org/pygtk2reference/ # Sugar - http://dev.laptop.org/~cscott/joyride-1477-api/ # GStreamer - http://pygstdocs.berlios.de/pygst-reference/index.html # Activities - http://wiki.laptop.org/go/Sugar_Activity_Tutorial # Sharing - http://wiki.laptop.org/go/Shared_Sugar_Activities # Import standard Python modules. import logging, os, sys, math, time, copy, json, tempfile from gettext import gettext as _ # Prefer local modules. from sugar.activity import activity sys.path.insert(0, activity.get_bundle_path()) try: import json json.dumps except (ImportError, AttributeError): import simplejson as json # Import the C++ component of the activity. from colorsc import * # Import PyGTK. import gobject, pygtk, gtk, pango # Needed to avoid thread crashes with GStreamer gobject.threads_init() # Import PyGame. Used for camera and sound. try: from pygame import camera, transform, surface, mask except ImportError: print "No pygame available." # Import DBUS and mesh networking modules. import dbus, telepathy, telepathy.client from dbus import Interface from dbus.service import method, signal from dbus.gobject_service import ExportedGObject from sugar.presence.tubeconn import TubeConnection from sugar.presence import presenceservice from sugar.datastore import datastore # Import Sugar UI modules. from sugar import graphics from sugar.graphics import * from sugar.graphics import toggletoolbutton from sugar.graphics.menuitem import MenuItem # Import GStreamer (for camera access). #import pygst, gst # Initialize logging. log = logging.getLogger('Colors') log.setLevel(logging.DEBUG) logging.basicConfig() # Track memory leaks. #import gc #gc.set_debug(gc.DEBUG_LEAK) # DBUS identifiers are used to uniquely identify the activity for network communcations. DBUS_IFACE = "org.laptop.community.Colors" DBUS_PATH = "/org/laptop/community/Colors" DBUS_SERVICE = DBUS_IFACE # This is the overlay that appears when the user presses the Palette toobar button. It covers the entire screen, # and offers controls for brush type, size, opacity, and color. # # The color wheel and triangle are rendered into GdkImage objects by C++ code in palette.h / palette.cpp which # is compiled into the colorsc module. 
class BrushControlsPanel(gtk.HBox): PALETTE_SIZE = int(7.0 * style.GRID_CELL_SIZE) & ~1 PREVIEW_SIZE = int(2.5 * style.GRID_CELL_SIZE) & ~1 BRUSHTYPE_SIZE = int(1.0 * style.GRID_CELL_SIZE) & ~1 def __init__ (self): gtk.HBox.__init__(self) self.set_property("spacing", 5) self.set_border_width(30) # Locally managed Brush object. self.brush = Brush() # Palette wheel widget. palbox = gtk.VBox() self.palette = Palette(BrushControlsPanel.PALETTE_SIZE) self.paletteimage = gtk.gdk.Image(gtk.gdk.IMAGE_FASTEST, gtk.gdk.visual_get_system(), BrushControlsPanel.PALETTE_SIZE, BrushControlsPanel.PALETTE_SIZE) self.palette.render_wheel(self.paletteimage) self.palette.render_triangle(self.paletteimage) self.palettearea = gtk.DrawingArea() self.palettearea.set_size_request(BrushControlsPanel.PALETTE_SIZE, BrushControlsPanel.PALETTE_SIZE) self.palettearea.add_events(gtk.gdk.POINTER_MOTION_MASK|gtk.gdk.BUTTON_PRESS_MASK|gtk.gdk.BUTTON_RELEASE_MASK) self.palettearea.connect('expose-event', self.on_palette_expose) self.palettearea.connect('motion-notify-event', self.on_palette_mouse) self.palettearea.connect('button-press-event', self.on_palette_mouse) self.palettearea.connect('button-release-event', self.on_palette_mouse) palbox.pack_start(self.palettearea, False, False) # Brush size scrollbar, label and pressure sensitivity checkbox. sizebox = gtk.VBox() sizelabel = gtk.Label(_('Size')) sizebox.pack_end(sizelabel, False) self.size = gtk.Adjustment(50, 1, 130, 1, 10, 10) self.sizebar = gtk.VScale(self.size) self.sizebar.set_property("draw-value", False) self.sizebar.set_property("inverted", True) self.sizebar.connect('value-changed', self.on_size_change) sizebox.pack_end(self.sizebar) self.sizecheck = gtk.CheckButton(_('Sensitive')) self.sizecheck.connect('toggled', self.on_variable_size_toggle) sizebox.pack_end(self.sizecheck, False) # Brush opacity scrollbar, label and pressure sensitivity checkbox. opacitybox = gtk.VBox() opacitylabel = gtk.Label(_('Opacity')) opacitybox.pack_end(opacitylabel, False) self.opacity = gtk.Adjustment(0, 0, 1.1, 0.001, 0.1, 0.1) self.opacitybar = gtk.VScale(self.opacity) self.opacitybar.set_property("draw-value", False) self.opacitybar.set_property("inverted", True) self.opacitybar.connect('value-changed', self.on_opacity_change) opacitybox.pack_end(self.opacitybar) self.opacitycheck = gtk.CheckButton(_('Sensitive')) self.opacitycheck.connect('toggled', self.on_variable_opacity_toggle) opacitybox.pack_end(self.opacitycheck, False) # Force column scrollbars to be equal width. group = gtk.SizeGroup(gtk.SIZE_GROUP_HORIZONTAL) group.add_widget(sizebox) group.add_widget(opacitybox) # Brush preview widget. brushbox = gtk.VBox() brushbox.set_property("spacing", 20) self.preview = BrushPreview(BrushControlsPanel.PREVIEW_SIZE) self.previewimage = gtk.gdk.Image(gtk.gdk.IMAGE_FASTEST, gtk.gdk.visual_get_system(), BrushControlsPanel.PREVIEW_SIZE, BrushControlsPanel.PREVIEW_SIZE) self.previewarea = gtk.DrawingArea() self.previewarea.set_size_request(BrushControlsPanel.PREVIEW_SIZE, BrushControlsPanel.PREVIEW_SIZE) self.previewarea.connect('expose-event', self.on_preview_expose) brushbox.pack_start(self.previewarea, False) # Brush type selection widgets. 
self.brushbtns = [] brushtbl = gtk.Table(1, 2) brushtbl.set_col_spacings(5) brushtbl.attach(self.create_brushtype_widget(BrushType.BRUSHTYPE_SOFT), 0, 1, 0, 1) brushtbl.attach(self.create_brushtype_widget(BrushType.BRUSHTYPE_HARD), 1, 2, 0, 1) brushbox.pack_start(brushtbl, False) self.pack_start(sizebox, False, False) self.pack_start(opacitybox, False, False) self.pack_start(palbox, True, False) self.pack_start(brushbox, False) self.in_toggle_cb = False def create_brushtype_widget (self, type): brusharea = gtk.DrawingArea() brusharea.set_size_request(BrushControlsPanel.BRUSHTYPE_SIZE, BrushControlsPanel.BRUSHTYPE_SIZE) brusharea.connect('expose-event', self.on_brushtype_expose) brusharea.preview = BrushPreview(BrushControlsPanel.BRUSHTYPE_SIZE) brusharea.preview.brush.size = int(BrushControlsPanel.BRUSHTYPE_SIZE*0.75) brusharea.preview.brush.type = type brusharea.preview.brush.color = Color(0,0,0,0) brusharea.previewimage = gtk.gdk.Image(gtk.gdk.IMAGE_FASTEST, gtk.gdk.visual_get_system(), BrushControlsPanel.BRUSHTYPE_SIZE, BrushControlsPanel.BRUSHTYPE_SIZE) brusharea.preview.render(brusharea.previewimage) brushbtn = gtk.ToggleButton() brushbtn.set_image(brusharea) brushbtn.connect('toggled', self.on_brushtype_toggle) brushbtn.brushtype = type self.brushbtns.append(brushbtn) return brushbtn def set_brush (self, brush): self.brush = brush self.opacity.set_value(brush.opacity) self.size.set_value(brush.size) self.palette.set_color(brush.color) self.opacitycheck.set_active((brush.control & Brush.BRUSHCONTROL_VARIABLEOPACITY) != 0) self.sizecheck.set_active((brush.control & Brush.BRUSHCONTROL_VARIABLESIZE) != 0) self.update_brushtype_btns() def on_size_change (self, event): self.brush.size = int(self.size.get_value()) self.previewarea.queue_draw() def on_opacity_change (self, event): self.brush.opacity = self.opacity.get_value() self.previewarea.queue_draw() def on_variable_size_toggle (self, event): if self.sizecheck.get_active(): self.brush.control |= Brush.BRUSHCONTROL_VARIABLESIZE else: self.brush.control &= ~Brush.BRUSHCONTROL_VARIABLESIZE def on_variable_opacity_toggle (self, event): if self.opacitycheck.get_active(): self.brush.control |= Brush.BRUSHCONTROL_VARIABLEOPACITY else: self.brush.control &= ~Brush.BRUSHCONTROL_VARIABLEOPACITY def on_palette_expose (self, widget, event): gc = self.palettearea.get_style().fg_gc[gtk.STATE_NORMAL] old_foreground = gc.foreground old_line_width = gc.line_width # Draw palette image. self.palette.render_triangle(self.paletteimage) self.palettearea.window.draw_image(gc, self.paletteimage, 0, 0, 0, 0, -1, -1) # Draw circles to indicate selected color. # todo- Better looking circles. 
r = int(self.palette.WHEEL_WIDTH*0.75) gc.foreground = self.palettearea.get_colormap().alloc_color(16384,16384,16384) gc.line_width = 2 wheel_pos = self.palette.get_wheel_pos() tri_pos = self.palette.get_triangle_pos() self.palettearea.window.draw_arc(gc, False, int(wheel_pos.x-r/2+2), int(wheel_pos.y-r/2+2), r-4, r-4, 0, 360*64) self.palettearea.window.draw_arc(gc, False, int(tri_pos.x-r/2+2), int(tri_pos.y-r/2+2), r-4, r-4, 0, 360*64) gc.foreground = self.palettearea.get_colormap().alloc_color(65535,65535,65535) gc.line_width = 2 self.palettearea.window.draw_arc(gc, False, int(wheel_pos.x-r/2), int(wheel_pos.y-r/2), r, r, 0, 360*64) self.palettearea.window.draw_arc(gc, False, int(tri_pos.x-r/2), int(tri_pos.y-r/2), r, r, 0, 360*64) gc.foreground = old_foreground gc.line_width = old_line_width def on_palette_mouse (self, widget, event): if event.state & gtk.gdk.BUTTON1_MASK: widget.grab_focus() self.palette.process_mouse(int(event.x), int(event.y)) self.palettearea.queue_draw() self.brush.color = self.palette.get_color() self.previewarea.queue_draw() if event.type == gtk.gdk.BUTTON_RELEASE: self.palette.process_mouse_release() def on_preview_expose (self, widget, event): self.preview.brush = self.brush self.preview.brush.size = self.brush.size*2 # Mimic 2x canvas scaling. self.preview.render(self.previewimage) self.previewarea.window.draw_image(widget.get_style().fg_gc[gtk.STATE_NORMAL], self.previewimage, 0, 0, 0, 0, -1, -1) def on_brushtype_expose (self, widget, event): widget.window.draw_image(widget.get_style().fg_gc[gtk.STATE_NORMAL], widget.previewimage, 0, 0, 0, 0, -1, -1) # Manually implemented radio button using ToggleButtons. def update_brushtype_btns (self): for b in self.brushbtns: b.set_active(b.brushtype == self.brush.type) def on_brushtype_toggle (self, widget): if self.in_toggle_cb: return self.in_toggle_cb = True self.brush.type = widget.brushtype self.update_brushtype_btns() self.previewarea.queue_draw() self.in_toggle_cb = False # This is the overlay that appears when playing back large numbers of drawing commands. # It simply shows how much work is left to do and that progress is taking place. class ProgressPanel(gtk.VBox): def __init__ (self): gtk.VBox.__init__(self) self.set_border_width(50) self.label = gtk.Label() self.label.set_markup("<span foreground='white' size='xx-large'>"+_("Working...")+"</span>") self.progress = gtk.ProgressBar() self.progress.set_fraction(0.5) self.progress.set_orientation(gtk.PROGRESS_LEFT_TO_RIGHT) vbox = gtk.VBox() vbox.set_property("spacing", 20) vbox.pack_start(self.label, False) vbox.pack_start(self.progress, False) self.pack_start(vbox, True, False) # This is the overlay that appears when the Help button is pressed. class HelpPanel(gtk.VBox): def __init__ (self): gtk.VBox.__init__(self) # Add the context sensitive help. 
self.helplabel = gtk.Label()
        self.helplabel.set_padding(10, 10)
        self.helplabel.set_markup('''
<span font_family="monospace" size="large" color="#ffffff">
Keyboard controls:                  Gamepad controls:

    Space     - Open palette            Up     - Zoom in
    v         - Video paint             Down   - Zoom out
    Hand drag - Scroll drag             Left   - Center canvas
    Arrows    - Scroll                  Right  - Scroll drag
    Alt click - Pick up color           Square - Open palette
    Ctrl Up   - Zoom in                 Check  - Undo
    Ctrl Down - Zoom out                Circle - Pick
    Ctrl A    - Center canvas           X      - Paint
    Ctrl Z    - Undo
    Ctrl C    - Copy to clipboard
    Ctrl E    - Erase image
    Alt Enter - Full screen

    Alt + letter - Save brush
    letter       - Restore brush
</span>
''')
        self.helpbox = gtk.EventBox()
        self.helpbox.modify_bg(gtk.STATE_NORMAL, self.helpbox.get_colormap().alloc_color('#000000'))
        self.helpbox.add(self.helplabel)

        vbox = gtk.VBox()
        vbox.set_property("spacing", 20)
        vbox.pack_start(self.helpbox, True, True)

        self.pack_start(vbox, True, True)

# This is the main Colors! activity class.
#
# It owns the main application window, the painting canvas, and all the various toolbars and options.
class Colors(activity.Activity, ExportedGObject):
    # Application mode definitions.
    MODE_INTRO = 0
    MODE_PLAYBACK = 1
    MODE_CANVAS = 2
    MODE_PICK = 3
    MODE_SCROLL = 4
    MODE_PALETTE = 5
    MODE_REFERENCE = 6

    # Button definitions
    BUTTON_PALETTE = 1<<0
    #BUTTON_REFERENCE = 1<<1
    BUTTON_VIDEOPAINT = 1<<2
    BUTTON_SCROLL = 1<<3
    BUTTON_PICK = 1<<4
    BUTTON_ZOOM_IN = 1<<5
    BUTTON_ZOOM_OUT = 1<<6
    BUTTON_CENTER = 1<<7
    BUTTON_TOUCH = 1<<8
    BUTTON_CONTROL = 1<<9
    BUTTON_UNDO = 1<<10

    # Number of drawing steps to execute between progress bar updates. More steps between updates means faster
    # overall drawing but a less responsive UI.
    PROGRESS_DELTA = 50

    def __init__ (self, handle):
        activity.Activity.__init__(self, handle)
        self.set_title(_("Colors!"))

        # Uncomment to test out a bunch of the C++ heavy lifting APIs. Takes awhile on the XO though.
        #self.benchmark()

        # Get activity size. What we really need is the size of the canvasarea, not including the toolbox.
        # This will be figured out on the first paint event, once everything is resized.
        self.width = gtk.gdk.screen_width()
        self.height = gtk.gdk.screen_height()

        # Set the initial mode to None, it will be set to Intro on the first update.
        self.mode = None

        # Set up various systems.
        self.init_input()
        self.init_zoom()
        self.init_scroll()

        # Build the toolbar.
        self.build_toolbar()

        # Set up drawing canvas (which is also the parent for any popup widgets like the brush controls).
        self.build_canvas()

        # Build the brush control popup window.
        self.build_brush_controls()

        # Build the progress display popup window.
        self.build_progress()

        # Build the help popup window.
        self.build_help()

        # Start camera processing.
        self.init_camera()

        # Set up mesh networking.
        self.init_mesh()

        # Scan for input devices.
        self.init_input_devices()

        # This has to happen last, because it calls the read_file method when restoring from the Journal.
        self.set_canvas(self.easelarea)

        # Reveal the main window (but not the panels).
        self.show_all()
        self.brush_controls.hide()
        self.progress.hide()
        self.help.hide()
        self.overlay_active = False

        # Start it running.
self.update_timer = None self.update() # store event.get_axis() of last event to ignore fake pressure # when system doesnt support gtk.gdk.AXIS_PRESSURE but # event.get_axis(gtk.gdk.AXIS_PRESSURE) returns 0.0 value self._prev_AXIS_PRESSURE = None #----------------------------------------------------------------------------------------------------------------- # User interface construction def build_canvas (self): # The canvasarea is the main window which covers the entire screen below the toolbar. self.easelarea = gtk.Layout() self.easelarea.set_size_request(gtk.gdk.screen_width(), gtk.gdk.screen_height()) self.easelarea.set_flags(gtk.CAN_FOCUS) self.set_double_buffered(False) self.easelarea.set_double_buffered(False) # Set up GTK events for the canvasarea. self.easelarea.add_events(gtk.gdk.POINTER_MOTION_MASK|gtk.gdk.POINTER_MOTION_HINT_MASK) self.easelarea.add_events(gtk.gdk.BUTTON_PRESS_MASK|gtk.gdk.BUTTON_RELEASE_MASK) self.easelarea.add_events(gtk.gdk.KEY_PRESS_MASK|gtk.gdk.KEY_RELEASE_MASK) # The actual drawing canvas is at 1/2 resolution, which improves performance by 4x and still leaves a decent # painting resolution of 600x400 on the XO. self.easel = Canvas(gtk.gdk.screen_width()/2, gtk.gdk.screen_height()/2) self.set_brush(self.easel.brush) # Map of keyboard keys to brushes. self.brush_map = {} # The Canvas internally stores the image as 32bit. When rendering, it scales up and blits into canvasimage, # which is in the native 565 resolution of the XO. Then canvasimage is drawn into the canvasarea DrawingArea. self.easelimage = gtk.gdk.Image(gtk.gdk.IMAGE_FASTEST, gtk.gdk.visual_get_system(), self.width, self.height) # Now that we have a canvas, connect the rest of the events. self.easelarea.connect('expose-event', self.on_easelarea_expose) self.connect('key-press-event', self.on_key_event) self.connect('key-release-event', self.on_key_event) self.easelarea.connect('button-press-event', self.on_mouse_event) self.easelarea.connect('button-release-event', self.on_mouse_event) self.easelarea.connect('motion-notify-event', self.on_mouse_event) def build_brush_controls (self): self.brush_controls = BrushControlsPanel() self.brush_controls.set_size_request(gtk.gdk.screen_width(), gtk.gdk.screen_height()) self.easelarea.put(self.brush_controls, 0, 0) def build_progress (self): self.progress = ProgressPanel() self.progress.set_size_request(gtk.gdk.screen_width(), gtk.gdk.screen_height()) self.easelarea.put(self.progress, 0, 0) def build_help (self): self.help = HelpPanel() self.help.set_size_request(gtk.gdk.screen_width(), gtk.gdk.screen_height()) self.easelarea.put(self.help, 0, 0) def build_toolbar (self): self.add_accel_group(gtk.AccelGroup()) # Painting controls (palette, zoom, etc) self.palettebtn = toggletoolbutton.ToggleToolButton('palette') self.palettebtn.set_tooltip(_("Palette")) self.palettebtn.connect('clicked', self.on_palette) self.brushpreview = BrushPreview(Canvas.VIDEO_HEIGHT) self.brushpreviewimage = gtk.gdk.Image(gtk.gdk.IMAGE_FASTEST, gtk.gdk.visual_get_system(), Canvas.VIDEO_HEIGHT, Canvas.VIDEO_HEIGHT) self.brushpreviewarea = gtk.DrawingArea() self.brushpreviewarea.set_size_request(Canvas.VIDEO_HEIGHT, Canvas.VIDEO_HEIGHT) self.brushpreviewarea.connect('expose-event', self.on_brushpreview_expose) self.brushpreviewitem = gtk.ToolItem() self.brushpreviewitem.add(self.brushpreviewarea) # todo- Color picker button, similar semantics to scroll button. 
self.zoomsep = gtk.SeparatorToolItem() self.zoomsep.set_expand(True) self.zoomsep.set_draw(False) self.zoomoutbtn = toolbutton.ToolButton('zoom-out') self.zoomoutbtn.set_tooltip(_("Zoom Out")) self.zoomoutbtn.connect('clicked', self.on_zoom_out) self.zoomoutbtn.props.accelerator = '<Ctrl>Down' self.zoominbtn = toolbutton.ToolButton('zoom-in') self.zoominbtn.set_tooltip(_("Zoom In")) self.zoominbtn.props.accelerator = '<Ctrl>Up' self.zoominbtn.connect('clicked', self.on_zoom_in) self.centerbtn = toolbutton.ToolButton('zoom-original') self.centerbtn.set_tooltip(_("Center Image")) self.centerbtn.connect('clicked', self.on_center) self.centerbtn.props.accelerator = '<Ctrl>A' self.fullscreenbtn = toolbutton.ToolButton('view-fullscreen') self.fullscreenbtn.set_tooltip(_("Fullscreen")) self.fullscreenbtn.connect('clicked', self.on_fullscreen) self.fullscreenbtn.props.accelerator = '<Alt>Enter' self.editsep = gtk.SeparatorToolItem() self.editsep.set_expand(True) self.editsep.set_draw(False) self.undobtn = toolbutton.ToolButton('edit-undo') self.undobtn.set_tooltip(_("Undo")) self.undobtn.connect('clicked', self.on_undo) self.undobtn.props.accelerator = '<Ctrl>Z' self.copybtn = toolbutton.ToolButton('edit-copy') self.copybtn.set_tooltip(_("Copy")) self.copybtn.connect('clicked', self.on_copy) self.copybtn.props.accelerator = '<Ctrl>C' #self.refsep = gtk.SeparatorToolItem() # #self.takerefbtn = toolbutton.ToolButton('take-reference') #self.takerefbtn.set_tooltip(_("Take Reference Picture")) #self.takerefbtn.connect('clicked', self.on_take_reference) #self.take_reference = False # #self.showrefbtn = toggletoolbutton.ToggleToolButton('show-reference') #self.showrefbtn.set_tooltip(_("Show Reference Picture")) #self.showrefbtn.connect('clicked', self.on_show_reference) # self.videopaintsep = gtk.SeparatorToolItem() # self.videopaintbtn = toggletoolbutton.ToggleToolButton('video-paint') self.videopaintbtn.set_tooltip(_("Video Paint")) self.videopaintbtn.connect('clicked', self.on_videopaint) #self.videopaintpreview = gtk.DrawingArea() #self.videopaintpreview.set_size_request(Canvas.VIDEO_WIDTH, Canvas.VIDEO_HEIGHT) #self.videopaintpreview.connect('expose-event', self.on_videopaintpreview_expose) #self.videopaintitem = gtk.ToolItem() #self.videopaintitem.add(self.videopaintpreview) #self.videopaintimage = gtk.gdk.Image(gtk.gdk.IMAGE_FASTEST, gtk.gdk.visual_get_system(), Canvas.VIDEO_WIDTH, Canvas.VIDEO_HEIGHT) self.videopaint_enabled = False self.clearsep = gtk.SeparatorToolItem() self.clearsep.set_expand(True) self.clearsep.set_draw(False) self.clearbtn = toolbutton.ToolButton('erase') self.clearbtn.set_tooltip(_("Erase Image")) self.clearbtn.connect('clicked', self.on_clear) self.clearbtn.props.accelerator = '<Ctrl>E' self.helpbtn = toggletoolbutton.ToggleToolButton('help') self.helpbtn.set_active(False) self.helpbtn.set_tooltip(_("Show Help")) #self.helpbtn.props.accelerator = '<Ctrl>H' self.helpbtn.connect('clicked', self.on_help) #editbox = activity.EditToolbar() #editbox.undo.props.visible = False #editbox.redo.props.visible = False #editbox.separator.props.visible = False #editbox.copy.connect('clicked', self.on_copy) #editbox.paste.connect('clicked', self.on_paste) paintbox = gtk.Toolbar() paintbox.insert(self.palettebtn, -1) paintbox.insert(self.brushpreviewitem, -1) paintbox.insert(self.zoomsep, -1) paintbox.insert(self.zoomoutbtn, -1) paintbox.insert(self.zoominbtn, -1) paintbox.insert(self.centerbtn, -1) paintbox.insert(self.fullscreenbtn, -1) paintbox.insert(self.editsep, -1) 
paintbox.insert(self.undobtn, -1) paintbox.insert(self.copybtn, -1) #paintbox.insert(self.refsep, -1) #paintbox.insert(self.takerefbtn, -1) #paintbox.insert(self.showrefbtn, -1) paintbox.insert(self.videopaintsep, -1) paintbox.insert(self.videopaintbtn, -1) paintbox.insert(self.helpbtn, -1) #paintbox.insert(self.videopaintitem, -1) paintbox.insert(self.clearsep, -1) paintbox.insert(self.clearbtn, -1) # Playback controls self.startbtn = toolbutton.ToolButton('media-playback-start') self.startbtn.set_tooltip(_("Start Playback")) self.startbtn.connect('clicked', self.on_play) self.pausebtn = toolbutton.ToolButton('media-playback-pause') self.pausebtn.set_tooltip(_("Pause Playback")) self.pausebtn.connect('clicked', self.on_pause) #self.backonebtn = toolbutton.ToolButton('media-seek-backward') #self.backonebtn.set_tooltip(_("Back One Stroke")) #self.backonebtn.connect('clicked', self.on_back_one) #self.backonebtn.props.accelerator = '<Ctrl>Left' #self.forwardonebtn = toolbutton.ToolButton('media-seek-forward') #self.forwardonebtn.set_tooltip(_("Forward One Stroke")) #self.forwardonebtn.connect('clicked', self.on_forward_one) #self.forwardonebtn.props.accelerator = '<Ctrl>Right' # Position bar self.playbackpossep = gtk.SeparatorToolItem() self.playbackpossep.set_draw(True) self.beginbtn = toolbutton.ToolButton('media-seek-backward') self.beginbtn.set_tooltip(_("Skip To Beginning")) self.beginbtn.connect('clicked', self.on_skip_begin) self.playbackpos = gtk.Adjustment(0, 0, 110, 1, 10, 10) self.playbackposbar = gtk.HScale(self.playbackpos) self.playbackposbar.connect('value-changed', self.on_playbackposbar_change) self.playbackposbar.ignore_change = 0 self.playbackpositem = gtk.ToolItem() self.playbackpositem.set_expand(True) self.playbackpositem.add(self.playbackposbar) self.endbtn = toolbutton.ToolButton('media-seek-forward') self.endbtn.set_tooltip(_("Skip To End")) self.endbtn.connect('clicked', self.on_skip_end) playbox = gtk.Toolbar() playbox.insert(self.startbtn, -1) playbox.insert(self.pausebtn, -1) playbox.insert(self.beginbtn, -1) playbox.insert(self.endbtn, -1) playbox.insert(self.playbackpossep, -1) playbox.insert(self.playbackpositem, -1) # Sample files to learn from. Reads the list from an INDEX file in the data folder. samplebox = gtk.Toolbar() self.samplebtns = [] samples = [] fd = open(activity.get_bundle_path() + '/data/INDEX', 'r') try: samples = json.loads(fd.read()) finally: fd.close() log.debug("Samples: %r", samples) for s in samples: btn = toolbutton.ToolButton('media-playback-start') btn.filename = activity.get_bundle_path() + '/data/' + s['drw'] btn.set_tooltip(s['title']) img = gtk.Image() img.set_from_file(activity.get_bundle_path() + '/data/' + s['icon']) btn.set_icon_widget(img) btn.connect('clicked', self.on_sample) samplebox.insert(btn, -1) self.samplebtns.append(btn) self.webbtn = toolbutton.ToolButton('web') self.webbtn.set_tooltip(_("Colors! Gallery")) self.webbtn.connect('clicked', self.on_web) self.samplesep = gtk.SeparatorToolItem() self.samplesep.set_draw(False) self.samplesep.set_expand(True) samplebox.insert(self.samplesep, -1) samplebox.insert(self.webbtn, -1) toolbar = activity.ActivityToolbox(self) toolbar.add_toolbar(_("Paint"),paintbox) #toolbar.add_toolbar(_("Edit"),editbox) toolbar.add_toolbar(_("Watch"),playbox) toolbar.add_toolbar(_("Learn"),samplebox) toolbar.show_all() self.set_toolbox(toolbar) # Add Keep As button to activity toolbar. 
activity_toolbar = toolbar.get_activity_toolbar() keep_palette = activity_toolbar.keep.get_palette() menu_item = MenuItem(_('Keep to PNG')) menu_item.connect('activate', self.on_export_png) keep_palette.menu.append(menu_item) menu_item.show() #----------------------------------------------------------------------------------------------------------------- # Camera access # # The new camera module from Pygame, by Nirav Patel, is used for camera access. # It was only recently added, so we have to handle the case where the module doesn't exist. def init_camera (self): self.camera_enabled = False self.videopaintbtn.set_sensitive(False) try: camera_list = camera.list_cameras() if len(camera_list): self.cam = camera.Camera(camera_list[0],(320,240),"RGB") self.camcapture = surface.Surface((320,240),0,16,(63488,2016,31,0)) self.camsmall = surface.Surface((240,180),0,self.camcapture) self.camhsv = surface.Surface((240,180),0,self.camcapture) self.camera_enabled = True self.videopaintbtn.set_sensitive(True) else: log.debug('No cameras found, videopaint disabled.') except NameError: log.debug('Pygame camera module not found, videopaint disabled.') pass #----------------------------------------------------------------------------------------------------------------- # Mesh networking # # The mesh networking system is a little bit wacky, but works reasonably well at the moment. It might need to # be redone in the future for less stable networking environments. # # Each user maintains the current state of the canvas, as well as a 'shared image' state which represents the # 'master' state of the canvas that is shared by all the users. The command index in the drawing command list # that corresponds to the master state is also recorded. # # Each user is allowed to paint 'ahead' of the master state, by appending commands to their local command list. # After each stroke, the commands of the stroke are broadcast as the new master state to the other users. # # Whenever a new master state is received, it will contain a list of commands that follow the old master state # to reach the new one. The user simply rewinds their canvas to the old master state, replaces their local image # with the shared one, and appends the received commands to reach the new master state. # # The net effect is that when the user paints something, they broadcast their own commands, and then *receive* # their own commands, rewinding and then playing them back immediately. So, every users canvas state is simply the # sum of all the broadcasts from themselves and the other users. Since the broadcasts are serialized, every # user has the same state all the time. def init_mesh (self): self.connected = False # If True, the activity is shared with other users. self.initiating = False # If True, this instance started the activity. Otherwise, we joined it. # Set up the drawing send and receive state. # See send_and_receive_draw_commands for more information. self.draw_command_sent = 0 self.draw_command_received = 0 self.draw_command_queue = DrawCommandBuffer() # Get the presence server and self handle. self.pservice = presenceservice.get_instance() self.owner = self.pservice.get_owner() self.connect('shared', self.on_shared) # Called when the user clicks the Share button in the toolbar. self.connect('joined', self.on_join) # Called when the activity joins a remote activity. def on_shared (self, activity): self.initiating = True self.setup_sharing() # Offer a DBus tube that everyone else can connect to. 
self.tubes_chan[telepathy.CHANNEL_TYPE_TUBES].OfferDBusTube(DBUS_SERVICE, {}) # Cancel the intro if playing. self.set_mode(Colors.MODE_CANVAS) def on_list_tubes_reply(self, tubes): # Called by on_join. for tube_info in tubes: self.on_tube(*tube_info) def on_list_tubes_error(self, e): # Called by on_join. pass def on_join (self, activity): self.initiating = False self.setup_sharing() # List existing tubes. There should only be one, which will invoke the on_new_tube callback. self.tubes_chan[telepathy.CHANNEL_TYPE_TUBES].ListTubes( reply_handler=self.on_list_tubes_reply, error_handler=self.on_list_tubes_error) # Cancel the intro if playing. self.set_mode(Colors.MODE_CANVAS) def setup_sharing (self): """Called to initialize mesh networking objects when the activity becomes shared (on_shared) or joins an existing shared activity (on_join).""" # Cache connection related objects. self.conn = self._shared_activity.telepathy_conn self.tubes_chan = self._shared_activity.telepathy_tubes_chan self.text_chan = self._shared_activity.telepathy_text_chan # This will get called as soon as the connection is established. self.tubes_chan[telepathy.CHANNEL_TYPE_TUBES].connect_to_signal('NewTube', self.on_tube) # Called when a buddy joins us or leaves (does nothing right now). self._shared_activity.connect('buddy-joined', self.on_buddy_joined) self._shared_activity.connect('buddy-left', self.on_buddy_left) def on_tube (self, id, initiator, type, service, params, state): """Called by the NewTube callback or the ListTubes enumeration, when a real connection finally exists.""" if (type == telepathy.TUBE_TYPE_DBUS and service == DBUS_SERVICE): # If the new tube is waiting for us to finalize it, do so. if state == telepathy.TUBE_STATE_LOCAL_PENDING: self.tubes_chan[telepathy.CHANNEL_TYPE_TUBES].AcceptDBusTube(id) if not self.connected: # Create the TubeConnection object to manage the connection. self.tube = TubeConnection(self.conn, self.tubes_chan[telepathy.CHANNEL_TYPE_TUBES], id, group_iface=self.text_chan[telepathy.CHANNEL_INTERFACE_GROUP]) ExportedGObject.__init__(self, self.tube, DBUS_PATH) # Set up DBUS Signal receiviers. self.tube.add_signal_receiver(self.ReceiveHello, 'BroadcastHello', DBUS_IFACE, path=DBUS_PATH) self.tube.add_signal_receiver(self.ReceiveCanvasMode, 'BroadcastCanvasMode', DBUS_IFACE, path=DBUS_PATH) self.tube.add_signal_receiver(self.ReceiveClear, 'BroadcastClear', DBUS_IFACE, path=DBUS_PATH) self.tube.add_signal_receiver(self.ReceiveDrawCommands, 'BroadcastDrawCommands', DBUS_IFACE, path=DBUS_PATH) self.tube.add_signal_receiver(self.ReceivePlayback, 'BroadcastPlayback', DBUS_IFACE, path=DBUS_PATH) log.debug("Connected.") self.connected = True # Limit UI choices when sharing. self.disable_shared_commands() # Announce our presence to the server. if not self.initiating: self.BroadcastHello() # Notes about DBUS signals: # - When you call a @signal function, its implementation is invoked, and the registered callback is invoked on all # the peers (including the one who invoked the signal!). So it's usually best for the @signal function to do # nothing at all. # - The 'signature' describes the parameters to the function. @signal(dbus_interface=DBUS_IFACE, signature='') def BroadcastHello (self): """Broadcast signal sent when a client joins the shared activity.""" pass def ReceiveHello (self): if not self.initiating: return # Only the initiating peer responds to Hello commands. log.debug("Received Hello. 
Responding with canvas state (%d commands).", self.easel.playback_length())
        self.BroadcastCanvasMode()
        self.BroadcastClear()
        buf = self.easel.send_drw_commands(0, self.easel.get_num_commands())
        self.BroadcastDrawCommands(buf.get_bytes(), buf.ncommands)
        self.update()

    @signal(dbus_interface=DBUS_IFACE, signature='')
    def BroadcastCanvasMode (self):
        """Broadcast signal for forcing clients into Canvas mode."""
        pass

    def ReceiveCanvasMode (self):
        log.debug("ReceiveCanvasMode")
        if self.mode != Colors.MODE_CANVAS:
            self.set_mode(Colors.MODE_CANVAS)
        self.update()

    @signal(dbus_interface=DBUS_IFACE, signature='')
    def BroadcastClear (self):
        """Broadcast signal for clearing the canvas."""
        pass

    def ReceiveClear (self):
        log.debug("ReceiveClear")
        self.easel.clear()
        self.easel.save_shared_image()
        self.update()

    @signal(dbus_interface=DBUS_IFACE, signature='ayi')
    def BroadcastDrawCommands (self, cmds, ncommands):
        """Broadcast signal for drawing commands."""
        pass

    def ReceiveDrawCommands (self, cmds, ncommands):
        log.debug("ReceiveDrawCommands")
        s = "".join(chr(b) for b in cmds)  # Convert dbus.ByteArray to Python string.
        self.draw_command_queue.append(DrawCommandBuffer(s, ncommands))
        self.update()

    @signal(dbus_interface=DBUS_IFACE, signature='bii')
    def BroadcastPlayback (self, playing, playback_pos, playback_speed):
        """Broadcast signal controlling playback.  Not yet used."""
        pass

    # The receiver must accept the signal's 'bii' payload, matching
    # BroadcastPlayback above.
    def ReceivePlayback (self, playing, playback_pos, playback_speed):
        log.debug("ReceivePlayback")
        if playing:
            if self.mode != Colors.MODE_PLAYBACK:
                self.set_mode(Colors.MODE_PLAYBACK)
        else:
            if self.mode == Colors.MODE_PLAYBACK:
                self.set_mode(Colors.MODE_CANVAS)
        self.easel.playback_to(playback_pos)
        self.easel.set_playback_speed(playback_speed)
        self.update()

    def on_buddy_joined (self, activity, buddy):
        log.debug('Buddy %s joined', buddy.props.nick)

    def on_buddy_left (self, activity, buddy):
        log.debug('Buddy %s left', buddy.props.nick)

    def send_and_receive_draw_commands (self):
        if self.connected:
            # Broadcast drawing commands that were generated by this user since the last call to this function.
            if self.draw_command_sent < self.easel.get_num_commands():
                # TODO: Always prepend the current brush here.
                buf = self.easel.send_drw_commands(self.draw_command_sent, self.easel.get_num_commands()-self.draw_command_sent)
                self.BroadcastDrawCommands(buf.get_bytes(), buf.ncommands)

            # Play any queued draw commands that were received from the host.  If there are any, we first reset the
            # canvas contents back to the last received state and then play them back.
            if self.draw_command_queue.ncommands:
                # Also, we have to save and restore the brush around the queued commands.
                saved_brush = self.easel.brush
                self.easel.receive_drw_commands(self.draw_command_queue, self.draw_command_sent)
                self.easel.restore_shared_image()
                self.easel.play_range(self.draw_command_sent, self.easel.get_num_commands())
                self.easel.save_shared_image()
                self.draw_command_queue.clear()
                self.set_brush(saved_brush)
                self.flush_dirty_canvas()

            # Note that resetting the state above means "undoing" the commands we just broadcast.  We will receive them
            # again by our ReceiveDrawCommands callback immediately, and will play them back so the user shouldn't notice.
            self.draw_command_sent = self.easel.get_num_commands()

    def disable_shared_commands (self):
        """Disables UI controls which cannot be activated by non-host peers."""
        # Cannot control playback.
self.startbtn.set_sensitive(False) self.pausebtn.set_sensitive(False) self.beginbtn.set_sensitive(False) self.endbtn.set_sensitive(False) self.playbackposbar.set_sensitive(False) self.undobtn.set_sensitive(False) # Cannot activate sample drawings. for s in self.samplebtns: s.set_sensitive(False) #----------------------------------------------------------------------------------------------------------------- # Input device (Wacom etc.) code def init_input_devices(self): self.easelarea.set_extension_events(gtk.gdk.EXTENSION_EVENTS_CURSOR) self.devices = gtk.gdk.devices_list() for d in self.devices: log.debug('Input Device: name=\'%s\'' % (d.name)) d.set_mode(gtk.gdk.MODE_SCREEN) #----------------------------------------------------------------------------------------------------------------- # Input code def init_input (self): self.cur_buttons = 0 self.pending_press = 0 self.pending_release = 0 self.mx = 0 self.my = 0 self.pressure = 255 self.lastmx = 0 self.lastmy = 0 self.lastr = 0 def on_key_event (self, widget, event): key_name = gtk.gdk.keyval_name(event.keyval) # Useful for manually working out keyvals for OLPC keys. log.debug("on_key_event: hardware_keycode=%d name=%s", event.hardware_keycode, key_name) button = 0 if key_name == 'Shift_L' or key_name == 'Shift_R': button = Colors.BUTTON_CONTROL # Space bar for Palette (todo- need something better!). elif key_name == 'space': button = Colors.BUTTON_PALETTE # 'r' for Reference (todo- need something better!). #elif event.keyval == ord('r'): # button = Colors.BUTTON_REFERENCE # 'v' for Videopaint (todo- need something better!). elif event.keyval == ord('v'): button = Colors.BUTTON_VIDEOPAINT # 's' hotkey to save PNG thumbnail of the current canvas as 'thumb.png'. #elif event.keyval == ord('s'): # self.save_thumbnail(activity.get_bundle_path() + '/thumb.png') # OLPC 'hand' buttons for scrolling. elif event.hardware_keycode == 133 or event.hardware_keycode == 134: button = Colors.BUTTON_SCROLL # OLPC 'size' buttons for intensity. #elif event.keyval == 286: button = Colors.BUTTON_SIZE_0 #elif event.keyval == 287: button = Colors.BUTTON_SIZE_1 #elif event.keyval == 288: button = Colors.BUTTON_SIZE_2 #elif event.keyval == 289: button = Colors.BUTTON_SIZE_3 # Arrow keys for scrolling. elif key_name == 'Up': if event.type == gtk.gdk.KEY_PRESS: self.scroll_to(self.scroll + Pos(0, 50)) self.flush_entire_canvas() button = Colors.BUTTON_SCROLL elif key_name == 'Down': if event.type == gtk.gdk.KEY_PRESS: self.scroll_to(self.scroll + Pos(0, -50)) self.flush_entire_canvas() button = Colors.BUTTON_SCROLL elif key_name == 'Left': if event.type == gtk.gdk.KEY_PRESS: self.scroll_to(self.scroll + Pos(50, 0)) self.flush_entire_canvas() button = Colors.BUTTON_SCROLL elif key_name == 'Right': if event.type == gtk.gdk.KEY_PRESS: self.scroll_to(self.scroll + Pos(-50, 0)) self.flush_entire_canvas() button = Colors.BUTTON_SCROLL # Either Alt key for pick. elif key_name == 'Alt_L' or key_name == 'ISO_Level3_Shift': button = Colors.BUTTON_PICK # Gamepad directions. elif key_name == 'KP_Up': button = Colors.BUTTON_ZOOM_IN elif key_name == 'KP_Down': button = Colors.BUTTON_ZOOM_OUT elif key_name == 'KP_Left': button = Colors.BUTTON_CENTER elif key_name == 'KP_Right': button = Colors.BUTTON_SCROLL # Gamepad keys. 
elif key_name == 'KP_Page_Down': button = Colors.BUTTON_TOUCH elif key_name == 'KP_Page_Up': button = Colors.BUTTON_PALETTE elif key_name == 'KP_Home': button = Colors.BUTTON_PICK elif key_name == 'KP_End': button = Colors.BUTTON_UNDO if button != 0: if event.type == gtk.gdk.KEY_PRESS: self.pending_press = self.pending_press | button else: self.pending_release = self.pending_release | button self.update_input() self.update() return True else: # Not a known key. Try to store / retrieve a brush. key = unichr(event.keyval).lower() if self.cur_buttons & Colors.BUTTON_PICK: self.brush_map[key] = Brush(self.easel.brush) else: if self.brush_map.has_key(key): self.set_brush(self.brush_map[key]) return False def on_mouse_event (self, widget, event): if self.overlay_active: return if event.type == gtk.gdk.BUTTON_PRESS: if event.button == 1: self.pending_press = self.pending_press | Colors.BUTTON_TOUCH if event.type == gtk.gdk.BUTTON_RELEASE: if event.button == 1: self.pending_release = self.pending_release | Colors.BUTTON_TOUCH if event.type == gtk.gdk.MOTION_NOTIFY: if event.is_hint: x, y, state = event.window.get_pointer() else: x, y = event.get_coords() state = event.get_state() # Read pressure information if available. pressure = event.get_axis(gtk.gdk.AXIS_PRESSURE) if pressure or self._prev_AXIS_PRESSURE: pressure = min(pressure, 1.0) self._prev_AXIS_PRESSURE = pressure self.pressure = int(pressure * 255) else: self.pressure = 255 # When 0 pressure is received, simulate a button release. if self.pressure <= 0: self.pending_release = self.pending_release | Colors.BUTTON_TOUCH # Sometimes x, y comes back as inf, inf. try: self.mx = int(x) self.my = int(y) except: self.mx = 0 self.my = 0 # Any mouse movement over the canvas grabs focus, so we keyboard events. if not widget.is_focus(): widget.grab_focus() self.update_input() # Process the update (unless we are animating). 
if not self.update_timer: self.update() #self.flush_cursor() return True def update_input (self): buttons = self.cur_buttons buttons = buttons | self.pending_press self.pending_press = 0 buttons = buttons & ~self.pending_release self.pending_release = 0 hold = buttons & self.cur_buttons if self.cur_buttons != buttons: changed = self.cur_buttons ^ buttons press = changed & buttons release = changed & self.cur_buttons self.cur_buttons = buttons if press != 0: self.on_press(press) if release != 0: self.on_release(release) if hold != 0: self.on_hold(hold) def on_press (self, button): if button & Colors.BUTTON_ZOOM_IN: self.zoom_in() return if button & Colors.BUTTON_ZOOM_OUT: self.zoom_out() return if button & Colors.BUTTON_CENTER: self.center_image() return if button & Colors.BUTTON_VIDEOPAINT: self.videopaintbtn.set_active(not self.videopaint_enabled) return if self.mode == Colors.MODE_CANVAS: if button & Colors.BUTTON_UNDO: self.undo() return if button & Colors.BUTTON_PALETTE: self.set_mode(Colors.MODE_PALETTE) return #if button & Colors.BUTTON_REFERENCE: # self.set_mode(Colors.MODE_REFERENCE) # return if button & Colors.BUTTON_SCROLL: self.set_mode(Colors.MODE_SCROLL) return if self.cur_buttons & Colors.BUTTON_PICK: self.set_mode(Colors.MODE_PICK) return if self.mode == Colors.MODE_PALETTE: if button & Colors.BUTTON_PALETTE: self.set_mode(Colors.MODE_CANVAS) return #if self.mode == Colors.MODE_REFERENCE: # if button & Colors.BUTTON_REFERENCE: # self.set_mode(Colors.MODE_CANVAS) # return def on_release (self, button): if self.mode == Colors.MODE_SCROLL: if button & Colors.BUTTON_SCROLL: self.set_mode(Colors.MODE_CANVAS) return if self.mode == Colors.MODE_PICK: if button & Colors.BUTTON_PICK: self.set_mode(Colors.MODE_CANVAS) return def on_hold (self, button): pass #----------------------------------------------------------------------------------------------------------------- # Scroll code def init_scroll (self): self.scroll = Pos(0,0) self.scrollref = None def scroll_to (self, pos): self.scroll = Pos(pos.x, pos.y) #log.debug('self.scroll: x=%f y=%f' % (self.scroll.x, self.scroll.y)) # Clamp scroll position to within absolute limits or else center. if self.easel.width*self.zoom < self.width: self.scroll.x = (self.width-self.easel.width)/2 else: self.scroll.x = max(min(self.scroll.x, 100), -(self.easel.width*self.zoom - self.width + 100)) if self.easel.height*self.zoom < self.height: self.scroll.y = (self.height-self.easel.height)/2 else: self.scroll.y = max(min(self.scroll.y, 100), -(self.easel.height*self.zoom - self.height + 100)) def center_image(self): self.scroll.x = (self.width-self.easel.width*self.zoom)/2 self.scroll.y = (self.height-self.easel.height*self.zoom)/2 self.flush_entire_canvas() #----------------------------------------------------------------------------------------------------------------- # Zoom code def init_zoom (self): self.zoom = 2.0 self.zoomref = None def zoom_to (self, zoom): # End any current stroke. self.end_draw() # Adjust scroll position to keep the mouse centered on screen while the zoom changes. 
scrollcenter = self.screen_to_easel(Pos(self.mx, self.my)) self.scroll = Pos(0,0) self.zoom = zoom #log.debug('zoom %f', self.zoom) scrollcenter = Pos(0,0) - self.easel_to_screen(scrollcenter) + Pos(self.mx, self.my) self.scroll_to(scrollcenter) self.zoominbtn.set_sensitive(self.zoom < 8.0) self.zoomoutbtn.set_sensitive(self.zoom > 1.0) self.flush_entire_canvas() def zoom_in (self): if self.zoom == 1.0: self.zoom_to(2.0) elif self.zoom == 2.0: self.zoom_to(4.0) elif self.zoom == 4.0: self.zoom_to(8.0) def zoom_out (self): if self.zoom == 8.0: self.zoom_to(4.0) elif self.zoom == 4.0: self.zoom_to(2.0) elif self.zoom == 2.0: self.zoom_to(1.0) def easel_to_screen(self, pos): r = Pos(pos.x, pos.y) r = r * Pos(self.zoom, self.zoom) r = r + self.scroll return r def screen_to_easel(self, pos): r = Pos(pos.x, pos.y) r = r - self.scroll r = r / Pos(self.zoom, self.zoom) return r #----------------------------------------------------------------------------------------------------------------- # Drawing commands # # These commands instruct the underlying canvas to execute commands which modify the canvas. # # All drawing operations become commands which are executed immediately and also recorded so that the painting can # be played back. def draw (self, pos): relpos = self.screen_to_easel(pos) relpos = relpos / Pos(self.easel.width, self.easel.height) self.easel.play_command(DrawCommand.create_draw(relpos, int(self.pressure)), True) def end_draw (self): if self.easel.stroke: self.easel.play_command(DrawCommand.create_end_draw(int(self.pressure)), True) # Send to sharing participants. self.send_and_receive_draw_commands() # Record a new default zoom-in focal point. #self.zoomref = (self.easel.strokemin + self.easel.strokemax) * Pos(0.5,0.5) def set_brush (self, brush): #log.debug("set_brush color=%d,%d,%d type=%d size=%d opacity=%d", brush.color.r, brush.color.g, brush.color.b, brush.type, brush.size, brush.opacity) # End any current stroke. if self.easel.stroke: self.end_draw() self.easel.play_command(DrawCommand.create_color_change(brush.color), True) self.easel.play_command(DrawCommand.create_size_change(brush.control, brush.type, brush.size/float(self.easel.width), brush.opacity), True) if self.mode == Colors.MODE_PALETTE: self.brush_controls.set_brush(self.easel.brush) self.brush_controls.queue_draw() self.brushpreviewarea.queue_draw() def pickup_color(self, pos): relpos = self.screen_to_easel(pos) color = self.easel.pickup_color(relpos) self.easel.play_command(DrawCommand.create_color_change(color), True) self.brushpreviewarea.queue_draw() def play_to (self, to): """Plays drawing commands until playbackpos.get_value() is reached. This may involve resetting the canvas if the given position is before the current position, since it is impossible to play commands backwards. Called to skip the playback position, for example when fast forwarding or rewinding or dragging the scrollbar.""" self.playbackposbar.ignore_change += 1 log.debug("play_to %d easel_pos=%d", to, self.easel.playback_pos()) total_left = 0 # This used to be in the while True: loop, such that the overlay would only appear when # there is a lot of work to do. It might go back there later. self.progress.set_size_request(self.width, self.height) self.progress.show_all() self.easelarea.set_double_buffered(True) self.overlay_active = True self.flush_entire_canvas() # Display and run the GTK loop. This hack takes a set number of events per loop, # as checking the gtk.events_pending() function leads to an infinite loop. 
for i in range(0, 5): if gtk.main_iteration(False): self.playbackposbar.ignore_change -= 1 #log.debug("play_to: main loop quit requested.") return # Keep looping until the position is reached. Since we activate the GTK event loop processing from within # our inner loop, the user can actually move the scrollbar while this function is running! while True: if self.easel.playback_pos() == to: break # Rewind if needed. if self.easel.playback_pos() > to: self.easel.clear_image() self.easel.start_playback() total_left = max(total_left, to - self.easel.playback_pos()) # Advance playback by as much as we can in 1/10th of a second. startpos = self.easel.playback_pos() starttk = time.time() endtk = starttk + 1.0/10.0 while self.easel.playback_pos() < to: self.easel.playback_step_to(to) if time.time() >= endtk: break #log.debug("from %d to %d in %fs", startpos, self.easel.playback_pos(), time.time()-starttk) # This is currently disabled as it leads to the playback position passing the requested position, which # can cause infinite "Working..." loops when the mouse is held down. # Leaving it out can lead to inaccurate playback, when the play-to position is mid-stroke. self.easel.playback_finish_stroke() # Update the progress bar. if total_left > 0: f = 1.0-float(to - self.easel.playback_pos())/total_left self.progress.progress.set_fraction(f) if self.easel.playback_pos() >= to: break # Comment this in to watch the painting and slow things down. #self.flush_entire_canvas() # Display and run the GTK loop. This hack takes a set number of events per loop, # as checking the gtk.events_pending() function leads to an infinite loop. for i in range(0, 5): if gtk.main_iteration(False): self.playbackposbar.ignore_change -= 1 #log.debug("play: main loop quit requested.") return self.overlay_active = False self.progress.hide_all() self.easelarea.set_double_buffered(False) self.flush_entire_canvas() self.playbackposbar.ignore_change -= 1 # Canvas repainting. These methods draw portions of the canvas to the canvasarea. def flush_dirty_canvas (self): """Causes a redraw of the canvas area which has been modified since the last call to this function.""" # Skip on negative area rectangle. if self.easel.dirtymin.x > self.easel.dirtymax.x: return mn = self.easel_to_screen(self.easel.dirtymin) mx = self.easel_to_screen(self.easel.dirtymax) #log.debug("x=%d y=%d width=%d height=%d" % (x, y, w, h)) self.draw_easelarea(gtk.gdk.Rectangle(int(mn.x), int(mn.y), int(mx.x-mn.x+1), int(mx.y-mn.y+1))) self.easel.reset_dirty_rect() def flush_entire_canvas (self): """Causes a redraw of the entire canvas area.""" self.easelarea.queue_draw() self.easel.reset_dirty_rect() def flush_cursor (self): """Causes a redraw of the canvas area covered by the cursor.""" r = int(self.zoom*self.easel.brush.size*self.pressure/256) #log.debug("mx=%d my=%d r=%d lastmx=%d lastmy=%d lastr=%d" % \ # (self.mx, self.my, r, self.lastmx, self.lastmy, self.lastr)) x0 = min(self.lastmx-self.lastr, self.mx-r) y0 = min(self.lastmy-self.lastr, self.my-r) x1 = max(self.lastmx+self.lastr, self.mx+r) y1 = max(self.lastmy+self.lastr, self.my+r) self.draw_easelarea(gtk.gdk.Rectangle(x0, y0, x1-x0+2, y1-y0+2)) #----------------------------------------------------------------------------------------------------------------- # Application states # # Colors is controlled by a finite state machine which keeps track of the current application mode (painting, # playing back, scrolling, zooming, etc) and manages transitions between states. 
# # Each state is allowed to process code when it is entered or left, for the purpose of initializing variables and # cleaning up afterwards. # # todo- Consider breaking up into enter_intro, enter_playback, enter_canvas, etc. def start_update_timer(self): if self.update_timer: gobject.source_remove(self.update_timer) # The timer priority is chosen to be above PRIORITY_REDRAW (which is PRIORITY_HIGH_IDLE_20, but not defined in PyGTK). self.update_timer = gobject.timeout_add(1, self.update, priority=gobject.PRIORITY_HIGH_IDLE+30) def enter_mode (self): if self.mode == Colors.MODE_INTRO: # Load and play intro movie. It was created on a DS at 60hz, so we need to speed it up drastically to # make it watchable. self.clear_undo() self.easel.clear() self.easel.load(str(activity.get_bundle_path() + "/data/intro.drw")) self.easel.set_playback_speed(8) self.easel.start_playback() self.start_update_timer() if self.mode == Colors.MODE_PLAYBACK: self.easel.set_playback_speed(1) self.easel.start_playback() self.start_update_timer() if self.mode == Colors.MODE_CANVAS: # Clear any existing button pressure to avoid a blotch on the screen when entering Canvas mode. self.cur_buttons = 0 # Reset the brush. self.set_brush(self.easel.brush) # Progress bar fixes at 100 when painting. self.playbackposbar.ignore_change += 1 self.playbackpos.set_value(100) self.playbackposbar.ignore_change -= 1 if self.mode == Colors.MODE_PICK: self.easelarea.window.set_cursor(gtk.gdk.Cursor(gtk.gdk.CROSSHAIR)) if self.mode == Colors.MODE_SCROLL: self.easelarea.window.set_cursor(gtk.gdk.Cursor(gtk.gdk.FLEUR)) if self.mode == Colors.MODE_PALETTE: # This simply darkens the canvas slightly to prepare for an overlay to be drawn on top. #self.easel.render_overlay() # Show the brush controls window. self.easelarea.set_double_buffered(True) self.brush_controls.set_brush(self.easel.brush) self.brush_controls.show_all() self.overlay_active = True self.flush_entire_canvas() self.palettebtn.set_active(True) #if self.mode == Colors.MODE_REFERENCE: # self.easel.render_reference_overlay() # self.flush_entire_canvas() # self.showrefbtn.set_active(True) def leave_mode (self): if self.mode == Colors.MODE_INTRO: self.easel.stop_playback() self.clear_undo() self.easel.clear() self.easel.reset_brush() self.flush_entire_canvas() if self.mode == Colors.MODE_PLAYBACK: self.easel.stop_playback() if self.mode == Colors.MODE_CANVAS: # Finish any in-progress stroke. self.end_draw() if self.mode == Colors.MODE_PICK: self.easelarea.window.set_cursor(None) if self.mode == Colors.MODE_SCROLL: self.scrollref = None self.easelarea.window.set_cursor(None) if self.mode == Colors.MODE_PALETTE: self.set_brush(self.brush_controls.brush) #self.easel.clear_overlay() self.brush_controls.hide() self.easelarea.set_double_buffered(False) self.flush_entire_canvas() self.overlay_active = False self.palettebtn.set_active(False) #if self.mode == Colors.MODE_REFERENCE: # self.easel.clear_overlay() # self.flush_entire_canvas() # self.showrefbtn.set_active(False) def update_mode (self): if self.mode == None: self.set_mode(Colors.MODE_INTRO) if self.mode == Colors.MODE_INTRO: self.easel.update_playback() if not self.easel.playback_done(): self.flush_dirty_canvas() if self.cur_buttons & Colors.BUTTON_TOUCH: self.set_mode(Colors.MODE_CANVAS) return if self.mode == Colors.MODE_PLAYBACK: self.easel.update_playback() if not self.easel.playback_done(): self.flush_dirty_canvas() # Update the progress bar. 
            progress_percent = int(100*float(self.easel.playback)/(self.easel.playback_length()+1))
            if self.easel.playing and progress_percent != self.playbackpos.get_value():
                self.playbackposbar.ignore_change += 1
                self.playbackpos.set_value(progress_percent)
                self.playbackposbar.ignore_change -= 1

            # Painting during playback mode allows you to start where the painter left off.
            # But only when playback is not active.
            if not self.easel.playing and (self.cur_buttons & Colors.BUTTON_TOUCH):
                self.clear_undo()
                self.easel.truncate_at_playback()
                self.set_mode(Colors.MODE_CANVAS)
                return

        if self.mode == Colors.MODE_CANVAS:
            # Receive any drawing commands from peers, if not currently drawing.
            if not self.easel.stroke:
                self.send_and_receive_draw_commands()

            # Update drawing.
            if self.cur_buttons & Colors.BUTTON_TOUCH:
                # At the beginning of the stroke, the shared buffer is used to
                # save the Undo position (when in single user mode).
                if not self.easel.stroke:
                    self.save_undo()
                if self.mx != self.lastmx or self.my != self.lastmy or self.videopaint_enabled:
                    self.draw(Pos(self.mx, self.my))
                    self.flush_dirty_canvas()
            else:
                if self.easel.stroke:
                    self.end_draw()
                    self.flush_dirty_canvas()

        if self.mode == Colors.MODE_PICK:
            if self.cur_buttons & Colors.BUTTON_TOUCH:
                self.pickup_color(Pos(self.mx, self.my))

        if self.mode == Colors.MODE_SCROLL:
            mpos = Pos(self.mx, self.my)
            if self.scrollref is None:
                self.scrollref = mpos
            else:
                move = self.scrollref - mpos
                if move.x != 0 or move.y != 0:
                    self.scroll_to(self.scroll - move)
                    self.scrollref = mpos
                    self.flush_entire_canvas()
            #if self.cur_buttons & Colors.BUTTON_TOUCH:
            #else:
            #    self.scrollref = None

        if self.mode == Colors.MODE_PALETTE:
            pass

        #if self.mode == Colors.MODE_REFERENCE:
        #    pass

    def set_mode (self, mode):
        #log.debug("set mode %d", mode)
        if self.mode is not None:
            self.leave_mode()
        self.mode = mode
        self.enter_mode()

    def update (self):
        if self.easel is None:
            return
        if not self.overlay_active:
            self.update_mode()
            # Request additional mouse events once processing is complete.
            #gtk.gdk.event_request_motions()
        # When called from timer events, stop timer when not in playback anymore.
        if self.mode == Colors.MODE_PLAYBACK or self.mode == Colors.MODE_INTRO:
            return True
        else:
            self.update_timer = None
            return False

    #-----------------------------------------------------------------------------------------------------------------
    # Event handlers

    def on_canvasarea_resize (self):
        rect = self.easelarea.get_allocation()
        #log.debug("Canvas resized to %dx%d", rect[2], rect[3])
        self.width = rect[2]
        self.height = rect[3]

        # Rebuild easelimage.
        self.easelimage = gtk.gdk.Image(gtk.gdk.IMAGE_FASTEST, gtk.gdk.visual_get_system(), rect[2], rect[3])

        # Resize panels.
        self.brush_controls.set_size_request(rect[2], rect[3])
        self.progress.set_size_request(rect[2], rect[3])

    def draw_easelarea(self, bounds):
        if not self.easelarea.bin_window:
            return

        rect = self.easelarea.get_allocation()
        if self.easelimage is None or (rect[2] != self.width or rect[3] != self.height):
            self.on_canvasarea_resize()

        bounds = bounds.intersect(rect)
        if bounds.width <= 0 or bounds.height <= 0:
            return

        gc = self.easelarea.get_style().fg_gc[gtk.STATE_NORMAL]

        # Blit dirty rectangle of canvas into the image.
dest_x = int(bounds.x) dest_y = int(bounds.y) dest_w = int(bounds.width) dest_h = int(bounds.height) if self.zoom == 1.0: spos = self.screen_to_easel(Pos(dest_x, dest_y)) self.easel.blit_1x(self.easelimage, int(spos.x), int(spos.y), dest_x, dest_y, dest_w, dest_h, self.overlay_active) elif self.zoom == 2.0: dest_x = dest_x & ~1 dest_y = dest_y & ~1 dest_w = (dest_w+1) & ~1 dest_h = (dest_h+1) & ~1 spos = self.screen_to_easel(Pos(dest_x, dest_y)) self.easel.blit_2x(self.easelimage, int(spos.x), int(spos.y), dest_x, dest_y, dest_w, dest_h, self.overlay_active) #self.easel.blit_2x( # self.easelimage, # 0, 0, rect.width, rect.height, # int(-self.scroll.x), int(-self.scroll.y), # self.overlay_active) elif self.zoom == 4.0: dest_x = dest_x & ~3 dest_y = dest_y & ~3 dest_w = (dest_w+3) & ~3 dest_h = (dest_h+3) & ~3 spos = self.screen_to_easel(Pos(dest_x, dest_y)) self.easel.blit_4x(self.easelimage, int(spos.x), int(spos.y), dest_x, dest_y, dest_w, dest_h, self.overlay_active) elif self.zoom == 8.0: dest_x = dest_x & ~7 dest_y = dest_y & ~7 dest_w = (dest_w+7) & ~7 dest_h = (dest_h+7) & ~7 spos = self.screen_to_easel(Pos(dest_x, dest_y)) self.easel.blit_8x(self.easelimage, int(spos.x), int(spos.y), dest_x, dest_y, dest_w, dest_h, self.overlay_active) # Then draw the image to the screen. self.easelarea.bin_window.draw_image( gc, self.easelimage, bounds.x, bounds.y, bounds.x, bounds.y, bounds.width, bounds.height) # Debug rectangle to test the dirty rectangle code. It should tightly box the brush at all times. #self.easelarea.bin_window.draw_rectangle(gc, False, bounds.x, bounds.y, bounds.width, bounds.height) # Draw introduction text. if self.mode == Colors.MODE_INTRO: context = self.easelarea.create_pango_context() layout = self.easelarea.create_pango_layout(_('Click anywhere to begin painting!')) layout.set_font_description(pango.FontDescription('Times 14')) size = layout.get_size() x = (self.width-size[0]/pango.SCALE)/2 y = self.height-50-size[1]/pango.SCALE self.easelarea.bin_window.draw_layout(gc, x, y, layout) self.lastr = int(self.zoom*self.easel.brush.size*self.pressure/256) self.lastmx = self.mx self.lastmy = self.my #self.easelarea.bin_window.draw_arc(self.easelarea.get_style().black_gc, False, # self.mx-self.lastr/2, self.my-self.lastr/2, # self.lastr, self.lastr, 0, 360*64) # Hack to keep toolbar up to date. For some reason it fails to draw pretty often. 
#self.toolbox.queue_draw() def on_easelarea_expose (self, widget, event): self.draw_easelarea(event.area) return False def on_palette (self, button): if button.get_active(): if self.mode != Colors.MODE_PALETTE: self.set_mode(Colors.MODE_PALETTE) else: self.set_mode(Colors.MODE_CANVAS) def on_brushpreview_expose (self, widget, event): self.brushpreview.brush = self.easel.brush self.brushpreview.brush.size = int(self.easel.brush.size * self.zoom) self.brushpreview.render(self.brushpreviewimage) bounds = widget.get_allocation() x = (bounds.width-self.brushpreview.size)/2 y = (bounds.height-self.brushpreview.size)/2 self.brushpreviewarea.window.draw_image(widget.get_style().fg_gc[gtk.STATE_NORMAL], self.brushpreviewimage, 0, 0, x, y, -1, -1) def on_zoom_in (self, button): self.zoom_in() if self.my <= 0: self.center_image() def on_zoom_out (self, button): self.zoom_out() if self.my <= 0: self.center_image() def on_center (self, button): self.center_image() #def on_take_reference (self, button): # self.take_reference = True #def on_show_reference (self, button): # if button.get_active(): # if self.mode != Colors.MODE_REFERENCE: # self.set_mode(Colors.MODE_REFERENCE) # else: # self.set_mode(Colors.MODE_CANVAS) def on_videopaint (self, button): if self.camera_enabled: self.videopaint_enabled = button.get_active() if button.get_active(): self.cam.start() # flips the image to start with self.cam.set_controls(hflip = 1) gobject.timeout_add(33, self.on_videopaint_tick, priority=gobject.PRIORITY_HIGH_IDLE+31) else: self.cam.stop() def on_videopaint_tick (self): if not self.camera_enabled or not self.videopaint_enabled or not self.window: return False if self.cam.query_image(): # get the new frame self.camcapture = self.cam.get_image(self.camcapture) # scale it to a quarter the size before colorspace conversion self.camsmall = transform.scale(self.camcapture,(240,180),self.camsmall) # convert colorspace to HSV, good for object tracking self.camhsv = camera.colorspace(self.camsmall,"HSV",self.camhsv) # currently just threshold the OLPC green color. 
            cammask = mask.from_threshold(self.camhsv, (90,128,128), (50,120,120))
            # find the largest object in the mask
            camcomponent = cammask.connected_component()
            camcount = camcomponent.count()
            # make sure it's not just noise
            if camcount > 2000:
                campos = camcomponent.centroid()
                # scale and adjust it so the borders can still be reached
                size = self.window.get_size()
                mx = int(max(0.0, min(1.0, (campos[0]-40)/160.0)) * size[0])
                my = int(max(0.0, min(1.0, (campos[1]-40)/100.0)) * size[1])
                # smooth a bit
                mx = int(self.lastmx*0.5 + mx*0.5)
                my = int(self.lastmy*0.5 + my*0.5)
                self.mx, self.my = self.translate_coordinates(self.easelarea, mx, my)
                self.pressure = int(min(255, camcount/20))
                self.lastmx = self.mx
                self.lastmy = self.my
                gtk.gdk.display_get_default().warp_pointer(self.get_screen(), mx, my)
                #self.flush_cursor()
                self.update()
        return True

    def on_fullscreen(self, widget):
        self.fullscreen()
        self.scroll = Pos(0, 0)

    def on_clear (self, button):
        msg = alert.ConfirmationAlert()
        msg.props.title = _('Clear Canvas?')
        msg.props.msg = _('This will erase the entire image, including the history.')
        def alert_response_cb(alert, response_id, self):
            self.remove_alert(alert)
            if response_id == gtk.RESPONSE_OK:
                if self.mode != Colors.MODE_CANVAS:
                    self.set_mode(Colors.MODE_CANVAS)
                self.clear_undo()
                self.easel.clear()
                self.flush_entire_canvas()
        msg.connect('response', alert_response_cb, self)
        self.add_alert(msg)
        msg.show_all()

    def on_play (self, button):
        # Change to playback mode when the Play button is first pressed.
        if self.mode != Colors.MODE_PLAYBACK:
            self.set_mode(Colors.MODE_PLAYBACK)
        # Resume playback.
        self.easel.resume_playback()

    def on_pause (self, button):
        if self.mode != Colors.MODE_PLAYBACK:
            self.set_mode(Colors.MODE_PLAYBACK)
        self.easel.pause_playback()

    def on_skip_begin (self, button):
        if self.mode != Colors.MODE_PLAYBACK:
            self.set_mode(Colors.MODE_PLAYBACK)
        self.playbackpos.set_value(0)

    def on_back_one(self, button):
        to = self.easel.playback_pos() - 1
        if to < self.easel.playback_length():
            self.play_to(to)

    def on_forward_one (self, button):
        to = self.easel.playback_pos() + 1
        if to < self.easel.playback_length():
            self.play_to(to)

    def on_skip_end (self, button):
        if self.mode != Colors.MODE_PLAYBACK:
            self.set_mode(Colors.MODE_PLAYBACK)
        self.playbackpos.set_value(100)

    def on_sample (self, button):
        self.set_mode(Colors.MODE_PLAYBACK)
        self.clear_undo()
        self.easel.clear()
        self.easel.load(str(button.filename))
        self.easel.set_playback_speed(8)
        self.flush_entire_canvas()
        self.toolbox.set_current_toolbar(2)  # Switch to 'watch' toolbar.
    def on_playbackposbar_change (self, progress):
        if self.playbackposbar.ignore_change > 0:
            return
        if self.mode != Colors.MODE_PLAYBACK:
            self.set_mode(Colors.MODE_PLAYBACK)
        to = int(self.playbackpos.get_value()/100.0*self.easel.playback_length())
        self.play_to(to)
        self.easel.pause_playback()

    #-----------------------------------------------------------------------------------------------------------------
    # Journal integration

    def read_file(self, file_path):
        log.debug("Loading from journal %s", file_path)
        self.set_mode(Colors.MODE_CANVAS)
        self.clear_undo()
        self.easel.clear()
        self.easel.load(str(file_path.encode()))
        self.easel.start_playback()
        self.easel.finish_playback()
        self.playbackpos.set_value(100)
        self.set_mode(Colors.MODE_CANVAS)
        log.debug("Played back %d commands", self.easel.playback_length())
        self.save_undo()

    def write_file(self, file_path):
        log.debug("Saving to journal %s", file_path)
        self.easel.save(file_path.encode())
        log.debug("Saved %d commands", self.easel.playback_length())

    def take_screenshot (self):
        if self.easelarea and self.easelarea.bin_window:
            self._preview.take_screenshot(self.easelarea)

    def save_thumbnail(self, filename):
        pbuf = gtk.gdk.Pixbuf(gtk.gdk.COLORSPACE_RGB, False, 8, self.width, self.height)
        pbuf = pbuf.get_from_image(self.easelimage, self.easelarea.get_colormap(), 0, 0, 0, 0, self.width, self.height)
        pbuf = pbuf.scale_simple(80, 60, gtk.gdk.INTERP_BILINEAR)
        pbuf.save(filename, "png")

    #-----------------------------------------------------------------------------------------------------------------
    # Undo
    # Only available in non-shared mode.  Undo saves a single stroke in the shared buffer
    # for instant undo.  Further undos require a replay of the drawing, which may take
    # a while.
    # Redo is not implemented yet but shouldn't be hard to do.

    def on_undo(self, button):
        self.undo()

    def undo(self):
        # Cannot undo when progress window is up.
        if self.playbackposbar.ignore_change > 0:
            return
        if not self.connected and len(self.undo_buffer) > 0:
            to = self.undo_buffer.pop()
            print "undoing from %d to %d" % (self.easel.get_num_commands(), to)
            if self.undo_image_valid:
                # TODO: Add an API for truncating at arbitrary points when the C library is next changed.
                self.easel.playback = to
                self.easel.restore_shared_image()
            else:
                self.play_to(to)
            self.easel.truncate_at_playback()
            self.flush_entire_canvas()
            self.undo_image_valid = False
        self.update_undo()

    def save_undo(self):
        if not self.connected:
            to = max(0, self.easel.get_num_commands()-1)
            print "saving undo at ", to
            self.undo_buffer.append(to)
            self.easel.save_shared_image()
            self.undo_image_valid = True
        self.update_undo()

    def clear_undo(self):
        self.undo_buffer = []
        self.undo_image_valid = False
        self.update_undo()

    def update_undo(self):
        if not self.connected:
            self.undobtn.set_sensitive(len(self.undo_buffer) > 0)

    #-----------------------------------------------------------------------------------------------------------------
    # Clipboard integration (ported from Oficina)

    def on_copy(self, button):
        w = self.easel.width*2
        h = self.easel.height*2
        image = gtk.gdk.Image(gtk.gdk.IMAGE_FASTEST, gtk.gdk.visual_get_system(), w, h)
        self.easel.blit_2x(image, 0, 0, 0, 0, w, h, False)
        pbuf = gtk.gdk.Pixbuf(gtk.gdk.COLORSPACE_RGB, False, 8, w, h)
        pbuf = pbuf.get_from_image(image, self.easelarea.get_colormap(), 0, 0, 0, 0, w, h)
        cb = gtk.clipboard_get()
        cb.set_image(pbuf)

    def on_paste(self, button):
        pass

    #-----------------------------------------------------------------------------------------------------------------
    # Open Web page to find more paintings.
def on_web(self, event): # Create a Journal entry with a link to the gallery page. fileObject = datastore.create() fileObject.metadata['title'] = _('Colors! Gallery') fileObject.metadata['mime_type'] = 'text/uri-list' fileObject.metadata['icon-color'] = self.metadata['icon-color'] fileObject.file_path = os.path.join(self.get_activity_root(), 'instance', '%i' % time.time()) fd = open(fileObject.file_path, 'w') try: fd.write("http://colors.collectingsmiles.com/") finally: fd.close() datastore.write(fileObject, transfer_ownership=True) id = fileObject.object_id fileObject.destroy() del fileObject # Show the link in the Journal. activity.show_object_in_journal(id) #----------------------------------------------------------------------------------------------------------------- # PNG Export to Journal def on_export_png(self, event): # Create pixbuf. w = self.easel.width*2 h = self.easel.height*2 image = gtk.gdk.Image(gtk.gdk.IMAGE_FASTEST, gtk.gdk.visual_get_system(), w, h) self.easel.blit_2x(image, 0, 0, 0, 0, w, h, False) pbuf = gtk.gdk.Pixbuf(gtk.gdk.COLORSPACE_RGB, False, 8, w, h) pbuf = pbuf.get_from_image(image, self.easelarea.get_colormap(), 0, 0, 0, 0, w, h) # Create a new journal item. ds = datastore.create() act_meta = self.metadata ds.metadata['title'] = act_meta['title'] + ' (PNG)' ds.metadata['title_set_by_user'] = act_meta['title_set_by_user'] ds.metadata['mime_type'] = 'image/png' ds.metadata['icon-color'] = act_meta['icon-color'] #preview = self.get_preview() #if preview is not None: # ds.metadata['preview'] = dbus.ByteArray(preview) # Save the picture to a temporary file. ds.file_path = os.path.join(self.get_activity_root(), 'instance', '%i' % time.time()) pbuf.save(ds.file_path, "png") # Store the journal item. datastore.write(ds, transfer_ownership=True) ds.destroy() del ds #----------------------------------------------------------------------------------------------------------------- # Help dialog def on_help(self, widget): if widget.get_active(): self.help.set_size_request(self.width, self.height) self.help.show_all() self.easelarea.set_double_buffered(True) self.overlay_active = True self.flush_entire_canvas() else: self.help.hide_all() self.overlay_active = False self.flush_entire_canvas() #----------------------------------------------------------------------------------------------------------------- # Benchmarking # # Simply causes the C++ code to do a bunch of work and prints out the time used. Useful for testing the benefits # of optimization. def benchmark (self): # Benchmark a Canvas object. canvas = Canvas(600, 400) canvas.clear() canvas.load(activity.get_bundle_path() + "/data/intro.drw") start = time.time() for i in range(0,100): canvas.start_playback() canvas.finish_playback() log.debug("Canvas playback benchmark: %f sec", time.time()-start) #canvasimage = gtk.gdk.Image(gtk.gdk.IMAGE_FASTEST, gtk.gdk.visual_get_system(), 600, 400) #start = time.time() #for i in range(0,100): # canvas.blit_2x(canvasimage, 0, 0, 600, 400) #log.debug("Canvas 1.0x blit benchmark: %f sec", time.time()-start) canvasimage = gtk.gdk.Image(gtk.gdk.IMAGE_FASTEST, gtk.gdk.visual_get_system(), 1200, 800) start = time.time() for i in range(0,100): canvas.blit_2x(canvasimage, 0, 0, 0, 0, 600, 400, False) log.debug("Canvas 2.0x blit benchmark: %f sec", time.time()-start) # Benchmark a Palette object. 
palette = Palette(500) paletteimage = gtk.gdk.Image(gtk.gdk.IMAGE_FASTEST, gtk.gdk.visual_get_system(), BrushControlsPanel.PALETTE_SIZE, BrushControlsPanel.PALETTE_SIZE) start = time.time() for i in range(0,100): palette.render_wheel(paletteimage) log.debug("Palette wheel benchmark: %f sec", time.time()-start) start = time.time() for i in range(0,100): palette.render_triangle(paletteimage) log.debug("Palette triangle benchmark: %f sec", time.time()-start)
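
#-----------------------------------------------------------------------------------------------------------------
# The press/release/hold derivation in update_input() above is a generic
# bitmask edge-detection idiom.  A minimal self-contained sketch of the same
# technique follows (a hedged illustration; this helper is not part of the
# activity's API):

def _diff_button_masks(prev, cur):
    """Return (pressed, released, held) masks for two successive samples."""
    changed = prev ^ cur       # bits that flipped between the two samples
    pressed = changed & cur    # bits newly set this sample
    released = changed & prev  # bits newly cleared this sample
    held = prev & cur          # bits set in both samples
    return pressed, released, held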
"""SCons.Tool.applelink Tool-specific initialization for the Apple gnu-like linker. There normally shouldn't be any need to import this module directly. It will usually be imported through the generic SCons.Tool.Tool() selection method. """ # # Copyright (c) 2001 - 2016 The SCons Foundation # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # __revision__ = "src/engine/SCons/Tool/applelink.py rel_2.5.0:3543:937e55cd78f7 2016/04/09 11:29:54 bdbaddog" import SCons.Util # Even though the Mac is based on the GNU toolchain, it doesn't understand # the -rpath option, so we use the "link" tool instead of "gnulink". import link def generate(env): """Add Builders and construction variables for applelink to an Environment.""" link.generate(env) env['FRAMEWORKPATHPREFIX'] = '-F' env['_FRAMEWORKPATH'] = '${_concat(FRAMEWORKPATHPREFIX, FRAMEWORKPATH, "", __env__)}' env['_FRAMEWORKS'] = '${_concat("-framework ", FRAMEWORKS, "", __env__)}' env['LINKCOM'] = env['LINKCOM'] + ' $_FRAMEWORKPATH $_FRAMEWORKS $FRAMEWORKSFLAGS' env['SHLINKFLAGS'] = SCons.Util.CLVar('$LINKFLAGS -dynamiclib') env['SHLINKCOM'] = env['SHLINKCOM'] + ' $_FRAMEWORKPATH $_FRAMEWORKS $FRAMEWORKSFLAGS' # override the default for loadable modules, which are different # on OS X than dynamic shared libs. echoing what XCode does for # pre/suffixes: env['LDMODULEPREFIX'] = '' env['LDMODULESUFFIX'] = '' env['LDMODULEFLAGS'] = SCons.Util.CLVar('$LINKFLAGS -bundle') env['LDMODULECOM'] = '$LDMODULE -o ${TARGET} $LDMODULEFLAGS $SOURCES $_LIBDIRFLAGS $_LIBFLAGS $_FRAMEWORKPATH $_FRAMEWORKS $FRAMEWORKSFLAGS' def exists(env): return env['PLATFORM'] == 'darwin' # Local Variables: # tab-width:4 # indent-tabs-mode:nil # End: # vim: set expandtab tabstop=4 shiftwidth=4:
#!/usr/bin/env python # ***** BEGIN LICENSE BLOCK ***** # Version: MPL 1.1/GPL 2.0/LGPL 2.1 # # The contents of this file are subject to the Mozilla Public License # Version 1.1 (the "License"); you may not use this file except in # compliance with the License. You may obtain a copy of the License at # http://www.mozilla.org/MPL/ # # Software distributed under the License is distributed on an "AS IS" # basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the # License for the specific language governing rights and limitations # under the License. # # The Original Code is Komodo code. # # The Initial Developer of the Original Code is ActiveState Software Inc. # Portions created by ActiveState Software Inc are Copyright (C) 2000-2007 # ActiveState Software Inc. All Rights Reserved. # # Contributor(s): # ActiveState Software Inc # # Alternatively, the contents of this file may be used under the terms of # either the GNU General Public License Version 2 or later (the "GPL"), or # the GNU Lesser General Public License Version 2.1 or later (the "LGPL"), # in which case the provisions of the GPL or the LGPL are applicable instead # of those above. If you wish to allow use of your version of this file only # under the terms of either the GPL or the LGPL, and not to allow others to # use your version of this file under the terms of the MPL, indicate your # decision by deleting the provisions above and replace them with the notice # and other provisions required by the GPL or the LGPL. If you do not delete # the provisions above, a recipient may use your version of this file under # the terms of any one of the MPL, the GPL or the LGPL. # # ***** END LICENSE BLOCK ***** # # Some CILE parsing utils extracted originally from phpcile and jscile # but mostly generally useful to CILEs implemented in Python. import codecs import locale import re import sys import os from codeintel2.common import CILEError #---- exported routines def tryEncoding(buffer, encoding): """ buffer, encoding -> encoding_buffer Attempts to encode the buffer using the specified encoding Returns None on failure, a Unicode version of the buffer on success. """ # log.info("_tryEncoding...%s",encoding) try: secret_decoder_ring = codecs.lookup(encoding)[1] except LookupError, e: # the encoding name doesn't exist, likely a pep263 failure # an example is using windows-1250 as the name return None try: (outdata, len) = secret_decoder_ring(buffer) return outdata except Exception, e: # Figure out the real exception types return None try: _defaultEncoding = locale.getdefaultlocale()[1] except ValueError: _defaultEncoding = None if _defaultEncoding is not None: _defaultEncoding = _defaultEncoding.lower() def getEncodedBuffer(buffer): decodedBuffer = tryEncoding(buffer, 'utf-8') if decodedBuffer is not None: return (decodedBuffer, 'utf-8', '') if _defaultEncoding is not None: decodedBuffer = tryEncoding(buffer, _defaultEncoding) if decodedBuffer is not None: return (decodedBuffer, _defaultEncoding, '') return (tryEncoding(buffer, 'iso8859-1'), 'iso8859-1', '') def urlencode_path(s): """URL-encode the given path string. This URL-encoding attempts to NOT encode characters that are typically legal path characters, e.g. '/', '\\', ':'. This is so that the result can more naturally be used as a filepath argument. The string must be an 8-bit string (that is all that URL-encoding can handle). 
""" from urllib import quote safe = os.sep + (os.altsep or '') + ":" return quote(s, safe=safe) #---- javadoc parsing _javadoc1 = re.compile(r'\s*\/\*(.*)\*\/', re.S) _javadoc2 = re.compile(r'^(\s*\*)', re.M) _linedoc = re.compile(r'^(\s*#|\s*\/\/)', re.M) _indent = re.compile(r'^([ \t]*)', re.M) _param = re.compile( r'^\s*@param\s+(?P<type>[\w\\]+)\s+\$(?P<name>\w+)(?:\s+?(?P<doc>.*?))?', re.M | re.U) _return = re.compile( r'^\s*@return\s+(?P<type>[\w\\]+)(?:\s+(?P<doc>.*))?', re.M | re.U) def uncommentDocString(doc): # remove block style leading and end comments d = '\n'.join(re.findall(_javadoc1, doc)) if d: # remove starting * if javadoc style d = re.sub(_javadoc2, '', d) else: d = doc # remove line style comments d = re.sub(_linedoc, '', d) # trim preceeding blank lines. we dont want to trim the first non-blank # line lines = d.split('\n') while len(lines) and not lines[0].strip(): lines.pop(0) d = '\n'.join(lines) # trip any blank end lines d = d.rstrip() # guess the indent size spaces = re.findall(_indent, d) indent = len(spaces[0]) for s in spaces: if len(s) and len(s) < indent: indent = len(s) # dedent the block if not indent: return d dedent = re.compile(r'^([ \t]{%d})' % indent, re.M) d = re.sub(dedent, '', d) return d def parseDocString(doc): d = uncommentDocString(doc) params = re.findall(_param, d) result = re.findall(_return, d) if result: result = result[0] return (d, params, result) SKIPTOK = 0x01 # don't consider this a token that is to be considered a part of the grammar, like '\n' MAPTOK = 0x02 # use the token associated with the pattern when it matches EXECFN = 0x04 # execute the function associated with the pattern when it matches USETXT = 0x08 # if you match a single character and want its ascii value to be the token class recollector: def __init__(self): self.res = {} self.regs = {} def add(self, name, reg, mods=None): self.regs[name] = reg % self.regs # print "%s = %s" % (name, self.regs[name]) if mods: self.res[name] = re.compile(self.regs[ name], mods) # check that it is valid else: self.res[name] = re.compile(self.regs[ name]) # check that it is valid # Lexer class borrowed from the PyLRd project, # http://starship.python.net/crew/scott/PyLR.html class Lexer: eof = -1 def __init__(self): self.tokenmap = {} self.prgmap = {} self.prglist = [] self.lasttok = -1 self.text = "" self.textindex = 0 self.tokennum2name = {} def nexttok(self): self.lasttok = self.lasttok + 1 return self.lasttok def settext(self, t): self.text = t self.textindex = 0 def addmatch(self, prg, func=None, tokname="", attributes=MAPTOK | EXECFN): self.prglist.append(prg) tok = -2 if not func: attributes = attributes & ~EXECFN if not tokname: attributes = attributes & ~MAPTOK if attributes & MAPTOK: self.tokenmap[tokname] = tok = self.nexttok() else: tok = self.nexttok() self.prgmap[prg] = tok, attributes, func self.tokennum2name[tok] = tokname def scan(self): for prg in self.prglist: mo = prg.match(self.text, self.textindex) if not mo: continue self.textindex = self.textindex + len(mo.group(0)) tmpres = mo.group(0) t, attributes, fn = self.prgmap[prg] # log.debug("'%s' token: %r", self.tokennum2name[t], tmpres) if attributes & EXECFN: tmpres = apply(fn, (mo,)) if attributes & USETXT: t = ord(mo.group(0)[0]) return (t, tmpres) if self.textindex >= len(self.text): return (self.eof, "") raise CILEError("Syntax Error in lexer")
#!/usr/bin/env python
from distutils.core import setup

for cmd in ('egg_info', 'develop'):
    import sys
    if cmd in sys.argv:
        from setuptools import setup

setup(
    name='django-qsstats-magic',
    version='1.1.0',
    description='A django microframework that eases the generation of aggregate data for querysets.',
    long_description = open('README.rst').read(),
    author='Matt Croydon, Mikhail Korobov',
    author_email='[email protected], [email protected]',
    url='https://github.com/PetrDlouhy/django-qsstats-magic',
    packages=['qsstats'],
    requires=['dateutil(>=1.4.1, < 2.0)', 'six'],
    classifiers=[
        'Development Status :: 4 - Beta',
        'Environment :: Web Environment',
        'Framework :: Django',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ],
)
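
# Typical invocations of this file (a hedged sketch; assumes the package is
# published on PyPI under the same name):
#
#   python setup.py sdist            # build a source distribution
#   python setup.py install          # install from a source checkout
#   pip install django-qsstats-magic # install the released package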
#This is used to query a JENCO 6230N pH meter
#The command to query this pH meter is "S00"
#The meter will respond with a structured string of data
#uD3 is the ubuntu version of the uDaq script

#known issues:
#Sensors limit the sampling rate to 30 Hz
#The script slows down after ~1330 measurements. This occurs with or without graphing. The source of this bug is unclear.

import serial
import io
from datetime import datetime
import time
from matplotlib import pyplot as plt
from matplotlib import cm
from matplotlib.pylab import subplots, close

filename = raw_input("Save pH data as: ")

#Serial port names
#MacOS '/dev/tty.usbserial'
#Ubuntu '/dev/ttyUSB0'

#USB port names (force transducer)
#MacOS '/dev/tty.usbmodem621'
#Ubuntu '/dev/ttyACM1'

phPORT = '/dev/ttyUSB1'
forcePORT = '/dev/ttyACM1'
plotLength = 20
BAUD = 9600
COLLECTION_RATE = 100
COLLECTION_SECONDS = 60*8 #change to 60*8 when done debugging

class Timer(object):
    def __init__(self, verbose=False):
        self.verbose = verbose

    def __enter__(self):
        self.start = time.time()
        return self

    def __exit__(self, *args):
        self.end = time.time()
        self.secs = self.end - self.start
        self.msecs = self.secs * 1000
        if self.verbose:
            print 'elapsed time: %f ms' %self.msecs

#may not need a function here until the demands get more complicated
#def readMeter():

#serial port information for the force transducer
with Timer() as t:
    ForceSer = serial.Serial(port=forcePORT, baudrate=BAUD)
    ForceSio = io.TextIOWrapper(io.BufferedRWPair(ForceSer,ForceSer))
print "=> elapsed ForceSer+ForceSio: %s s" %t.secs

#serial port information for the ph meter
with Timer() as t:
    phSer = serial.Serial(port=phPORT, baudrate=BAUD)
    phSio = io.TextIOWrapper(io.BufferedRWPair(phSer,phSer))
print "=> elapsed pHSer+pHSio: %s s" %t.secs

numDataPoints = COLLECTION_RATE*COLLECTION_SECONDS
output = open(filename, 'w')
counter = 0

def getTime():
    timeTemp = time.localtime()
    timeTuple = timeTemp[3:6]
    timeList = list(timeTuple)
    timeStr = ""
    temp = []
    for i in timeList:
        temp.append(str(i))
    timeStr=":".join(temp)
    return timeStr

#Plotting not ready yet
#Jenco data is parsed now
#Need handling for error strings from JENCO6230N
#Currently commented out the plot2 containing pH information; this is lower value than force
#Testing performance gains
#pltCount = 0
#y=[0]
#x=[0]
#phX=[0]
#phY=[0]
#forceX=[0]
#forceY=[0]
#fig, ax = subplots(1,1)
#ax.set_xlim(0,numDataPoints)
#ax.set_ylim(0, 1400)
#ax.hold(True)
#plt.ion()
#plt.show(block=False)
#plot = ax.plot(forceX,forceY, lw=0.5)[0]
#plot2 = ax.plot(phX, phY, lw=0.5)[0]
#tic = time.time()
#x = [0]
#y = [0]

#write the header for the file
output.write("Int" + "\t" + "Time" + "\t" + "Force" + "\t" + "pH" + "\t" + "mV" + "\n")

while counter <= (numDataPoints):
    with Timer() as t:
        time.sleep(1.0/COLLECTION_RATE) #float division; 1/COLLECTION_RATE is 0 under Python 2, which made this sleep a no-op
    print "=> sleep: %s s" %t.secs

    with Timer() as t:
        phSer.write('S00')
    print "=> pHser.write: %s s" %t.secs

    with Timer() as t:
        ForceSer.write(unicode('1'))
    print "=> ForceSer.write(): %s s" %t.secs

    with Timer() as t:
        current_time = getTime()
    print "=> current_time: %s s" %t.secs

    countStr = str(counter)

    with Timer() as t:
        line = phSer.readline()
    print "=> phSer.readline(): %s s" %t.secs

    milivoltVal = line[4:10] #the output from the JENCO6230N has fixed width so slicing works
    phVal= float(line[12:17])

    #ForceSer.write(unicode('1'))
    with Timer() as t:
        line = ForceSer.readline()
    print "=> ForceSer.readline(): %s s" %t.secs

    forceVal = (1023-int(line))

#    forceY.append(forceVal)
#    forceX.append(counter)
#    phX.append(counter)
#    phY.append(phVal*100) #phY is the graphed value of the pH meter reading, multiply by 100 to have it fit on the same scale as the force measurements
#    pltCount+=1

#    if pltCount >= 10:
#        if len(forceX) < plotLength:
#            plot.set_data(forceX,forceY)
#            plot2.set_data(phX, phY)
#        else:
#            plot.set_data(forceX[(len(forceX)-plotLength):], forceY[(len(forceY)-plotLength):])
#            plot2.set_data(phX[(len(phX)-plotLength):], phY[(len(phY)-plotLength):])
#        ax.draw_artist(plot)
#        ax.draw_artist(plot2)
#        fig.canvas.blit(ax.bbox)
#        pltCount = 0

    with Timer() as t:
        data = countStr + "\t" + current_time + "\t" + str(forceVal) + "\t" + str(phVal) + "\t" + milivoltVal + "\n"
    print "=> data string formatting: %s s" %t.secs

#    with Timer() as t:
#        print data
#    print "=> print data: %s s" %t.secs

    with Timer() as t:
        output.write(data)
    print "=> output.write(data): %s s" %t.secs

    counter += 1
    if phSer.isOpen()==False:
        break

output.close()
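
#The JENCO 6230N reply has fixed-width fields, so plain slicing recovers them.
#A hedged helper mirroring the offsets used in the loop above (it assumes the
#same reply layout; returns the millivolt field as a string and the pH as a float):
def parse_jenco_reply(line):
    return line[4:10], float(line[12:17])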
import datetime from django.db import models from django.db.models.fields.related import ReverseSingleRelatedObjectDescriptor from django.utils.encoding import python_2_unicode_compatible from django.utils.translation import get_language @python_2_unicode_compatible class Country(models.Model): # Table Column Fields name = models.CharField(max_length=50) def __str__(self): return self.name @python_2_unicode_compatible class Person(models.Model): # Table Column Fields name = models.CharField(max_length=128) person_country_id = models.IntegerField() # Relation Fields person_country = models.ForeignObject( Country, from_fields=['person_country_id'], to_fields=['id']) friends = models.ManyToManyField('self', through='Friendship', symmetrical=False) class Meta: ordering = ('name',) def __str__(self): return self.name @python_2_unicode_compatible class Group(models.Model): # Table Column Fields name = models.CharField(max_length=128) group_country = models.ForeignKey(Country) members = models.ManyToManyField(Person, related_name='groups', through='Membership') class Meta: ordering = ('name',) def __str__(self): return self.name @python_2_unicode_compatible class Membership(models.Model): # Table Column Fields membership_country = models.ForeignKey(Country) date_joined = models.DateTimeField(default=datetime.datetime.now) invite_reason = models.CharField(max_length=64, null=True) person_id = models.IntegerField() group_id = models.IntegerField() # Relation Fields person = models.ForeignObject( Person, from_fields=['membership_country', 'person_id'], to_fields=['person_country_id', 'id']) group = models.ForeignObject( Group, from_fields=['membership_country', 'group_id'], to_fields=['group_country', 'id']) class Meta: ordering = ('date_joined', 'invite_reason') def __str__(self): return "%s is a member of %s" % (self.person.name, self.group.name) class Friendship(models.Model): # Table Column Fields from_friend_country = models.ForeignKey(Country, related_name="from_friend_country") from_friend_id = models.IntegerField() to_friend_country_id = models.IntegerField() to_friend_id = models.IntegerField() # Relation Fields from_friend = models.ForeignObject( Person, from_fields=['from_friend_country', 'from_friend_id'], to_fields=['person_country_id', 'id'], related_name='from_friend') to_friend_country = models.ForeignObject( Country, from_fields=['to_friend_country_id'], to_fields=['id'], related_name='to_friend_country') to_friend = models.ForeignObject( Person, from_fields=['to_friend_country_id', 'to_friend_id'], to_fields=['person_country_id', 'id'], related_name='to_friend') class ArticleTranslationDescriptor(ReverseSingleRelatedObjectDescriptor): """ The set of articletranslation should not set any local fields. """ def __set__(self, instance, value): if instance is None: raise AttributeError("%s must be accessed via instance" % self.field.name) setattr(instance, self.cache_name, value) if value is not None and not self.field.rel.multiple: setattr(value, self.field.related.get_cache_name(), instance) class ColConstraint(object): # Anything with as_sql() method works in get_extra_restriction(). def __init__(self, alias, col, value): self.alias, self.col, self.value = alias, col, value def as_sql(self, compiler, connection): qn = compiler.quote_name_unless_alias return '%s.%s = %%s' % (qn(self.alias), qn(self.col)), [self.value] class ActiveTranslationField(models.ForeignObject): """ This field will allow querying and fetching the currently active translation for Article from ArticleTranslation. 
""" requires_unique_target = False def get_extra_restriction(self, where_class, alias, related_alias): return ColConstraint(alias, 'lang', get_language()) def get_extra_descriptor_filter(self): return {'lang': get_language()} def contribute_to_class(self, cls, name): super(ActiveTranslationField, self).contribute_to_class(cls, name) setattr(cls, self.name, ArticleTranslationDescriptor(self)) @python_2_unicode_compatible class Article(models.Model): active_translation = ActiveTranslationField( 'ArticleTranslation', from_fields=['id'], to_fields=['article'], related_name='+', null=True) pub_date = models.DateField() def __str__(self): try: return self.active_translation.title except ArticleTranslation.DoesNotExist: return '[No translation found]' class NewsArticle(Article): pass class ArticleTranslation(models.Model): article = models.ForeignKey(Article) lang = models.CharField(max_length=2) title = models.CharField(max_length=100) body = models.TextField() abstract = models.CharField(max_length=400, null=True) class Meta: unique_together = ('article', 'lang') ordering = ('active_translation__title',) class ArticleTag(models.Model): article = models.ForeignKey(Article, related_name="tags", related_query_name="tag") name = models.CharField(max_length=255) class ArticleIdea(models.Model): articles = models.ManyToManyField(Article, related_name="ideas", related_query_name="idea_things") name = models.CharField(max_length=255)
""" Module for the dual-branch fall-back Draft->Published Versioning ModuleStore """ from xmodule.modulestore.split_mongo.split import SplitMongoModuleStore, EXCLUDE_ALL from xmodule.exceptions import InvalidVersionError from xmodule.modulestore import ModuleStoreEnum from xmodule.modulestore.exceptions import InsufficientSpecificationError, ItemNotFoundError from xmodule.modulestore.draft_and_published import ( ModuleStoreDraftAndPublished, DIRECT_ONLY_CATEGORIES, UnsupportedRevisionError ) from opaque_keys.edx.locator import CourseLocator, LibraryLocator, LibraryUsageLocator from xmodule.modulestore.split_mongo import BlockKey from contracts import contract class DraftVersioningModuleStore(SplitMongoModuleStore, ModuleStoreDraftAndPublished): """ A subclass of Split that supports a dual-branch fall-back versioning framework with a Draft branch that falls back to a Published branch. """ def create_course(self, org, course, run, user_id, skip_auto_publish=False, **kwargs): """ Creates and returns the course. Args: org (str): the organization that owns the course course (str): the name of the course run (str): the name of the run user_id: id of the user creating the course kwargs: Any optional arguments understood by a subset of modulestores to customize instantiation Returns: a CourseDescriptor """ master_branch = kwargs.pop('master_branch', ModuleStoreEnum.BranchName.draft) with self.bulk_operations(CourseLocator(org, course, run)): item = super(DraftVersioningModuleStore, self).create_course( org, course, run, user_id, master_branch=master_branch, **kwargs ) if master_branch == ModuleStoreEnum.BranchName.draft and not skip_auto_publish: # any other value is hopefully only cloning or doing something which doesn't want this value add self._auto_publish_no_children(item.location, item.location.category, user_id, **kwargs) # create any other necessary things as a side effect: ensure they populate the draft branch # and rely on auto publish to populate the published branch: split's create course doesn't # call super b/c it needs the auto publish above to have happened before any of the create_items # in this; so, this manually calls the grandparent and above methods. with self.branch_setting(ModuleStoreEnum.Branch.draft_preferred, item.id): # NOTE: DO NOT CHANGE THE SUPER. 
See comment above super(SplitMongoModuleStore, self).create_course( org, course, run, user_id, runtime=item.runtime, **kwargs ) return item def get_course(self, course_id, depth=0, **kwargs): course_id = self._map_revision_to_branch(course_id) return super(DraftVersioningModuleStore, self).get_course(course_id, depth=depth, **kwargs) def get_library(self, library_id, depth=0, head_validation=True, **kwargs): if not head_validation and library_id.version_guid: return SplitMongoModuleStore.get_library( self, library_id, depth=depth, head_validation=head_validation, **kwargs ) library_id = self._map_revision_to_branch(library_id) return super(DraftVersioningModuleStore, self).get_library(library_id, depth=depth, **kwargs) def clone_course(self, source_course_id, dest_course_id, user_id, fields=None, revision=None, **kwargs): """ See :py:meth: xmodule.modulestore.split_mongo.split.SplitMongoModuleStore.clone_course """ dest_course_id = self._map_revision_to_branch(dest_course_id, revision=revision) return super(DraftVersioningModuleStore, self).clone_course( source_course_id, dest_course_id, user_id, fields=fields, **kwargs ) def get_course_summaries(self, **kwargs): """ Returns course summaries on the Draft or Published branch depending on the branch setting. """ branch_setting = self.get_branch_setting() if branch_setting == ModuleStoreEnum.Branch.draft_preferred: return super(DraftVersioningModuleStore, self).get_course_summaries( ModuleStoreEnum.BranchName.draft, **kwargs ) elif branch_setting == ModuleStoreEnum.Branch.published_only: return super(DraftVersioningModuleStore, self).get_course_summaries( ModuleStoreEnum.BranchName.published, **kwargs ) else: raise InsufficientSpecificationError() def get_courses(self, **kwargs): """ Returns all the courses on the Draft or Published branch depending on the branch setting. """ branch_setting = self.get_branch_setting() if branch_setting == ModuleStoreEnum.Branch.draft_preferred: return super(DraftVersioningModuleStore, self).get_courses(ModuleStoreEnum.BranchName.draft, **kwargs) elif branch_setting == ModuleStoreEnum.Branch.published_only: return super(DraftVersioningModuleStore, self).get_courses(ModuleStoreEnum.BranchName.published, **kwargs) else: raise InsufficientSpecificationError() def _auto_publish_no_children(self, location, category, user_id, **kwargs): """ Publishes item if the category is DIRECT_ONLY. This assumes another method has checked that location points to the head of the branch and ignores the version. If you call this in any other context, you may blow away another user's changes. NOTE: only publishes the item at location: no children get published. """ if location.branch == ModuleStoreEnum.BranchName.draft and category in DIRECT_ONLY_CATEGORIES: # version_agnostic b/c of above assumption in docstring self.publish(location.version_agnostic(), user_id, blacklist=EXCLUDE_ALL, **kwargs) def copy_from_template(self, source_keys, dest_key, user_id, **kwargs): """ See :py:meth `SplitMongoModuleStore.copy_from_template` """ source_keys = [self._map_revision_to_branch(key) for key in source_keys] dest_key = self._map_revision_to_branch(dest_key) head_validation = kwargs.get('head_validation') new_keys = super(DraftVersioningModuleStore, self).copy_from_template( source_keys, dest_key, user_id, head_validation ) if dest_key.branch == ModuleStoreEnum.BranchName.draft: # Check if any of new_keys or their descendants need to be auto-published. # We don't use _auto_publish_no_children since children may need to be published. 
with self.bulk_operations(dest_key.course_key): keys_to_check = list(new_keys) while keys_to_check: usage_key = keys_to_check.pop() if usage_key.category in DIRECT_ONLY_CATEGORIES: self.publish(usage_key.version_agnostic(), user_id, blacklist=EXCLUDE_ALL, **kwargs) children = getattr(self.get_item(usage_key, **kwargs), "children", []) # e.g. if usage_key is a chapter, it may have an auto-publish sequential child keys_to_check.extend(children) return new_keys def update_item(self, descriptor, user_id, allow_not_found=False, force=False, asides=None, **kwargs): old_descriptor_locn = descriptor.location descriptor.location = self._map_revision_to_branch(old_descriptor_locn) emit_signals = descriptor.location.branch == ModuleStoreEnum.BranchName.published \ or descriptor.location.block_type in DIRECT_ONLY_CATEGORIES with self.bulk_operations(descriptor.location.course_key, emit_signals=emit_signals): item = super(DraftVersioningModuleStore, self).update_item( descriptor, user_id, allow_not_found=allow_not_found, force=force, asides=asides, **kwargs ) self._auto_publish_no_children(item.location, item.location.category, user_id, **kwargs) descriptor.location = old_descriptor_locn return item def create_item(self, user_id, course_key, block_type, block_id=None, # pylint: disable=too-many-statements definition_locator=None, fields=None, asides=None, force=False, skip_auto_publish=False, **kwargs): """ See :py:meth `ModuleStoreDraftAndPublished.create_item` """ course_key = self._map_revision_to_branch(course_key) emit_signals = course_key.branch == ModuleStoreEnum.BranchName.published \ or block_type in DIRECT_ONLY_CATEGORIES with self.bulk_operations(course_key, emit_signals=emit_signals): item = super(DraftVersioningModuleStore, self).create_item( user_id, course_key, block_type, block_id=block_id, definition_locator=definition_locator, fields=fields, asides=asides, force=force, **kwargs ) if not skip_auto_publish: self._auto_publish_no_children(item.location, item.location.category, user_id, **kwargs) return item def create_child( self, user_id, parent_usage_key, block_type, block_id=None, fields=None, asides=None, **kwargs ): parent_usage_key = self._map_revision_to_branch(parent_usage_key) with self.bulk_operations(parent_usage_key.course_key): item = super(DraftVersioningModuleStore, self).create_child( user_id, parent_usage_key, block_type, block_id=block_id, fields=fields, asides=asides, **kwargs ) # Publish both the child and the parent, if the child is a direct-only category self._auto_publish_no_children(item.location, item.location.category, user_id, **kwargs) self._auto_publish_no_children(parent_usage_key, item.location.category, user_id, **kwargs) return item def delete_item(self, location, user_id, revision=None, skip_auto_publish=False, **kwargs): """ Delete the given item from persistence. kwargs allow modulestore specific parameters. Args: location: UsageKey of the item to be deleted user_id: id of the user deleting the item revision: None - deletes the item and its subtree, and updates the parents per description above ModuleStoreEnum.RevisionOption.published_only - removes only Published versions ModuleStoreEnum.RevisionOption.all - removes both Draft and Published parents currently only provided by contentstore.views.item.orphan_handler Otherwise, raises a ValueError. 
""" allowed_revisions = [ None, ModuleStoreEnum.RevisionOption.published_only, ModuleStoreEnum.RevisionOption.all ] if revision not in allowed_revisions: raise UnsupportedRevisionError(allowed_revisions) autopublish_parent = False with self.bulk_operations(location.course_key): if isinstance(location, LibraryUsageLocator): branches_to_delete = [ModuleStoreEnum.BranchName.library] # Libraries don't yet have draft/publish support elif location.category in DIRECT_ONLY_CATEGORIES: branches_to_delete = [ModuleStoreEnum.BranchName.published, ModuleStoreEnum.BranchName.draft] elif revision == ModuleStoreEnum.RevisionOption.all: branches_to_delete = [ModuleStoreEnum.BranchName.published, ModuleStoreEnum.BranchName.draft] else: if revision == ModuleStoreEnum.RevisionOption.published_only: branches_to_delete = [ModuleStoreEnum.BranchName.published] elif revision is None: branches_to_delete = [ModuleStoreEnum.BranchName.draft] parent_loc = self.get_parent_location(location.for_branch(ModuleStoreEnum.BranchName.draft)) autopublish_parent = ( not skip_auto_publish and parent_loc is not None and parent_loc.block_type in DIRECT_ONLY_CATEGORIES ) self._flag_publish_event(location.course_key) for branch in branches_to_delete: branched_location = location.for_branch(branch) super(DraftVersioningModuleStore, self).delete_item(branched_location, user_id) if autopublish_parent: self.publish(parent_loc.version_agnostic(), user_id, blacklist=EXCLUDE_ALL, **kwargs) def _map_revision_to_branch(self, key, revision=None): """ Maps RevisionOptions to BranchNames, inserting them into the key """ if isinstance(key, (LibraryLocator, LibraryUsageLocator)): # Libraries don't yet have draft/publish support: draft_branch = ModuleStoreEnum.BranchName.library published_branch = ModuleStoreEnum.BranchName.library else: draft_branch = ModuleStoreEnum.BranchName.draft published_branch = ModuleStoreEnum.BranchName.published if revision == ModuleStoreEnum.RevisionOption.published_only: return key.for_branch(published_branch) elif revision == ModuleStoreEnum.RevisionOption.draft_only: return key.for_branch(draft_branch) elif revision is None: if key.branch is not None: return key elif self.get_branch_setting(key) == ModuleStoreEnum.Branch.draft_preferred: return key.for_branch(draft_branch) else: return key.for_branch(published_branch) else: raise UnsupportedRevisionError() def has_item(self, usage_key, revision=None): """ Returns True if location exists in this ModuleStore. """ usage_key = self._map_revision_to_branch(usage_key, revision=revision) return super(DraftVersioningModuleStore, self).has_item(usage_key) def get_item(self, usage_key, depth=0, revision=None, **kwargs): """ Returns the item identified by usage_key and revision. """ usage_key = self._map_revision_to_branch(usage_key, revision=revision) return super(DraftVersioningModuleStore, self).get_item(usage_key, depth=depth, **kwargs) def get_items(self, course_locator, revision=None, **kwargs): """ Returns a list of XModuleDescriptor instances for the matching items within the course with the given course_locator. """ course_locator = self._map_revision_to_branch(course_locator, revision=revision) return super(DraftVersioningModuleStore, self).get_items(course_locator, **kwargs) def get_parent_location(self, location, revision=None, **kwargs): ''' Returns the given location's parent location in this course. 
Args: revision: None - uses the branch setting for the revision ModuleStoreEnum.RevisionOption.published_only - return only the PUBLISHED parent if it exists, else returns None ModuleStoreEnum.RevisionOption.draft_preferred - return either the DRAFT or PUBLISHED parent, preferring DRAFT, if parent(s) exists, else returns None ''' if revision == ModuleStoreEnum.RevisionOption.draft_preferred: revision = ModuleStoreEnum.RevisionOption.draft_only location = self._map_revision_to_branch(location, revision=revision) return super(DraftVersioningModuleStore, self).get_parent_location(location, **kwargs) def get_block_original_usage(self, usage_key): """ If a block was inherited into another structure using copy_from_template, this will return the original block usage locator from which the copy was inherited. """ usage_key = self._map_revision_to_branch(usage_key) return super(DraftVersioningModuleStore, self).get_block_original_usage(usage_key) def get_orphans(self, course_key, **kwargs): course_key = self._map_revision_to_branch(course_key) return super(DraftVersioningModuleStore, self).get_orphans(course_key, **kwargs) def fix_not_found(self, course_key, user_id): """ Fix any children which point to non-existent blocks in the course's published and draft branches """ for branch in [ModuleStoreEnum.RevisionOption.published_only, ModuleStoreEnum.RevisionOption.draft_only]: super(DraftVersioningModuleStore, self).fix_not_found( self._map_revision_to_branch(course_key, branch), user_id ) def has_changes(self, xblock): """ Checks if the given block has unpublished changes :param xblock: the block to check :return: True if the draft and published versions differ """ def get_course(branch_name): return self._lookup_course(xblock.location.course_key.for_branch(branch_name)).structure def get_block(course_structure, block_key): return self._get_block_from_structure(course_structure, block_key) draft_course = get_course(ModuleStoreEnum.BranchName.draft) published_course = get_course(ModuleStoreEnum.BranchName.published) def has_changes_subtree(block_key): draft_block = get_block(draft_course, block_key) if draft_block is None: # temporary fix for bad pointers TNL-1141 return True published_block = get_block(published_course, block_key) if published_block is None: return True # check if the draft has changed since the published was created if self._get_version(draft_block) != self._get_version(published_block): return True # check the children in the draft if 'children' in draft_block.fields: return any( [has_changes_subtree(child_block_id) for child_block_id in draft_block.fields['children']] ) return False return has_changes_subtree(BlockKey.from_usage_key(xblock.location)) def publish(self, location, user_id, blacklist=None, **kwargs): """ Publishes the subtree under location from the draft branch to the published branch Returns the newly published item. """ super(DraftVersioningModuleStore, self).copy( user_id, # Directly using the replace function rather than the for_branch function # because for_branch obliterates the version_guid and will lead to missed version conflicts. # TODO Instead, the for_branch implementation should be fixed in the Opaque Keys library. 
            location.course_key.replace(branch=ModuleStoreEnum.BranchName.draft),
            # We clear out the version_guid here because the location here is from the draft branch, and that
            # won't have the same version guid
            location.course_key.replace(branch=ModuleStoreEnum.BranchName.published, version_guid=None),
            [location],
            blacklist=blacklist
        )
        self._flag_publish_event(location.course_key)
        return self.get_item(location.for_branch(ModuleStoreEnum.BranchName.published), **kwargs)

    def unpublish(self, location, user_id, **kwargs):
        """
        Deletes the published version of the item.
        Returns the newly unpublished item.
        """
        if location.block_type in DIRECT_ONLY_CATEGORIES:
            raise InvalidVersionError(location)
        with self.bulk_operations(location.course_key):
            self.delete_item(location, user_id, revision=ModuleStoreEnum.RevisionOption.published_only)
            return self.get_item(location.for_branch(ModuleStoreEnum.BranchName.draft), **kwargs)

    def revert_to_published(self, location, user_id):
        """
        Reverts an item to its last published version (recursively traversing all of its descendants).
        If no published version exists, an InvalidVersionError is thrown.

        If a published version exists but there is no draft version of this item or any of its
        descendants, this method is a no-op.

        :raises InvalidVersionError: if no published version exists for the location specified
        """
        if location.category in DIRECT_ONLY_CATEGORIES:
            return

        draft_course_key = location.course_key.for_branch(ModuleStoreEnum.BranchName.draft)
        with self.bulk_operations(draft_course_key):

            # get head version of Published branch
            published_course_structure = self._lookup_course(
                location.course_key.for_branch(ModuleStoreEnum.BranchName.published)
            ).structure
            published_block = self._get_block_from_structure(
                published_course_structure,
                BlockKey.from_usage_key(location)
            )
            if published_block is None:
                raise InvalidVersionError(location)

            # create a new versioned draft structure
            draft_course_structure = self._lookup_course(draft_course_key).structure
            new_structure = self.version_structure(draft_course_key, draft_course_structure, user_id)

            # remove the block and its descendants from the new structure
            self._remove_subtree(BlockKey.from_usage_key(location), new_structure['blocks'])

            # copy over the block and its descendants from the published branch
            def copy_from_published(root_block_id):
                """
                copies root_block_id and its descendants from published_course_structure to new_structure
                """
                self._update_block_in_structure(
                    new_structure,
                    root_block_id,
                    self._get_block_from_structure(published_course_structure, root_block_id)
                )
                block = self._get_block_from_structure(new_structure, root_block_id)
                for child_block_id in block.fields.get('children', []):
                    copy_from_published(child_block_id)

            copy_from_published(BlockKey.from_usage_key(location))

            # update course structure and index
            self.update_structure(draft_course_key, new_structure)
            index_entry = self._get_index_if_valid(draft_course_key)
            if index_entry is not None:
                self._update_head(draft_course_key, index_entry, ModuleStoreEnum.BranchName.draft, new_structure['_id'])

    def force_publish_course(self, course_locator, user_id, commit=False):
        """
        Helper method to forcefully publish a course, making the published branch point to the same
        structure as the draft branch.
""" versions = None index_entry = self.get_course_index(course_locator) if index_entry is not None: versions = index_entry['versions'] if commit: # update published branch version only if publish and draft point to different versions if versions['published-branch'] != versions['draft-branch']: self._update_head( course_locator, index_entry, 'published-branch', index_entry['versions']['draft-branch'] ) self._flag_publish_event(course_locator) return self.get_course_index(course_locator)['versions'] return versions def get_course_history_info(self, course_locator): """ See :py:meth `xmodule.modulestore.split_mongo.split.SplitMongoModuleStore.get_course_history_info` """ course_locator = self._map_revision_to_branch(course_locator) return super(DraftVersioningModuleStore, self).get_course_history_info(course_locator) def get_course_successors(self, course_locator, version_history_depth=1): """ See :py:meth `xmodule.modulestore.split_mongo.split.SplitMongoModuleStore.get_course_successors` """ course_locator = self._map_revision_to_branch(course_locator) return super(DraftVersioningModuleStore, self).get_course_successors( course_locator, version_history_depth=version_history_depth ) def get_block_generations(self, block_locator): """ See :py:meth `xmodule.modulestore.split_mongo.split.SplitMongoModuleStore.get_block_generations` """ block_locator = self._map_revision_to_branch(block_locator) return super(DraftVersioningModuleStore, self).get_block_generations(block_locator) def has_published_version(self, xblock): """ Returns whether this xblock has a published version (whether it's up to date or not). """ return self._get_head(xblock, ModuleStoreEnum.BranchName.published) is not None def convert_to_draft(self, location, user_id): """ Create a copy of the source and mark its revision as draft. :param source: the location of the source (its revision must be None) """ # This is a no-op in Split since a draft version of the data always remains pass def _get_head(self, xblock, branch): """ Gets block at the head of specified branch """ try: course_structure = self._lookup_course(xblock.location.course_key.for_branch(branch)).structure except ItemNotFoundError: # There is no published version xblock container, e.g. Library return None return self._get_block_from_structure(course_structure, BlockKey.from_usage_key(xblock.location)) def _get_version(self, block): """ Return the version of the given database representation of a block. """ source_version = block.edit_info.source_version return source_version if source_version is not None else block.edit_info.update_version def import_xblock(self, user_id, course_key, block_type, block_id, fields=None, runtime=None, **kwargs): """ Split-based modulestores need to import published blocks to both branches """ with self.bulk_operations(course_key): # hardcode course root block id if block_type == 'course': block_id = self.DEFAULT_ROOT_COURSE_BLOCK_ID elif block_type == 'library': block_id = self.DEFAULT_ROOT_LIBRARY_BLOCK_ID new_usage_key = course_key.make_usage_key(block_type, block_id) # Both the course and library import process calls import_xblock(). # If importing a course -and- the branch setting is published_only, # then the non-draft course blocks are being imported. is_course = isinstance(course_key, CourseLocator) if is_course and self.get_branch_setting() == ModuleStoreEnum.Branch.published_only: # Override any existing drafts (PLAT-297, PLAT-299). This import/publish step removes # any local changes during the course import. 
draft_course = course_key.for_branch(ModuleStoreEnum.BranchName.draft) with self.branch_setting(ModuleStoreEnum.Branch.draft_preferred, draft_course): # Importing the block and publishing the block links the draft & published blocks' version history. draft_block = self.import_xblock(user_id, draft_course, block_type, block_id, fields, runtime, **kwargs) return self.publish(draft_block.location.version_agnostic(), user_id, blacklist=EXCLUDE_ALL, **kwargs) # do the import partitioned_fields = self.partition_fields_by_scope(block_type, fields) course_key = self._map_revision_to_branch(course_key) # cast to branch_setting return self._update_item_from_fields( user_id, course_key, BlockKey(block_type, block_id), partitioned_fields, None, allow_not_found=True, force=True, **kwargs ) or self.get_item(new_usage_key) def compute_published_info_internal(self, xblock): """ Get the published branch and find when it was published if it was. Cache the results in the xblock """ published_block = self._get_head(xblock, ModuleStoreEnum.BranchName.published) if published_block is not None: # pylint: disable=protected-access xblock._published_by = published_block.edit_info.edited_by xblock._published_on = published_block.edit_info.edited_on @contract(asset_key='AssetKey') def find_asset_metadata(self, asset_key, **kwargs): return super(DraftVersioningModuleStore, self).find_asset_metadata( self._map_revision_to_branch(asset_key), **kwargs ) def get_all_asset_metadata(self, course_key, asset_type, start=0, maxresults=-1, sort=None, **kwargs): return super(DraftVersioningModuleStore, self).get_all_asset_metadata( self._map_revision_to_branch(course_key), asset_type, start, maxresults, sort, **kwargs ) def _update_course_assets(self, user_id, asset_key, update_function): """ Updates both the published and draft branches """ # if one call gets an exception, don't do the other call but pass on the exception super(DraftVersioningModuleStore, self)._update_course_assets( user_id, self._map_revision_to_branch(asset_key, ModuleStoreEnum.RevisionOption.published_only), update_function ) super(DraftVersioningModuleStore, self)._update_course_assets( user_id, self._map_revision_to_branch(asset_key, ModuleStoreEnum.RevisionOption.draft_only), update_function ) def save_asset_metadata_list(self, asset_metadata_list, user_id, import_only=False): """ Updates both the published and draft branches """ # Convert each asset key to the proper branch before saving. asset_keys = [asset_md.asset_id for asset_md in asset_metadata_list] for asset_md in asset_metadata_list: asset_key = asset_md.asset_id asset_md.asset_id = self._map_revision_to_branch(asset_key, ModuleStoreEnum.RevisionOption.published_only) super(DraftVersioningModuleStore, self).save_asset_metadata_list(asset_metadata_list, user_id, import_only) for asset_md in asset_metadata_list: asset_key = asset_md.asset_id asset_md.asset_id = self._map_revision_to_branch(asset_key, ModuleStoreEnum.RevisionOption.draft_only) super(DraftVersioningModuleStore, self).save_asset_metadata_list(asset_metadata_list, user_id, import_only) # Change each asset key back to its original state. 
        for asset_md, original_key in zip(asset_metadata_list, asset_keys):
            asset_md.asset_id = original_key

    def _find_course_asset(self, asset_key):
        return super(DraftVersioningModuleStore, self)._find_course_asset(
            self._map_revision_to_branch(asset_key)
        )

    def _find_course_assets(self, course_key):
        """
        Split specific lookup
        """
        return super(DraftVersioningModuleStore, self)._find_course_assets(
            self._map_revision_to_branch(course_key)
        )

    def copy_all_asset_metadata(self, source_course_key, dest_course_key, user_id):
        """
        Copies to and from both branches
        """
        for revision in [ModuleStoreEnum.RevisionOption.published_only, ModuleStoreEnum.RevisionOption.draft_only]:
            super(DraftVersioningModuleStore, self).copy_all_asset_metadata(
                self._map_revision_to_branch(source_course_key, revision),
                self._map_revision_to_branch(dest_course_key, revision),
                user_id
            )
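# ---------------------------------------------------------------------------
# Usage sketch (editor's addition, not part of the module): how the branch
# mapping implemented above typically surfaces to callers. `store` is a
# hypothetical configured DraftVersioningModuleStore, `course_key` an existing
# CourseLocator, and `block`/`user_id` stand-in values; none of these names
# come from this file.
#
#   from xmodule.modulestore import ModuleStoreEnum
#
#   # Reads resolve to the draft branch under a draft-preferred setting...
#   with store.branch_setting(ModuleStoreEnum.Branch.draft_preferred, course_key):
#       draft_course = store.get_course(course_key)
#
#   # ...and to the published branch under published_only.
#   with store.branch_setting(ModuleStoreEnum.Branch.published_only, course_key):
#       published_course = store.get_course(course_key)
#
#   # Publishing promotes a draft subtree onto the published branch.
#   store.publish(block.location, user_id)
#   assert store.has_published_version(block)
# ---------------------------------------------------------------------------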
#!/usr/bin/env python # -*- coding: utf-8 -*- """Tests for rsyslog state checks.""" from grr.lib import flags from grr.lib import test_lib from grr.lib.checks import checks_test_lib from grr.parsers import config_file class RsyslogCheckTests(checks_test_lib.HostCheckTest): """Test the rsyslog checks.""" @classmethod def setUpClass(cls): cls.LoadCheck("rsyslog.yaml") cls.parser = config_file.RsyslogParser() def testLoggingAuthRemoteOK(self): chk_id = "CIS-LOGGING-AUTH-REMOTE" test_data = {"/etc/rsyslog.conf": "*.* @@tcp.example.com.:514;RSYSLOG_ForwardFormat"} host_data = self.GenFileData("LinuxRsyslogConfigs", test_data, self.parser) results = self.RunChecks(host_data) self.assertCheckUndetected(chk_id, results) def testLoggingAuthRemoteFail(self): chk_id = "CIS-LOGGING-AUTH-REMOTE" test_data = {"/etc/rsyslog.conf": "*.* /var/log/messages"} host_data = self.GenFileData("LinuxRsyslogConfigs", test_data, self.parser) sym = "Missing attribute: No remote destination for auth logs." found = ["Expected state was not found"] results = self.RunChecks(host_data) self.assertCheckDetectedAnom(chk_id, results, sym, found) def testLoggingFilePermissions(self): chk_id = "CIS-LOGGING-FILE-PERMISSIONS" ro = self.CreateStat("/test/ro", 0, 0, 0o0100640) rw = self.CreateStat("/test/rw", 0, 0, 0o0100666) sym = "Found: Log configurations can be modified by non-privileged users." found = ["/test/rw user: 0, group: 0, mode: -rw-rw-rw-"] results = self.GenResults(["LinuxRsyslogConfigs"], [[ro, rw]]) self.assertCheckDetectedAnom(chk_id, results, sym, found) def main(argv): test_lib.GrrTestProgram(argv=argv) if __name__ == "__main__": flags.StartMain(main)
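# ---------------------------------------------------------------------------
# Editor's note (illustrative): the CIS-LOGGING-AUTH-REMOTE cases above hinge
# on rsyslog's forwarding syntax in /etc/rsyslog.conf. A single '@' forwards
# over UDP and '@@' over TCP, so the passing fixture ships logs off-host while
# the failing one only writes locally:
#
#   *.* @@tcp.example.com.:514;RSYSLOG_ForwardFormat   # remote (TCP): check passes
#   *.* /var/log/messages                              # local file: anomaly raised
# ---------------------------------------------------------------------------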
from pybindgen import Module, FileCodeSink, param, retval, cppclass, typehandlers import pybindgen.settings import warnings class ErrorHandler(pybindgen.settings.ErrorHandler): def handle_error(self, wrapper, exception, traceback_): warnings.warn("exception %r in wrapper %s" % (exception, wrapper)) return True pybindgen.settings.error_handler = ErrorHandler() import sys def module_init(): root_module = Module('ns.antenna', cpp_namespace='::ns3') return root_module def register_types(module): root_module = module.get_root() ## angles.h (module 'antenna'): ns3::Angles [struct] module.add_class('Angles') ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList [class] module.add_class('AttributeConstructionList', import_from_module='ns.core') ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item [struct] module.add_class('Item', import_from_module='ns.core', outer_class=root_module['ns3::AttributeConstructionList']) ## callback.h (module 'core'): ns3::CallbackBase [class] module.add_class('CallbackBase', import_from_module='ns.core') ## object-base.h (module 'core'): ns3::ObjectBase [class] module.add_class('ObjectBase', allow_subclassing=True, import_from_module='ns.core') ## object.h (module 'core'): ns3::ObjectDeleter [struct] module.add_class('ObjectDeleter', import_from_module='ns.core') ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Object', 'ns3::ObjectBase', 'ns3::ObjectDeleter'], parent=root_module['ns3::ObjectBase'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## type-id.h (module 'core'): ns3::TypeId [class] module.add_class('TypeId', import_from_module='ns.core') ## type-id.h (module 'core'): ns3::TypeId::AttributeFlag [enumeration] module.add_enum('AttributeFlag', ['ATTR_GET', 'ATTR_SET', 'ATTR_CONSTRUCT', 'ATTR_SGC'], outer_class=root_module['ns3::TypeId'], import_from_module='ns.core') ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation [struct] module.add_class('AttributeInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId']) ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation [struct] module.add_class('TraceSourceInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId']) ## vector.h (module 'core'): ns3::Vector2D [class] module.add_class('Vector2D', import_from_module='ns.core') ## vector.h (module 'core'): ns3::Vector3D [class] module.add_class('Vector3D', import_from_module='ns.core') ## empty.h (module 'core'): ns3::empty [class] module.add_class('empty', import_from_module='ns.core') ## object.h (module 'core'): ns3::Object [class] module.add_class('Object', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >']) ## object.h (module 'core'): ns3::Object::AggregateIterator [class] module.add_class('AggregateIterator', import_from_module='ns.core', outer_class=root_module['ns3::Object']) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeAccessor', 'ns3::empty', 
'ns3::DefaultDeleter<ns3::AttributeAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeChecker', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeChecker>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeValue', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeValue>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::CallbackImplBase', 'ns3::empty', 'ns3::DefaultDeleter<ns3::CallbackImplBase>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::TraceSourceAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::TraceSourceAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor [class] module.add_class('TraceSourceAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >']) ## antenna-model.h (module 'antenna'): ns3::AntennaModel [class] module.add_class('AntennaModel', parent=root_module['ns3::Object']) ## attribute.h (module 'core'): ns3::AttributeAccessor [class] module.add_class('AttributeAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >']) ## attribute.h (module 'core'): ns3::AttributeChecker [class] module.add_class('AttributeChecker', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >']) ## attribute.h (module 'core'): ns3::AttributeValue [class] module.add_class('AttributeValue', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, 
ns3::DefaultDeleter<ns3::AttributeValue> >']) ## callback.h (module 'core'): ns3::CallbackChecker [class] module.add_class('CallbackChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker']) ## callback.h (module 'core'): ns3::CallbackImplBase [class] module.add_class('CallbackImplBase', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >']) ## callback.h (module 'core'): ns3::CallbackValue [class] module.add_class('CallbackValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue']) ## cosine-antenna-model.h (module 'antenna'): ns3::CosineAntennaModel [class] module.add_class('CosineAntennaModel', parent=root_module['ns3::AntennaModel']) ## attribute.h (module 'core'): ns3::EmptyAttributeValue [class] module.add_class('EmptyAttributeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue']) ## isotropic-antenna-model.h (module 'antenna'): ns3::IsotropicAntennaModel [class] module.add_class('IsotropicAntennaModel', parent=root_module['ns3::AntennaModel']) ## parabolic-antenna-model.h (module 'antenna'): ns3::ParabolicAntennaModel [class] module.add_class('ParabolicAntennaModel', parent=root_module['ns3::AntennaModel']) ## type-id.h (module 'core'): ns3::TypeIdChecker [class] module.add_class('TypeIdChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker']) ## type-id.h (module 'core'): ns3::TypeIdValue [class] module.add_class('TypeIdValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue']) ## vector.h (module 'core'): ns3::Vector2DChecker [class] module.add_class('Vector2DChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker']) ## vector.h (module 'core'): ns3::Vector2DValue [class] module.add_class('Vector2DValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue']) ## vector.h (module 'core'): ns3::Vector3DChecker [class] module.add_class('Vector3DChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker']) ## vector.h (module 'core'): ns3::Vector3DValue [class] module.add_class('Vector3DValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue']) typehandlers.add_type_alias('ns3::Vector3DValue', 'ns3::VectorValue') typehandlers.add_type_alias('ns3::Vector3DValue*', 'ns3::VectorValue*') typehandlers.add_type_alias('ns3::Vector3DValue&', 'ns3::VectorValue&') module.add_typedef(root_module['ns3::Vector3DValue'], 'VectorValue') typehandlers.add_type_alias('ns3::Vector3D', 'ns3::Vector') typehandlers.add_type_alias('ns3::Vector3D*', 'ns3::Vector*') typehandlers.add_type_alias('ns3::Vector3D&', 'ns3::Vector&') module.add_typedef(root_module['ns3::Vector3D'], 'Vector') typehandlers.add_type_alias('ns3::Vector3DChecker', 'ns3::VectorChecker') typehandlers.add_type_alias('ns3::Vector3DChecker*', 'ns3::VectorChecker*') typehandlers.add_type_alias('ns3::Vector3DChecker&', 'ns3::VectorChecker&') module.add_typedef(root_module['ns3::Vector3DChecker'], 'VectorChecker') ## Register a nested module for the namespace FatalImpl nested_module = module.add_cpp_namespace('FatalImpl') register_types_ns3_FatalImpl(nested_module) def register_types_ns3_FatalImpl(module): root_module = module.get_root() def register_methods(root_module): register_Ns3Angles_methods(root_module, root_module['ns3::Angles']) register_Ns3AttributeConstructionList_methods(root_module, 
root_module['ns3::AttributeConstructionList']) register_Ns3AttributeConstructionListItem_methods(root_module, root_module['ns3::AttributeConstructionList::Item']) register_Ns3CallbackBase_methods(root_module, root_module['ns3::CallbackBase']) register_Ns3ObjectBase_methods(root_module, root_module['ns3::ObjectBase']) register_Ns3ObjectDeleter_methods(root_module, root_module['ns3::ObjectDeleter']) register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >']) register_Ns3TypeId_methods(root_module, root_module['ns3::TypeId']) register_Ns3TypeIdAttributeInformation_methods(root_module, root_module['ns3::TypeId::AttributeInformation']) register_Ns3TypeIdTraceSourceInformation_methods(root_module, root_module['ns3::TypeId::TraceSourceInformation']) register_Ns3Vector2D_methods(root_module, root_module['ns3::Vector2D']) register_Ns3Vector3D_methods(root_module, root_module['ns3::Vector3D']) register_Ns3Empty_methods(root_module, root_module['ns3::empty']) register_Ns3Object_methods(root_module, root_module['ns3::Object']) register_Ns3ObjectAggregateIterator_methods(root_module, root_module['ns3::Object::AggregateIterator']) register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >']) register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >']) register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >']) register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >']) register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >']) register_Ns3TraceSourceAccessor_methods(root_module, root_module['ns3::TraceSourceAccessor']) register_Ns3AntennaModel_methods(root_module, root_module['ns3::AntennaModel']) register_Ns3AttributeAccessor_methods(root_module, root_module['ns3::AttributeAccessor']) register_Ns3AttributeChecker_methods(root_module, root_module['ns3::AttributeChecker']) register_Ns3AttributeValue_methods(root_module, root_module['ns3::AttributeValue']) register_Ns3CallbackChecker_methods(root_module, root_module['ns3::CallbackChecker']) register_Ns3CallbackImplBase_methods(root_module, root_module['ns3::CallbackImplBase']) register_Ns3CallbackValue_methods(root_module, root_module['ns3::CallbackValue']) register_Ns3CosineAntennaModel_methods(root_module, root_module['ns3::CosineAntennaModel']) register_Ns3EmptyAttributeValue_methods(root_module, root_module['ns3::EmptyAttributeValue']) register_Ns3IsotropicAntennaModel_methods(root_module, root_module['ns3::IsotropicAntennaModel']) register_Ns3ParabolicAntennaModel_methods(root_module, root_module['ns3::ParabolicAntennaModel']) 
register_Ns3TypeIdChecker_methods(root_module, root_module['ns3::TypeIdChecker']) register_Ns3TypeIdValue_methods(root_module, root_module['ns3::TypeIdValue']) register_Ns3Vector2DChecker_methods(root_module, root_module['ns3::Vector2DChecker']) register_Ns3Vector2DValue_methods(root_module, root_module['ns3::Vector2DValue']) register_Ns3Vector3DChecker_methods(root_module, root_module['ns3::Vector3DChecker']) register_Ns3Vector3DValue_methods(root_module, root_module['ns3::Vector3DValue']) return def register_Ns3Angles_methods(root_module, cls): cls.add_output_stream_operator() ## angles.h (module 'antenna'): ns3::Angles::Angles(ns3::Angles const & arg0) [copy constructor] cls.add_constructor([param('ns3::Angles const &', 'arg0')]) ## angles.h (module 'antenna'): ns3::Angles::Angles() [constructor] cls.add_constructor([]) ## angles.h (module 'antenna'): ns3::Angles::Angles(double phi, double theta) [constructor] cls.add_constructor([param('double', 'phi'), param('double', 'theta')]) ## angles.h (module 'antenna'): ns3::Angles::Angles(ns3::Vector v) [constructor] cls.add_constructor([param('ns3::Vector', 'v')]) ## angles.h (module 'antenna'): ns3::Angles::Angles(ns3::Vector v, ns3::Vector o) [constructor] cls.add_constructor([param('ns3::Vector', 'v'), param('ns3::Vector', 'o')]) ## angles.h (module 'antenna'): ns3::Angles::phi [variable] cls.add_instance_attribute('phi', 'double', is_const=False) ## angles.h (module 'antenna'): ns3::Angles::theta [variable] cls.add_instance_attribute('theta', 'double', is_const=False) return def register_Ns3AttributeConstructionList_methods(root_module, cls): ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::AttributeConstructionList(ns3::AttributeConstructionList const & arg0) [copy constructor] cls.add_constructor([param('ns3::AttributeConstructionList const &', 'arg0')]) ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::AttributeConstructionList() [constructor] cls.add_constructor([]) ## attribute-construction-list.h (module 'core'): void ns3::AttributeConstructionList::Add(std::string name, ns3::Ptr<ns3::AttributeChecker const> checker, ns3::Ptr<ns3::AttributeValue> value) [member function] cls.add_method('Add', 'void', [param('std::string', 'name'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker'), param('ns3::Ptr< ns3::AttributeValue >', 'value')]) ## attribute-construction-list.h (module 'core'): std::_List_const_iterator<ns3::AttributeConstructionList::Item> ns3::AttributeConstructionList::Begin() const [member function] cls.add_method('Begin', 'std::_List_const_iterator< ns3::AttributeConstructionList::Item >', [], is_const=True) ## attribute-construction-list.h (module 'core'): std::_List_const_iterator<ns3::AttributeConstructionList::Item> ns3::AttributeConstructionList::End() const [member function] cls.add_method('End', 'std::_List_const_iterator< ns3::AttributeConstructionList::Item >', [], is_const=True) ## attribute-construction-list.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeConstructionList::Find(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('Find', 'ns3::Ptr< ns3::AttributeValue >', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True) return def register_Ns3AttributeConstructionListItem_methods(root_module, cls): ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::Item() [constructor] cls.add_constructor([]) ## attribute-construction-list.h 
(module 'core'): ns3::AttributeConstructionList::Item::Item(ns3::AttributeConstructionList::Item const & arg0) [copy constructor] cls.add_constructor([param('ns3::AttributeConstructionList::Item const &', 'arg0')]) ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::checker [variable] cls.add_instance_attribute('checker', 'ns3::Ptr< ns3::AttributeChecker const >', is_const=False) ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::name [variable] cls.add_instance_attribute('name', 'std::string', is_const=False) ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::value [variable] cls.add_instance_attribute('value', 'ns3::Ptr< ns3::AttributeValue >', is_const=False) return def register_Ns3CallbackBase_methods(root_module, cls): ## callback.h (module 'core'): ns3::CallbackBase::CallbackBase(ns3::CallbackBase const & arg0) [copy constructor] cls.add_constructor([param('ns3::CallbackBase const &', 'arg0')]) ## callback.h (module 'core'): ns3::CallbackBase::CallbackBase() [constructor] cls.add_constructor([]) ## callback.h (module 'core'): ns3::Ptr<ns3::CallbackImplBase> ns3::CallbackBase::GetImpl() const [member function] cls.add_method('GetImpl', 'ns3::Ptr< ns3::CallbackImplBase >', [], is_const=True) ## callback.h (module 'core'): ns3::CallbackBase::CallbackBase(ns3::Ptr<ns3::CallbackImplBase> impl) [constructor] cls.add_constructor([param('ns3::Ptr< ns3::CallbackImplBase >', 'impl')], visibility='protected') ## callback.h (module 'core'): static std::string ns3::CallbackBase::Demangle(std::string const & mangled) [member function] cls.add_method('Demangle', 'std::string', [param('std::string const &', 'mangled')], is_static=True, visibility='protected') return def register_Ns3ObjectBase_methods(root_module, cls): ## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase() [constructor] cls.add_constructor([]) ## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase(ns3::ObjectBase const & arg0) [copy constructor] cls.add_constructor([param('ns3::ObjectBase const &', 'arg0')]) ## object-base.h (module 'core'): void ns3::ObjectBase::GetAttribute(std::string name, ns3::AttributeValue & value) const [member function] cls.add_method('GetAttribute', 'void', [param('std::string', 'name'), param('ns3::AttributeValue &', 'value')], is_const=True) ## object-base.h (module 'core'): bool ns3::ObjectBase::GetAttributeFailSafe(std::string name, ns3::AttributeValue & attribute) const [member function] cls.add_method('GetAttributeFailSafe', 'bool', [param('std::string', 'name'), param('ns3::AttributeValue &', 'attribute')], is_const=True) ## object-base.h (module 'core'): ns3::TypeId ns3::ObjectBase::GetInstanceTypeId() const [member function] cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## object-base.h (module 'core'): static ns3::TypeId ns3::ObjectBase::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## object-base.h (module 'core'): void ns3::ObjectBase::SetAttribute(std::string name, ns3::AttributeValue const & value) [member function] cls.add_method('SetAttribute', 'void', [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')]) ## object-base.h (module 'core'): bool ns3::ObjectBase::SetAttributeFailSafe(std::string name, ns3::AttributeValue const & value) [member function] cls.add_method('SetAttributeFailSafe', 'bool', [param('std::string', 'name'), 
param('ns3::AttributeValue const &', 'value')]) ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function] cls.add_method('TraceConnect', 'bool', [param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')]) ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function] cls.add_method('TraceConnectWithoutContext', 'bool', [param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')]) ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceDisconnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function] cls.add_method('TraceDisconnect', 'bool', [param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')]) ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceDisconnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function] cls.add_method('TraceDisconnectWithoutContext', 'bool', [param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')]) ## object-base.h (module 'core'): void ns3::ObjectBase::ConstructSelf(ns3::AttributeConstructionList const & attributes) [member function] cls.add_method('ConstructSelf', 'void', [param('ns3::AttributeConstructionList const &', 'attributes')], visibility='protected') ## object-base.h (module 'core'): void ns3::ObjectBase::NotifyConstructionCompleted() [member function] cls.add_method('NotifyConstructionCompleted', 'void', [], visibility='protected', is_virtual=True) return def register_Ns3ObjectDeleter_methods(root_module, cls): ## object.h (module 'core'): ns3::ObjectDeleter::ObjectDeleter() [constructor] cls.add_constructor([]) ## object.h (module 'core'): ns3::ObjectDeleter::ObjectDeleter(ns3::ObjectDeleter const & arg0) [copy constructor] cls.add_constructor([param('ns3::ObjectDeleter const &', 'arg0')]) ## object.h (module 'core'): static void ns3::ObjectDeleter::Delete(ns3::Object * object) [member function] cls.add_method('Delete', 'void', [param('ns3::Object *', 'object')], is_static=True) return def register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, cls): ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::SimpleRefCount() [constructor] cls.add_constructor([]) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::SimpleRefCount(ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> const & o) [copy constructor] cls.add_constructor([param('ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter > const &', 'o')]) ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::Cleanup() [member function] cls.add_method('Cleanup', 'void', [], is_static=True) return def register_Ns3TypeId_methods(root_module, cls): cls.add_binary_comparison_operator('<') cls.add_binary_comparison_operator('!=') cls.add_output_stream_operator() cls.add_binary_comparison_operator('==') ## type-id.h (module 'core'): ns3::TypeId::TypeId(char const * name) [constructor] cls.add_constructor([param('char const *', 'name')]) ## type-id.h (module 'core'): ns3::TypeId::TypeId() [constructor] cls.add_constructor([]) ## type-id.h (module 'core'): ns3::TypeId::TypeId(ns3::TypeId const & 
o) [copy constructor] cls.add_constructor([param('ns3::TypeId const &', 'o')]) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, ns3::AttributeValue const & initialValue, ns3::Ptr<ns3::AttributeAccessor const> accessor, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('AddAttribute', 'ns3::TypeId', [param('std::string', 'name'), param('std::string', 'help'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')]) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, uint32_t flags, ns3::AttributeValue const & initialValue, ns3::Ptr<ns3::AttributeAccessor const> accessor, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('AddAttribute', 'ns3::TypeId', [param('std::string', 'name'), param('std::string', 'help'), param('uint32_t', 'flags'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')]) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddTraceSource(std::string name, std::string help, ns3::Ptr<ns3::TraceSourceAccessor const> accessor) [member function] cls.add_method('AddTraceSource', 'ns3::TypeId', [param('std::string', 'name'), param('std::string', 'help'), param('ns3::Ptr< ns3::TraceSourceAccessor const >', 'accessor')]) ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation ns3::TypeId::GetAttribute(uint32_t i) const [member function] cls.add_method('GetAttribute', 'ns3::TypeId::AttributeInformation', [param('uint32_t', 'i')], is_const=True) ## type-id.h (module 'core'): std::string ns3::TypeId::GetAttributeFullName(uint32_t i) const [member function] cls.add_method('GetAttributeFullName', 'std::string', [param('uint32_t', 'i')], is_const=True) ## type-id.h (module 'core'): uint32_t ns3::TypeId::GetAttributeN() const [member function] cls.add_method('GetAttributeN', 'uint32_t', [], is_const=True) ## type-id.h (module 'core'): ns3::Callback<ns3::ObjectBase*,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> ns3::TypeId::GetConstructor() const [member function] cls.add_method('GetConstructor', 'ns3::Callback< ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', [], is_const=True) ## type-id.h (module 'core'): std::string ns3::TypeId::GetGroupName() const [member function] cls.add_method('GetGroupName', 'std::string', [], is_const=True) ## type-id.h (module 'core'): std::string ns3::TypeId::GetName() const [member function] cls.add_method('GetName', 'std::string', [], is_const=True) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::GetParent() const [member function] cls.add_method('GetParent', 'ns3::TypeId', [], is_const=True) ## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::GetRegistered(uint32_t i) [member function] cls.add_method('GetRegistered', 'ns3::TypeId', [param('uint32_t', 'i')], is_static=True) ## type-id.h (module 'core'): static uint32_t ns3::TypeId::GetRegisteredN() [member function] cls.add_method('GetRegisteredN', 'uint32_t', [], is_static=True) ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation ns3::TypeId::GetTraceSource(uint32_t i) const [member function] cls.add_method('GetTraceSource', 
'ns3::TypeId::TraceSourceInformation', [param('uint32_t', 'i')], is_const=True) ## type-id.h (module 'core'): uint32_t ns3::TypeId::GetTraceSourceN() const [member function] cls.add_method('GetTraceSourceN', 'uint32_t', [], is_const=True) ## type-id.h (module 'core'): uint16_t ns3::TypeId::GetUid() const [member function] cls.add_method('GetUid', 'uint16_t', [], is_const=True) ## type-id.h (module 'core'): bool ns3::TypeId::HasConstructor() const [member function] cls.add_method('HasConstructor', 'bool', [], is_const=True) ## type-id.h (module 'core'): bool ns3::TypeId::HasParent() const [member function] cls.add_method('HasParent', 'bool', [], is_const=True) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::HideFromDocumentation() [member function] cls.add_method('HideFromDocumentation', 'ns3::TypeId', []) ## type-id.h (module 'core'): bool ns3::TypeId::IsChildOf(ns3::TypeId other) const [member function] cls.add_method('IsChildOf', 'bool', [param('ns3::TypeId', 'other')], is_const=True) ## type-id.h (module 'core'): bool ns3::TypeId::LookupAttributeByName(std::string name, ns3::TypeId::AttributeInformation * info) const [member function] cls.add_method('LookupAttributeByName', 'bool', [param('std::string', 'name'), param('ns3::TypeId::AttributeInformation *', 'info', transfer_ownership=False)], is_const=True) ## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::LookupByName(std::string name) [member function] cls.add_method('LookupByName', 'ns3::TypeId', [param('std::string', 'name')], is_static=True) ## type-id.h (module 'core'): ns3::Ptr<ns3::TraceSourceAccessor const> ns3::TypeId::LookupTraceSourceByName(std::string name) const [member function] cls.add_method('LookupTraceSourceByName', 'ns3::Ptr< ns3::TraceSourceAccessor const >', [param('std::string', 'name')], is_const=True) ## type-id.h (module 'core'): bool ns3::TypeId::MustHideFromDocumentation() const [member function] cls.add_method('MustHideFromDocumentation', 'bool', [], is_const=True) ## type-id.h (module 'core'): bool ns3::TypeId::SetAttributeInitialValue(uint32_t i, ns3::Ptr<ns3::AttributeValue const> initialValue) [member function] cls.add_method('SetAttributeInitialValue', 'bool', [param('uint32_t', 'i'), param('ns3::Ptr< ns3::AttributeValue const >', 'initialValue')]) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetGroupName(std::string groupName) [member function] cls.add_method('SetGroupName', 'ns3::TypeId', [param('std::string', 'groupName')]) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetParent(ns3::TypeId tid) [member function] cls.add_method('SetParent', 'ns3::TypeId', [param('ns3::TypeId', 'tid')]) ## type-id.h (module 'core'): void ns3::TypeId::SetUid(uint16_t tid) [member function] cls.add_method('SetUid', 'void', [param('uint16_t', 'tid')]) return def register_Ns3TypeIdAttributeInformation_methods(root_module, cls): ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::AttributeInformation() [constructor] cls.add_constructor([]) ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::AttributeInformation(ns3::TypeId::AttributeInformation const & arg0) [copy constructor] cls.add_constructor([param('ns3::TypeId::AttributeInformation const &', 'arg0')]) ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::accessor [variable] cls.add_instance_attribute('accessor', 'ns3::Ptr< ns3::AttributeAccessor const >', is_const=False) ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::checker [variable] cls.add_instance_attribute('checker', 
'ns3::Ptr< ns3::AttributeChecker const >', is_const=False) ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::flags [variable] cls.add_instance_attribute('flags', 'uint32_t', is_const=False) ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::help [variable] cls.add_instance_attribute('help', 'std::string', is_const=False) ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::initialValue [variable] cls.add_instance_attribute('initialValue', 'ns3::Ptr< ns3::AttributeValue const >', is_const=False) ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::name [variable] cls.add_instance_attribute('name', 'std::string', is_const=False) ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::originalInitialValue [variable] cls.add_instance_attribute('originalInitialValue', 'ns3::Ptr< ns3::AttributeValue const >', is_const=False) return def register_Ns3TypeIdTraceSourceInformation_methods(root_module, cls): ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::TraceSourceInformation() [constructor] cls.add_constructor([]) ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::TraceSourceInformation(ns3::TypeId::TraceSourceInformation const & arg0) [copy constructor] cls.add_constructor([param('ns3::TypeId::TraceSourceInformation const &', 'arg0')]) ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::accessor [variable] cls.add_instance_attribute('accessor', 'ns3::Ptr< ns3::TraceSourceAccessor const >', is_const=False) ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::help [variable] cls.add_instance_attribute('help', 'std::string', is_const=False) ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::name [variable] cls.add_instance_attribute('name', 'std::string', is_const=False) return def register_Ns3Vector2D_methods(root_module, cls): cls.add_output_stream_operator() ## vector.h (module 'core'): ns3::Vector2D::Vector2D(ns3::Vector2D const & arg0) [copy constructor] cls.add_constructor([param('ns3::Vector2D const &', 'arg0')]) ## vector.h (module 'core'): ns3::Vector2D::Vector2D(double _x, double _y) [constructor] cls.add_constructor([param('double', '_x'), param('double', '_y')]) ## vector.h (module 'core'): ns3::Vector2D::Vector2D() [constructor] cls.add_constructor([]) ## vector.h (module 'core'): ns3::Vector2D::x [variable] cls.add_instance_attribute('x', 'double', is_const=False) ## vector.h (module 'core'): ns3::Vector2D::y [variable] cls.add_instance_attribute('y', 'double', is_const=False) return def register_Ns3Vector3D_methods(root_module, cls): cls.add_output_stream_operator() ## vector.h (module 'core'): ns3::Vector3D::Vector3D(ns3::Vector3D const & arg0) [copy constructor] cls.add_constructor([param('ns3::Vector3D const &', 'arg0')]) ## vector.h (module 'core'): ns3::Vector3D::Vector3D(double _x, double _y, double _z) [constructor] cls.add_constructor([param('double', '_x'), param('double', '_y'), param('double', '_z')]) ## vector.h (module 'core'): ns3::Vector3D::Vector3D() [constructor] cls.add_constructor([]) ## vector.h (module 'core'): ns3::Vector3D::x [variable] cls.add_instance_attribute('x', 'double', is_const=False) ## vector.h (module 'core'): ns3::Vector3D::y [variable] cls.add_instance_attribute('y', 'double', is_const=False) ## vector.h (module 'core'): ns3::Vector3D::z [variable] cls.add_instance_attribute('z', 'double', is_const=False) return def register_Ns3Empty_methods(root_module, cls): ## empty.h (module 'core'): ns3::empty::empty() 
[constructor] cls.add_constructor([]) ## empty.h (module 'core'): ns3::empty::empty(ns3::empty const & arg0) [copy constructor] cls.add_constructor([param('ns3::empty const &', 'arg0')]) return def register_Ns3Object_methods(root_module, cls): ## object.h (module 'core'): ns3::Object::Object() [constructor] cls.add_constructor([]) ## object.h (module 'core'): void ns3::Object::AggregateObject(ns3::Ptr<ns3::Object> other) [member function] cls.add_method('AggregateObject', 'void', [param('ns3::Ptr< ns3::Object >', 'other')]) ## object.h (module 'core'): void ns3::Object::Dispose() [member function] cls.add_method('Dispose', 'void', []) ## object.h (module 'core'): ns3::Object::AggregateIterator ns3::Object::GetAggregateIterator() const [member function] cls.add_method('GetAggregateIterator', 'ns3::Object::AggregateIterator', [], is_const=True) ## object.h (module 'core'): ns3::TypeId ns3::Object::GetInstanceTypeId() const [member function] cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_const=True, is_virtual=True) ## object.h (module 'core'): static ns3::TypeId ns3::Object::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## object.h (module 'core'): void ns3::Object::Initialize() [member function] cls.add_method('Initialize', 'void', []) ## object.h (module 'core'): ns3::Object::Object(ns3::Object const & o) [copy constructor] cls.add_constructor([param('ns3::Object const &', 'o')], visibility='protected') ## object.h (module 'core'): void ns3::Object::DoDispose() [member function] cls.add_method('DoDispose', 'void', [], visibility='protected', is_virtual=True) ## object.h (module 'core'): void ns3::Object::DoInitialize() [member function] cls.add_method('DoInitialize', 'void', [], visibility='protected', is_virtual=True) ## object.h (module 'core'): void ns3::Object::NotifyNewAggregate() [member function] cls.add_method('NotifyNewAggregate', 'void', [], visibility='protected', is_virtual=True) return def register_Ns3ObjectAggregateIterator_methods(root_module, cls): ## object.h (module 'core'): ns3::Object::AggregateIterator::AggregateIterator(ns3::Object::AggregateIterator const & arg0) [copy constructor] cls.add_constructor([param('ns3::Object::AggregateIterator const &', 'arg0')]) ## object.h (module 'core'): ns3::Object::AggregateIterator::AggregateIterator() [constructor] cls.add_constructor([]) ## object.h (module 'core'): bool ns3::Object::AggregateIterator::HasNext() const [member function] cls.add_method('HasNext', 'bool', [], is_const=True) ## object.h (module 'core'): ns3::Ptr<ns3::Object const> ns3::Object::AggregateIterator::Next() [member function] cls.add_method('Next', 'ns3::Ptr< ns3::Object const >', []) return def register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, cls): ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::SimpleRefCount() [constructor] cls.add_constructor([]) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > const & o) [copy constructor] cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter< ns3::AttributeAccessor > > const &', 'o')]) ## simple-ref-count.h (module 'core'): static 
def register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, cls):
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter< ns3::AttributeAccessor > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return

def register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, cls):
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter< ns3::AttributeChecker > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return

def register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, cls):
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter< ns3::AttributeValue > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return

def register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, cls):
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::SimpleRefCount(ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter< ns3::CallbackImplBase > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return
def register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, cls):
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::SimpleRefCount(ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter< ns3::TraceSourceAccessor > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return

def register_Ns3TraceSourceAccessor_methods(root_module, cls):
    ## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor::TraceSourceAccessor(ns3::TraceSourceAccessor const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::TraceSourceAccessor const &', 'arg0')])
    ## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor::TraceSourceAccessor() [constructor]
    cls.add_constructor([])
    ## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::Connect(ns3::ObjectBase * obj, std::string context, ns3::CallbackBase const & cb) const [member function]
    cls.add_method('Connect', 'bool', [param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::ConnectWithoutContext(ns3::ObjectBase * obj, ns3::CallbackBase const & cb) const [member function]
    cls.add_method('ConnectWithoutContext', 'bool', [param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('ns3::CallbackBase const &', 'cb')], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::Disconnect(ns3::ObjectBase * obj, std::string context, ns3::CallbackBase const & cb) const [member function]
    cls.add_method('Disconnect', 'bool', [param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::DisconnectWithoutContext(ns3::ObjectBase * obj, ns3::CallbackBase const & cb) const [member function]
    cls.add_method('DisconnectWithoutContext', 'bool', [param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('ns3::CallbackBase const &', 'cb')], is_pure_virtual=True, is_const=True, is_virtual=True)
    return

def register_Ns3AntennaModel_methods(root_module, cls):
    ## antenna-model.h (module 'antenna'): ns3::AntennaModel::AntennaModel(ns3::AntennaModel const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::AntennaModel const &', 'arg0')])
    ## antenna-model.h (module 'antenna'): ns3::AntennaModel::AntennaModel() [constructor]
    cls.add_constructor([])
    ## antenna-model.h (module 'antenna'): double ns3::AntennaModel::GetGainDb(ns3::Angles a) [member function]
    cls.add_method('GetGainDb', 'double', [param('ns3::Angles', 'a')], is_pure_virtual=True, is_virtual=True)
    ## antenna-model.h (module 'antenna'): static ns3::TypeId ns3::AntennaModel::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    return
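# AttributeAccessor, AttributeChecker and AttributeValue are the abstract
# interfaces of ns-3's attribute system; their core methods are registered as
# pure virtual, matching the abstract C++ classes.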
def register_Ns3AttributeAccessor_methods(root_module, cls):
    ## attribute.h (module 'core'): ns3::AttributeAccessor::AttributeAccessor(ns3::AttributeAccessor const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::AttributeAccessor const &', 'arg0')])
    ## attribute.h (module 'core'): ns3::AttributeAccessor::AttributeAccessor() [constructor]
    cls.add_constructor([])
    ## attribute.h (module 'core'): bool ns3::AttributeAccessor::Get(ns3::ObjectBase const * object, ns3::AttributeValue & attribute) const [member function]
    cls.add_method('Get', 'bool', [param('ns3::ObjectBase const *', 'object'), param('ns3::AttributeValue &', 'attribute')], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::AttributeAccessor::HasGetter() const [member function]
    cls.add_method('HasGetter', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::AttributeAccessor::HasSetter() const [member function]
    cls.add_method('HasSetter', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::AttributeAccessor::Set(ns3::ObjectBase * object, ns3::AttributeValue const & value) const [member function]
    cls.add_method('Set', 'bool', [param('ns3::ObjectBase *', 'object', transfer_ownership=False), param('ns3::AttributeValue const &', 'value')], is_pure_virtual=True, is_const=True, is_virtual=True)
    return

def register_Ns3AttributeChecker_methods(root_module, cls):
    ## attribute.h (module 'core'): ns3::AttributeChecker::AttributeChecker(ns3::AttributeChecker const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::AttributeChecker const &', 'arg0')])
    ## attribute.h (module 'core'): ns3::AttributeChecker::AttributeChecker() [constructor]
    cls.add_constructor([])
    ## attribute.h (module 'core'): bool ns3::AttributeChecker::Check(ns3::AttributeValue const & value) const [member function]
    cls.add_method('Check', 'bool', [param('ns3::AttributeValue const &', 'value')], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::AttributeChecker::Copy(ns3::AttributeValue const & source, ns3::AttributeValue & destination) const [member function]
    cls.add_method('Copy', 'bool', [param('ns3::AttributeValue const &', 'source'), param('ns3::AttributeValue &', 'destination')], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeChecker::Create() const [member function]
    cls.add_method('Create', 'ns3::Ptr< ns3::AttributeValue >', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeChecker::CreateValidValue(ns3::AttributeValue const & value) const [member function]
    cls.add_method('CreateValidValue', 'ns3::Ptr< ns3::AttributeValue >', [param('ns3::AttributeValue const &', 'value')], is_const=True)
    ## attribute.h (module 'core'): std::string ns3::AttributeChecker::GetUnderlyingTypeInformation() const [member function]
    cls.add_method('GetUnderlyingTypeInformation', 'std::string', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): std::string ns3::AttributeChecker::GetValueTypeName() const [member function]
    cls.add_method('GetValueTypeName', 'std::string', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::AttributeChecker::HasUnderlyingTypeInformation() const [member function]
    cls.add_method('HasUnderlyingTypeInformation', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    return
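# AttributeValue's SerializeToString()/DeserializeFromString() pair lets
# attribute values round-trip through the string form used by ns-3's
# configuration system; every concrete *Value class below implements it.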
def register_Ns3AttributeValue_methods(root_module, cls):
    ## attribute.h (module 'core'): ns3::AttributeValue::AttributeValue(ns3::AttributeValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::AttributeValue const &', 'arg0')])
    ## attribute.h (module 'core'): ns3::AttributeValue::AttributeValue() [constructor]
    cls.add_constructor([])
    ## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeValue::Copy() const [member function]
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::AttributeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_pure_virtual=True, is_virtual=True)
    ## attribute.h (module 'core'): std::string ns3::AttributeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_pure_virtual=True, is_const=True, is_virtual=True)
    return

def register_Ns3CallbackChecker_methods(root_module, cls):
    ## callback.h (module 'core'): ns3::CallbackChecker::CallbackChecker() [constructor]
    cls.add_constructor([])
    ## callback.h (module 'core'): ns3::CallbackChecker::CallbackChecker(ns3::CallbackChecker const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::CallbackChecker const &', 'arg0')])
    return

def register_Ns3CallbackImplBase_methods(root_module, cls):
    ## callback.h (module 'core'): ns3::CallbackImplBase::CallbackImplBase() [constructor]
    cls.add_constructor([])
    ## callback.h (module 'core'): ns3::CallbackImplBase::CallbackImplBase(ns3::CallbackImplBase const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::CallbackImplBase const &', 'arg0')])
    ## callback.h (module 'core'): bool ns3::CallbackImplBase::IsEqual(ns3::Ptr<ns3::CallbackImplBase const> other) const [member function]
    cls.add_method('IsEqual', 'bool', [param('ns3::Ptr< ns3::CallbackImplBase const >', 'other')], is_pure_virtual=True, is_const=True, is_virtual=True)
    return

def register_Ns3CallbackValue_methods(root_module, cls):
    ## callback.h (module 'core'): ns3::CallbackValue::CallbackValue(ns3::CallbackValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::CallbackValue const &', 'arg0')])
    ## callback.h (module 'core'): ns3::CallbackValue::CallbackValue() [constructor]
    cls.add_constructor([])
    ## callback.h (module 'core'): ns3::CallbackValue::CallbackValue(ns3::CallbackBase const & base) [constructor]
    cls.add_constructor([param('ns3::CallbackBase const &', 'base')])
    ## callback.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::CallbackValue::Copy() const [member function]
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
    ## callback.h (module 'core'): bool ns3::CallbackValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True)
    ## callback.h (module 'core'): std::string ns3::CallbackValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True)
    ## callback.h (module 'core'): void ns3::CallbackValue::Set(ns3::CallbackBase base) [member function]
    cls.add_method('Set', 'void', [param('ns3::CallbackBase', 'base')])
    return

def register_Ns3CosineAntennaModel_methods(root_module, cls):
    ## cosine-antenna-model.h (module 'antenna'): ns3::CosineAntennaModel::CosineAntennaModel() [constructor]
    cls.add_constructor([])
    ## cosine-antenna-model.h (module 'antenna'): ns3::CosineAntennaModel::CosineAntennaModel(ns3::CosineAntennaModel const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::CosineAntennaModel const &', 'arg0')])
    ## cosine-antenna-model.h (module 'antenna'): double ns3::CosineAntennaModel::GetBeamwidth() const [member function]
    cls.add_method('GetBeamwidth', 'double', [], is_const=True)
    ## cosine-antenna-model.h (module 'antenna'): double ns3::CosineAntennaModel::GetGainDb(ns3::Angles a) [member function]
    cls.add_method('GetGainDb', 'double', [param('ns3::Angles', 'a')], is_virtual=True)
    ## cosine-antenna-model.h (module 'antenna'): double ns3::CosineAntennaModel::GetOrientation() const [member function]
    cls.add_method('GetOrientation', 'double', [], is_const=True)
    ## cosine-antenna-model.h (module 'antenna'): static ns3::TypeId ns3::CosineAntennaModel::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## cosine-antenna-model.h (module 'antenna'): void ns3::CosineAntennaModel::SetBeamwidth(double beamwidthDegrees) [member function]
    cls.add_method('SetBeamwidth', 'void', [param('double', 'beamwidthDegrees')])
    ## cosine-antenna-model.h (module 'antenna'): void ns3::CosineAntennaModel::SetOrientation(double orientationDegrees) [member function]
    cls.add_method('SetOrientation', 'void', [param('double', 'orientationDegrees')])
    return

def register_Ns3EmptyAttributeValue_methods(root_module, cls):
    ## attribute.h (module 'core'): ns3::EmptyAttributeValue::EmptyAttributeValue(ns3::EmptyAttributeValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::EmptyAttributeValue const &', 'arg0')])
    ## attribute.h (module 'core'): ns3::EmptyAttributeValue::EmptyAttributeValue() [constructor]
    cls.add_constructor([])
    ## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::EmptyAttributeValue::Copy() const [member function]
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, visibility='private', is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::EmptyAttributeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], visibility='private', is_virtual=True)
    ## attribute.h (module 'core'): std::string ns3::EmptyAttributeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, visibility='private', is_virtual=True)
    return
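# IsotropicAntennaModel and ParabolicAntennaModel are the remaining concrete
# AntennaModel subclasses (CosineAntennaModel is registered above); each one
# overrides GetGainDb(ns3::Angles).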
def register_Ns3IsotropicAntennaModel_methods(root_module, cls):
    ## isotropic-antenna-model.h (module 'antenna'): ns3::IsotropicAntennaModel::IsotropicAntennaModel(ns3::IsotropicAntennaModel const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::IsotropicAntennaModel const &', 'arg0')])
    ## isotropic-antenna-model.h (module 'antenna'): ns3::IsotropicAntennaModel::IsotropicAntennaModel() [constructor]
    cls.add_constructor([])
    ## isotropic-antenna-model.h (module 'antenna'): double ns3::IsotropicAntennaModel::GetGainDb(ns3::Angles a) [member function]
    cls.add_method('GetGainDb', 'double', [param('ns3::Angles', 'a')], is_virtual=True)
    ## isotropic-antenna-model.h (module 'antenna'): static ns3::TypeId ns3::IsotropicAntennaModel::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    return

def register_Ns3ParabolicAntennaModel_methods(root_module, cls):
    ## parabolic-antenna-model.h (module 'antenna'): ns3::ParabolicAntennaModel::ParabolicAntennaModel() [constructor]
    cls.add_constructor([])
    ## parabolic-antenna-model.h (module 'antenna'): ns3::ParabolicAntennaModel::ParabolicAntennaModel(ns3::ParabolicAntennaModel const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ParabolicAntennaModel const &', 'arg0')])
    ## parabolic-antenna-model.h (module 'antenna'): double ns3::ParabolicAntennaModel::GetBeamwidth() const [member function]
    cls.add_method('GetBeamwidth', 'double', [], is_const=True)
    ## parabolic-antenna-model.h (module 'antenna'): double ns3::ParabolicAntennaModel::GetGainDb(ns3::Angles a) [member function]
    cls.add_method('GetGainDb', 'double', [param('ns3::Angles', 'a')], is_virtual=True)
    ## parabolic-antenna-model.h (module 'antenna'): double ns3::ParabolicAntennaModel::GetOrientation() const [member function]
    cls.add_method('GetOrientation', 'double', [], is_const=True)
    ## parabolic-antenna-model.h (module 'antenna'): static ns3::TypeId ns3::ParabolicAntennaModel::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## parabolic-antenna-model.h (module 'antenna'): void ns3::ParabolicAntennaModel::SetBeamwidth(double beamwidthDegrees) [member function]
    cls.add_method('SetBeamwidth', 'void', [param('double', 'beamwidthDegrees')])
    ## parabolic-antenna-model.h (module 'antenna'): void ns3::ParabolicAntennaModel::SetOrientation(double orientationDegrees) [member function]
    cls.add_method('SetOrientation', 'void', [param('double', 'orientationDegrees')])
    return

def register_Ns3TypeIdChecker_methods(root_module, cls):
    ## type-id.h (module 'core'): ns3::TypeIdChecker::TypeIdChecker() [constructor]
    cls.add_constructor([])
    ## type-id.h (module 'core'): ns3::TypeIdChecker::TypeIdChecker(ns3::TypeIdChecker const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::TypeIdChecker const &', 'arg0')])
    return
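# TypeIdValue is the AttributeValue wrapper paired with the TypeIdChecker
# registered above; the same Value/Checker pairing repeats for Vector2D and
# Vector3D below.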
def register_Ns3TypeIdValue_methods(root_module, cls):
    ## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue() [constructor]
    cls.add_constructor([])
    ## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue(ns3::TypeIdValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::TypeIdValue const &', 'arg0')])
    ## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue(ns3::TypeId const & value) [constructor]
    cls.add_constructor([param('ns3::TypeId const &', 'value')])
    ## type-id.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::TypeIdValue::Copy() const [member function]
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
    ## type-id.h (module 'core'): bool ns3::TypeIdValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True)
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeIdValue::Get() const [member function]
    cls.add_method('Get', 'ns3::TypeId', [], is_const=True)
    ## type-id.h (module 'core'): std::string ns3::TypeIdValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True)
    ## type-id.h (module 'core'): void ns3::TypeIdValue::Set(ns3::TypeId const & value) [member function]
    cls.add_method('Set', 'void', [param('ns3::TypeId const &', 'value')])
    return

def register_Ns3Vector2DChecker_methods(root_module, cls):
    ## vector.h (module 'core'): ns3::Vector2DChecker::Vector2DChecker() [constructor]
    cls.add_constructor([])
    ## vector.h (module 'core'): ns3::Vector2DChecker::Vector2DChecker(ns3::Vector2DChecker const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Vector2DChecker const &', 'arg0')])
    return

def register_Ns3Vector2DValue_methods(root_module, cls):
    ## vector.h (module 'core'): ns3::Vector2DValue::Vector2DValue() [constructor]
    cls.add_constructor([])
    ## vector.h (module 'core'): ns3::Vector2DValue::Vector2DValue(ns3::Vector2DValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Vector2DValue const &', 'arg0')])
    ## vector.h (module 'core'): ns3::Vector2DValue::Vector2DValue(ns3::Vector2D const & value) [constructor]
    cls.add_constructor([param('ns3::Vector2D const &', 'value')])
    ## vector.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::Vector2DValue::Copy() const [member function]
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
    ## vector.h (module 'core'): bool ns3::Vector2DValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True)
    ## vector.h (module 'core'): ns3::Vector2D ns3::Vector2DValue::Get() const [member function]
    cls.add_method('Get', 'ns3::Vector2D', [], is_const=True)
    ## vector.h (module 'core'): std::string ns3::Vector2DValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True)
    ## vector.h (module 'core'): void ns3::Vector2DValue::Set(ns3::Vector2D const & value) [member function]
    cls.add_method('Set', 'void', [param('ns3::Vector2D const &', 'value')])
    return

def register_Ns3Vector3DChecker_methods(root_module, cls):
    ## vector.h (module 'core'): ns3::Vector3DChecker::Vector3DChecker() [constructor]
    cls.add_constructor([])
    ## vector.h (module 'core'): ns3::Vector3DChecker::Vector3DChecker(ns3::Vector3DChecker const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Vector3DChecker const &', 'arg0')])
    return
def register_Ns3Vector3DValue_methods(root_module, cls):
    ## vector.h (module 'core'): ns3::Vector3DValue::Vector3DValue() [constructor]
    cls.add_constructor([])
    ## vector.h (module 'core'): ns3::Vector3DValue::Vector3DValue(ns3::Vector3DValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Vector3DValue const &', 'arg0')])
    ## vector.h (module 'core'): ns3::Vector3DValue::Vector3DValue(ns3::Vector3D const & value) [constructor]
    cls.add_constructor([param('ns3::Vector3D const &', 'value')])
    ## vector.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::Vector3DValue::Copy() const [member function]
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
    ## vector.h (module 'core'): bool ns3::Vector3DValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True)
    ## vector.h (module 'core'): ns3::Vector3D ns3::Vector3DValue::Get() const [member function]
    cls.add_method('Get', 'ns3::Vector3D', [], is_const=True)
    ## vector.h (module 'core'): std::string ns3::Vector3DValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True)
    ## vector.h (module 'core'): void ns3::Vector3DValue::Set(ns3::Vector3D const & value) [member function]
    cls.add_method('Set', 'void', [param('ns3::Vector3D const &', 'value')])
    return

def register_functions(root_module):
    module = root_module
    ## angles.h (module 'antenna'): extern double ns3::DegreesToRadians(double degrees) [free function]
    module.add_function('DegreesToRadians', 'double', [param('double', 'degrees')])
    ## angles.h (module 'antenna'): extern double ns3::RadiansToDegrees(double radians) [free function]
    module.add_function('RadiansToDegrees', 'double', [param('double', 'radians')])
    register_functions_ns3_FatalImpl(module.get_submodule('FatalImpl'), root_module)
    return

def register_functions_ns3_FatalImpl(module, root_module):
    return

def main():
    out = FileCodeSink(sys.stdout)
    root_module = module_init()
    register_types(root_module)
    register_methods(root_module)
    register_functions(root_module)
    root_module.generate(out)

if __name__ == '__main__':
    main()
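# Running this generator writes the C++ binding source to stdout via the
# FileCodeSink created in main(); FileCodeSink, sys and module_init come from
# the imports and definitions earlier in this generated file. A typical
# invocation (the file names below are illustrative only) redirects the
# output to the source file that the ns-3 build then compiles:
#
#   python <this-file>.py > ns3module.cc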