# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
# Name: naturalLanguageObjects.py
# Purpose: Multi-lingual conversion of pitch, etc. objects
# Authors: David Perez
# Michael Scott Cuthbert
#
# Copyright: Copyright © 2014, 2016 Michael Scott Cuthbert and the music21 Project
# License: BSD, see license.txt
# ------------------------------------------------------------------------------
'''
Multi-lingual conversion of pitch, etc. objects
'''
import unittest
from music21 import pitch
SUPPORTED_LANGUAGES = ['de', 'fr', 'it', 'es']
SUPPORTED_ACCIDENTALS = ['----', '---', '--', '-', '', '#', '##', '###', '####']
SUPPORTED_MICROTONES = ['A', 'B', 'C', 'D', 'E', 'F', 'G']
def generateLanguageDictionary(languageString):
    # Helper method for toPitch()
    # Generates a dictionary that allows the conversion of pitches from any supported language,
    # consistent with the standards set by pitch.py
if languageString not in SUPPORTED_LANGUAGES:
return {}
dictionary = {}
pitchStrings = []
for microtone in SUPPORTED_MICROTONES:
for accidental in SUPPORTED_ACCIDENTALS:
pitchStrings.append(microtone + accidental)
if languageString == 'de':
for pitchString in pitchStrings:
p = pitch.Pitch(pitchString)
dictionary[p.german] = pitchString
elif languageString == 'fr':
for pitchString in pitchStrings:
p = pitch.Pitch(pitchString)
dictionary[p.french] = pitchString
elif languageString == 'it':
for pitchString in pitchStrings:
p = pitch.Pitch(pitchString)
dictionary[p.italian] = pitchString
elif languageString == 'es':
for pitchString in pitchStrings:
p = pitch.Pitch(pitchString)
dictionary[p.spanish] = pitchString
return dictionary
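# For example, generateLanguageDictionary('de') maps 'Es' -> 'E-', 'H' -> 'B' and 'Ais' -> 'A#':
# the keys are the language-specific names music21 reports (e.g. Pitch.german) and the values
# are the plain pitch strings accepted by pitch.Pitch.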
def toPitch(pitchString, languageString):
'''
    Converts a string to a :class:`music21.pitch.Pitch` object given a language.
    Supported languages are French, German, Italian, and Spanish.
    Unrecognized strings default to C natural.
>>> languageExcerpts.naturalLanguageObjects.toPitch('Es', 'de')
<music21.pitch.Pitch E->
>>> languageExcerpts.naturalLanguageObjects.toPitch('H', 'de')
<music21.pitch.Pitch B>
>>> for i in ['As', 'A', 'Ais']:
... print(languageExcerpts.naturalLanguageObjects.toPitch(i, 'de'))
A-
A
A#
'''
langDict = generateLanguageDictionary(languageString)
if pitchString not in langDict:
return pitch.Pitch('C')
return pitch.Pitch(langDict[pitchString])
def toNote(pitchString, languageString):
'''
    Converts a string to a :class:`music21.note.Note` object given a language.
    Supported languages are French, German, Italian, and Spanish.
    Unrecognized strings default to C natural.
>>> languageExcerpts.naturalLanguageObjects.toNote('Es', 'de')
<music21.note.Note E->
>>> languageExcerpts.naturalLanguageObjects.toNote('H', 'de')
<music21.note.Note B>
>>> for i in ['As', 'A', 'Ais']:
... print(languageExcerpts.naturalLanguageObjects.toNote(i, 'de'))
<music21.note.Note A->
<music21.note.Note A>
<music21.note.Note A#>
'''
from music21 import note
return note.Note(toPitch(pitchString, languageString))
def toChord(pitchArray, languageString):
'''
    Converts a list of strings to a :class:`music21.chord.Chord` object given a language.
    Supported languages are French, German, Italian, and Spanish.
    Unsupported strings default to pitch C natural.
>>> languageExcerpts.naturalLanguageObjects.toChord(['Es', 'E', 'Eis'], 'de')
<music21.chord.Chord E- E E#>
'''
from music21 import chord
noteList = [toNote(pitchObj, languageString) for pitchObj in pitchArray]
return chord.Chord(noteList)
# -----------------------------------------------------------------------------
class Test(unittest.TestCase):
def testConvertPitches(self):
# testing defaults in case of invalid language and invalid input
self.assertEqual('<music21.pitch.Pitch C>', repr(toPitch('hello', '')))
self.assertEqual('<music21.pitch.Pitch C>', repr(toPitch('', 'hello')))
self.assertEqual('<music21.pitch.Pitch C>', repr(toPitch('hello', 'hello')))
self.assertEqual('<music21.pitch.Pitch C>', repr(toPitch('', '')))
# testing defaults in case of invalid language and valid input
self.assertEqual('<music21.pitch.Pitch C>', repr(toPitch('Eis', 'hello')))
self.assertEqual('<music21.pitch.Pitch C>', repr(toPitch('Eis', '')))
self.assertEqual('<music21.pitch.Pitch C>', repr(toPitch('H', 'hello')))
self.assertEqual('<music21.pitch.Pitch C>', repr(toPitch('H', '')))
self.assertEqual('<music21.pitch.Pitch C>', repr(toPitch('Sol', 'hello')))
self.assertEqual('<music21.pitch.Pitch C>', repr(toPitch('Sol', '')))
self.assertEqual('<music21.pitch.Pitch C>', repr(toPitch('Re', 'hello')))
self.assertEqual('<music21.pitch.Pitch C>', repr(toPitch('Re', '')))
# testing defaults in case of invalid input string and valid language
self.assertEqual('<music21.pitch.Pitch C>', repr(toPitch('hello', 'de')))
self.assertEqual('<music21.pitch.Pitch C>', repr(toPitch('', 'de')))
self.assertEqual('<music21.pitch.Pitch C>', repr(toPitch('hello', 'fr')))
self.assertEqual('<music21.pitch.Pitch C>', repr(toPitch('', 'fr')))
self.assertEqual('<music21.pitch.Pitch C>', repr(toPitch('hello', 'es')))
self.assertEqual('<music21.pitch.Pitch C>', repr(toPitch('', 'es')))
self.assertEqual('<music21.pitch.Pitch C>', repr(toPitch('hello', 'it')))
self.assertEqual('<music21.pitch.Pitch C>', repr(toPitch('', 'it')))
# testing defaults in case of valid input string and valid language
self.assertEqual('<music21.pitch.Pitch C##>', repr(toPitch('do doppio diesis',
'it')))
self.assertEqual('<music21.pitch.Pitch F##>', repr(toPitch('fa doble sostenido',
'es')))
self.assertEqual('<music21.pitch.Pitch G--->', repr(toPitch('sol triple bèmol',
'es')))
self.assertEqual('<music21.pitch.Pitch D>', repr(toPitch('re', 'it')))
self.assertEqual('<music21.pitch.Pitch B-->', repr(toPitch('Heses', 'de')))
self.assertEqual('<music21.pitch.Pitch E##>', repr(toPitch('Eisis', 'de')))
self.assertEqual('<music21.pitch.Pitch A####>',
repr(toPitch('la quadruple dièse', 'fr')))
self.assertEqual('<music21.pitch.Pitch B--->', repr(toPitch('si triple bémol', 'fr')))
def testConvertNotes(self):
# testing defaults in case of invalid language and invalid input
self.assertEqual('<music21.note.Note C>', repr(toNote('hello', '')))
self.assertEqual('<music21.note.Note C>', repr(toNote('', 'hello')))
self.assertEqual('<music21.note.Note C>', repr(toNote('hello', 'hello')))
self.assertEqual('<music21.note.Note C>', repr(toNote('', '')))
# testing defaults in case of invalid language and valid input
self.assertEqual('<music21.note.Note C>', repr(toNote('Eis', 'hello')))
self.assertEqual('<music21.note.Note C>', repr(toNote('Eis', '')))
self.assertEqual('<music21.note.Note C>', repr(toNote('H', 'hello')))
self.assertEqual('<music21.note.Note C>', repr(toNote('H', '')))
self.assertEqual('<music21.note.Note C>', repr(toNote('Sol', 'hello')))
self.assertEqual('<music21.note.Note C>', repr(toNote('Sol', '')))
self.assertEqual('<music21.note.Note C>', repr(toNote('Re', 'hello')))
self.assertEqual('<music21.note.Note C>', repr(toNote('Re', '')))
# testing defaults in case of invalid input string and valid language
self.assertEqual('<music21.note.Note C>', repr(toNote('hello', 'de')))
self.assertEqual('<music21.note.Note C>', repr(toNote('', 'de')))
self.assertEqual('<music21.note.Note C>', repr(toNote('hello', 'fr')))
self.assertEqual('<music21.note.Note C>', repr(toNote('', 'fr')))
self.assertEqual('<music21.note.Note C>', repr(toNote('hello', 'es')))
self.assertEqual('<music21.note.Note C>', repr(toNote('', 'es')))
self.assertEqual('<music21.note.Note C>', repr(toNote('hello', 'it')))
self.assertEqual('<music21.note.Note C>', repr(toNote('', 'it')))
# testing defaults in case of valid input string and valid language
self.assertEqual('<music21.note.Note C##>', repr(toNote('do doppio diesis', 'it')))
self.assertEqual('<music21.note.Note F##>', repr(toNote('fa doble sostenido', 'es')))
self.assertEqual('<music21.note.Note G--->', repr(toNote('sol triple bèmol', 'es')))
self.assertEqual('<music21.note.Note D>', repr(toNote('re', 'it')))
self.assertEqual('<music21.note.Note B-->', repr(toNote('Heses', 'de')))
self.assertEqual('<music21.note.Note E##>', repr(toNote('Eisis', 'de')))
self.assertEqual('<music21.note.Note A####>',
repr(toNote('la quadruple dièse', 'fr')))
self.assertEqual('<music21.note.Note B--->', repr(toNote('si triple bémol', 'fr')))
def testConvertChords(self):
# testing defaults in case of invalid language and no input
self.assertEqual((), toChord([], '').pitches)
self.assertEqual((), toChord([], 'hello').pitches)
# testing defaults in case of valid language and no input
self.assertEqual((), toChord([], 'de').pitches)
self.assertEqual((), toChord([], 'fr').pitches)
self.assertEqual((), toChord([], 'es').pitches)
self.assertEqual((), toChord([], 'it').pitches)
# testing defaults in case of invalid language and valid list
self.assertEqual('<music21.chord.Chord C>', repr(toChord(['Eis'], 'hello')))
self.assertEqual('<music21.chord.Chord C>', repr(toChord(['Eis'], '')))
self.assertEqual('<music21.chord.Chord C>', repr(toChord(['H'], 'hello')))
self.assertEqual('<music21.chord.Chord C>', repr(toChord(['H'], '')))
self.assertEqual('<music21.chord.Chord C>', repr(toChord(['Sol'], 'hello')))
self.assertEqual('<music21.chord.Chord C>', repr(toChord(['Sol'], '')))
self.assertEqual('<music21.chord.Chord C>', repr(toChord(['Re'], 'hello')))
self.assertEqual('<music21.chord.Chord C>', repr(toChord(['Re'], '')))
# testing defaults in case of invalid input list and valid language
self.assertEqual('<music21.chord.Chord C>', repr(toChord(['hello'], 'de')))
self.assertEqual('<music21.chord.Chord C>', repr(toChord([''], 'de')))
self.assertEqual('<music21.chord.Chord C>', repr(toChord(['hello'], 'fr')))
self.assertEqual('<music21.chord.Chord C>', repr(toChord([''], 'fr')))
self.assertEqual('<music21.chord.Chord C>', repr(toChord(['hello'], 'es')))
self.assertEqual('<music21.chord.Chord C>', repr(toChord([''], 'es')))
self.assertEqual('<music21.chord.Chord C>', repr(toChord(['hello'], 'it')))
self.assertEqual('<music21.chord.Chord C>', repr(toChord([''], 'it')))
# testing defaults in case of valid input list and valid language
self.assertEqual('<music21.chord.Chord C##>',
repr(toChord(['do doppio diesis'], 'it')))
self.assertEqual('<music21.chord.Chord F##>',
repr(toChord(['fa doble sostenido'], 'es')))
self.assertEqual('<music21.chord.Chord G--->',
repr(toChord(['sol triple bèmol'], 'es')))
self.assertEqual('<music21.chord.Chord D>', repr(toChord(['re'], 'it')))
self.assertEqual('<music21.chord.Chord B-->', repr(toChord(['Heses'], 'de')))
self.assertEqual('<music21.chord.Chord E##>', repr(toChord(['Eisis'], 'de')))
self.assertEqual('<music21.chord.Chord A####>',
repr(toChord(['la quadruple dièse'], 'fr')))
self.assertEqual('<music21.chord.Chord B--->',
repr(toChord(['si triple bémol'], 'fr')))
self.assertEqual('<music21.chord.Chord C## D>',
repr(toChord(['do doppio diesis', 're'], 'it')))
self.assertEqual('<music21.chord.Chord F## G--->',
repr(toChord(['fa doble sostenido', 'sol triple bèmol'], 'es')))
self.assertEqual('<music21.chord.Chord B-- E##>',
repr(toChord(['Heses', 'Eisis'], 'de')))
self.assertEqual('<music21.chord.Chord A#### B--->',
repr(toChord(['la quadruple dièse', 'si triple bémol'], 'fr')))
# -----------------------------------------------------------------------------
# define presented order in documentation
_DOC_ORDER = []
if __name__ == '__main__':
import music21
music21.mainTest(Test)
from copy import deepcopy
from flask import Flask, request, redirect, session, abort
from flask_login import LoginManager
from flask_wtf.csrf import CSRFProtect, CSRFError
from werkzeug.local import Local, LocalProxy
import dmapiclient
from dmutils import init_app
from dmcontent.content_loader import ContentLoader
from dmcontent.utils import try_load_manifest, try_load_metadata, try_load_messages
from dmutils.user import User
from dmutils.external import external as external_blueprint
from dmutils.timing import logged_duration
from govuk_frontend_jinja.flask_ext import init_govuk_frontend
from config import configs
login_manager = LoginManager()
data_api_client = dmapiclient.DataAPIClient()
search_api_client = dmapiclient.SearchAPIClient()
csrf = CSRFProtect()
# we use our own Local for objects we explicitly want to be able to retain between requests but which
# shouldn't share a common object between concurrent threads/contexts
_local = Local()
def _make_content_loader_factory(application, frameworks, initial_instance=None):
# for testing purposes we allow an initial_instance to be provided
master_cl = initial_instance if initial_instance is not None else ContentLoader('app/content')
for framework_data in frameworks:
if not framework_data['slug'] in application.config.get('DM_FRAMEWORK_CONTENT_MAP', {}):
if framework_data['framework'] == 'g-cloud':
master_cl.load_manifest(framework_data['slug'], 'services', 'services_search_filters')
# we need to be able to display old services, even on expired frameworks
master_cl.load_manifest(framework_data['slug'], 'services', 'display_service')
master_cl.load_manifest(framework_data['slug'], 'services', 'download_results')
try_load_metadata(master_cl, application, framework_data, ['following_framework'])
elif framework_data['framework'] == 'digital-outcomes-and-specialists':
master_cl.load_manifest(framework_data['slug'], 'briefs', 'display_brief')
try_load_manifest(master_cl, application, framework_data, 'briefs', 'briefs_search_filters')
# seal master_cl in a closure by returning a function which will only ever return an independent copy of it.
# this is of course only guaranteed when the initial_instance argument wasn't used.
return lambda: deepcopy(master_cl)
def _content_loader_factory():
# this is a placeholder _content_loader_factory implementation that should never get called, instead being
# replaced by one created using _make_content_loader_factory once an `application` is available to
# initialize it with
raise LookupError("content loader not ready yet: must be initialized & populated by create_app")
@logged_duration(message="Spent {duration_real}s in get_content_loader")
def get_content_loader():
if not hasattr(_local, "content_loader"):
_local.content_loader = _content_loader_factory()
return _local.content_loader
content_loader = LocalProxy(get_content_loader)
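# A minimal illustrative sketch (not used by this app) of the pattern above: the factory
# returned by _make_content_loader_factory hands out independent deepcopies of a sealed,
# fully-populated "master" object, and the Local/LocalProxy pair lazily builds and caches
# one copy per request context. All names below are hypothetical.
def _example_local_proxy_pattern(master=None):
    demo_local = Local()
    demo_master = master if master is not None else {'populated': True}
    def demo_factory():
        # stands in for the closure returned by _make_content_loader_factory
        return deepcopy(demo_master)
    def get_demo_value():
        # stands in for get_content_loader: build once per context, then reuse
        if not hasattr(demo_local, 'value'):
            demo_local.value = demo_factory()
        return demo_local.value
    return LocalProxy(get_demo_value)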
from .main.helpers.framework_helpers import get_latest_live_framework
from .main.helpers.search_save_helpers import SavedSearchStateEnum
def create_app(config_name):
application = Flask(__name__)
# allow using govuk-frontend Nunjucks templates
init_govuk_frontend(application)
init_app(
application,
configs[config_name],
data_api_client=data_api_client,
login_manager=login_manager,
search_api_client=search_api_client,
)
# replace placeholder _content_loader_factory with properly initialized one
global _content_loader_factory
_content_loader_factory = _make_content_loader_factory(
application,
data_api_client.find_frameworks().get('frameworks'),
)
from .metrics import metrics as metrics_blueprint, gds_metrics
from .main import main as main_blueprint
from .main import direct_award as direct_award_blueprint
from .main import direct_award_public as direct_award_public_blueprint
from .status import status as status_blueprint
application.register_blueprint(metrics_blueprint)
application.register_blueprint(status_blueprint)
application.register_blueprint(main_blueprint)
application.register_blueprint(direct_award_blueprint)
# direct_award_blueprint and direct_award_public_blueprint cover the same url prefix - direct_award_blueprint takes
# precedence
application.register_blueprint(direct_award_public_blueprint)
# Must be registered last so that any routes declared in the app are registered first (i.e. take precedence over
# the external NotImplemented routes in the dm-utils external blueprint).
application.register_blueprint(external_blueprint)
login_manager.login_view = '/user/login'
login_manager.login_message = None # don't flash message to user
gds_metrics.init_app(application)
csrf.init_app(application)
@application.before_request
def remove_trailing_slash():
if request.path != '/' and request.path.endswith('/'):
if request.query_string:
return redirect(
'{}?{}'.format(
request.path[:-1],
request.query_string.decode('utf-8')
),
code=301
)
else:
return redirect(request.path[:-1], code=301)
@application.before_request
def refresh_session():
session.permanent = True
session.modified = True
@application.context_processor
def inject_saved_search_temp_message_statuses():
return {state.name: state.value for state in SavedSearchStateEnum}
return application
@login_manager.user_loader
def load_user(user_id):
return User.load_user(data_api_client, user_id)
# -*- Mode: Python -*-
import coro
import coro.db.postgres as PG
import struct
W = coro.write_stderr
# postgres implements the COPY FROM STDIN command inside the protocol:
# if you emit a COPY command (via SQL), it will put the connection into
# copy-in mode. Feed it the exact same data you would have read from a file,
# and you have a much faster way of populating a database.
class writer:
def __init__ (self, forms, fout, chunk_size=16000):
self.forms = forms
self.fout = fout
self.buffer = []
self.size = 0
self.chunk_size = chunk_size
self.append ('PGCOPY\n\xff\r\n\x00')
self.append (struct.pack ('>L', 0)) # flags
self.append (struct.pack ('>L', 0)) # ext len
self.count = 0
def append (self, data):
self.buffer.append (data)
self.size += len (data)
if self.size > self.chunk_size:
self.flush()
def flush (self):
block, self.buffer = self.buffer, []
self.size = 0
block = ''.join (block)
self.fout (block)
def write_row (self, row):
row_data = [struct.pack ('>h', len(row))]
for i in range (len (self.forms)):
if row[i] is None:
row_data.append (struct.pack ('>l', -1))
else:
data = struct.pack ('>%s' % (self.forms[i],), row[i])
row_data.append (struct.pack ('>l', len(data)))
row_data.append (data)
self.count += 1
self.append (''.join (row_data,))
def done (self):
self.append (struct.pack ('>h', -1))
self.flush()
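# Illustrative sketch of the bytes write_row() emits for one (n, n2) row of two int4 columns,
# following the documented PGCOPY binary format: per row a 16-bit field count, then for each
# field a 32-bit byte length (-1 marks NULL) followed by the value itself; the file header and
# the 16-bit -1 trailer are handled by __init__ and done() above.
def _example_row_bytes():
    return b''.join([
        struct.pack('>h', 2),  # field count: two columns
        struct.pack('>l', 4),  # byte length of the first int4
        struct.pack('>i', 3),  # n = 3
        struct.pack('>l', 4),  # byte length of the second int4
        struct.pack('>i', 9),  # n2 = 9
    ])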
def t0():
db = PG.postgres_client ('t0', 'foo', 'bar')
db.connect()
try:
db.Q ('drop table squares;')
except PG.QueryError:
pass
db.Q ('create table squares (n int, n2 int);')
db.query ('copy squares from stdin binary;')
w = writer (('i', 'i'), db.putline)
for i in range (1000):
w.write_row ([i, i*i])
w.done()
db.endcopy()
coro.spawn (t0)
coro.event_loop()
from data import disasters as df
from predicts import disaster_model as model
def predict_categories(query):
"""
This function classifies the text in input
:param query: the text to classify
:return: the categories of the text
"""
# use model to predict classification for query
classification_labels = model.predict([query])[0]
classification_results = dict(zip(df.columns[4:], classification_labels))
return classification_results
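# Illustrative usage sketch; the category names shown are hypothetical, since the real keys
# come from df.columns[4:] and the labels from the loaded disaster_model:
#     predict_categories("We need water and medical supplies")
#     # -> {'water': 1, 'medical_help': 1, 'food': 0, ...}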
from math import ceil
FNAME = 'submit'
def main():
output = ''
for p, problem in enumerate(load_input(), 1):
output += 'Case #%d:%s\n' % (p, solve(*problem))
with open('%sOutput' % FNAME, 'w', encoding='utf-8') as fp:
fp.write(output)
def load_input():
with open('%sInput' % FNAME, 'r', encoding='utf-8') as fp:
problems = int(fp.readline())
for p in range(problems):
w, h, f, p = map(int, fp.readline().split(' '))
folds = [fp.readline().strip() for fold in range(f)]
punches = [tuple(map(int, fp.readline().split(' '))) for punch in range(p)]
yield w, h, folds, punches
def solve(w, h, folds, punches):
res = punches
for fold in folds:
if fold == 'T':
punches = [(x, h - y - 1) for x, y in punches] + [(x, h + y) for x, y in punches]
h *= 2
elif fold == 'B':
punches = punches + [(x, 2 * h - y - 1) for x, y in punches]
h *= 2
elif fold == 'L':
punches = [(w - x - 1, y) for x, y in punches] + [(w + x, y) for x, y in punches]
w *= 2
elif fold == 'R':
punches = punches + [(2 * w - x - 1, y) for x, y in punches]
w *= 2
return '\n' + '\n'.join('%s %s' % punch for punch in sorted(punches))
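# Worked trace of the 'B' branch above with w=2, h=1 and punches=[(0, 0)]: the punch is kept
# and its mirror (0, 2*1 - 0 - 1) = (0, 1) is appended, after which h doubles to 2, so the
# sheet now carries punches (0, 0) and (0, 1).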
def draw(punches, w, h):
draw = [['x' for e in range(w)] for _ in range(h)]
for x, y in punches:
draw[y][x] = 'o'
print('\n'.join(''.join(line) for line in draw))
if __name__ == '__main__':
    main()
from JumpScale import j
from OSISCMDS import OSISCMDS
from OSISClientForCat import OSISClientForCat
from OSISBaseObject import OSISBaseObject
from OSISBaseObjectComplexType import OSISBaseObjectComplexType
import JumpScale.portal.codegentools
import inspect
import imp
import sys
import ujson
import lz4
class FileLikeStreamObject(object):
def __init__(self):
self.out=""
def write(self, buf,**args):
for line in buf.rstrip().splitlines():
#print "###%s"%line
self.out+="%s\n"%line
class ClassEmpty():
pass
class NameSpaceClient(object):
def __init__(self, client, namespace):
self._client = client
self._namespace = namespace
for category in client.listNamespaceCategories(namespace):
cclient = j.core.osis.getClientForCategory(self._client, self._namespace, category)
setattr(self, category, cclient)
def __getattr__(self, category):
categories = self._client.listNamespaceCategories(self._namespace)
if category not in categories:
raise AttributeError("Category %s does not exists in namespace %s" % (category, self._namespace))
cclient = j.core.osis.getClientForCategory(self._client, self._namespace, category)
setattr(self, category, cclient)
return cclient
class OSISFactory:
"""
"""
def _redirect(self):
self._out=FileLikeStreamObject()
if not self._sysstdout:
self._sysstdout=sys.stdout
# sys.stdout=self._out
def _stopRedirect(self,pprint=False):
if self._sysstdout:
sys.stdout=self._sysstdout
out = None
if self._out:
out=self._out.out
if pprint:
print out
self._out=None
return out
def __init__(self):
self._sysstdout = None
self.osisConnections = {}
self.osisConnectionsCat={}
self.nodeguids={}
self.osisModels={}
self.namespacesInited={}
def encrypt(self,obj):
if not j.basetype.string.check(obj):
if j.basetype.dictionary.check(obj):
val=obj
else:
val=obj.__dict__
val=ujson.dumps(val)
else:
val = obj
val=lz4.dumps(val)
val=j.db.serializers.blowfish.dumps(val,self.key)
return val
def decrypt(self,val,json=False):
if not j.basetype.string.check(val):
raise RuntimeError("needs to be string")
val=j.db.serializers.blowfish.loads(val,self.key)
val=lz4.loads(val)
if json:
val=ujson.loads(val)
return val
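    # Illustrative round trip of the two methods above (object and key are hypothetical):
    #     blob = self.encrypt(someObj)          # someObj.__dict__ -> ujson -> lz4 -> blowfish(self.key)
    #     data = self.decrypt(blob, json=True)  # blowfish(self.key) -> lz4 -> ujson -> dict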
def getLocal(self, path="", overwriteHRD=False, overwriteImplementation=False, namespacename=None):
"""
create local instance starting from path
"""
osis=OSISCMDS()
osis.init()
return osis
def startDaemon(self, path="", overwriteHRD=False, overwriteImplementation=False, key="",port=5544,superadminpasswd=None,dbconnections={},hrd=None):
"""
        start daemon
"""
if hrd<>None:
self.hrd=hrd
self.key=key
self.superadminpasswd=superadminpasswd
self.dbconnections=dbconnections
if self.superadminpasswd=="":
j.events.inputerror_critical("cannot start osis, superadminpasswd needs to be specified")
daemon = j.servers.geventws.getServer(port=port)
OSISCMDS.dbconnections = dbconnections
daemon.addCMDsInterface(OSISCMDS, category="osis") # pass as class not as object !!!
daemon.daemon.cmdsInterfaces["osis"].init(path=path)#,esip=elasticsearchip,esport=elasticsearchport,db=db)
self.cmds=daemon.daemon.cmdsInterfaces["osis"]
daemon.schedule("checkchangelog", self.cmds.checkChangeLog)
daemon.start()
def getClient(self, ipaddr=None, port=5544,user=None,passwd=None,ssl=False,gevent=False):
if ipaddr==None or user==None or passwd==None:
osisjp=j.packages.findNewest(name="osis_client",domain="jumpscale")
inames=osisjp.getInstanceNames()
if len(inames)==1:
osisjp=osisjp.load(instance=inames[0])
hrd=osisjp.hrd_instance
if ipaddr==None:
ipaddr=hrd.get("osis.client.addr")
if user==None:
user=hrd.get("osis.client.login")
if passwd==None:
passwd=hrd.get("osis.client.passwd")
port=int(hrd.get("osis.client.port"))
if passwd=="EMPTY":
passwd=""
if ipaddr<>None:
if not isinstance(ipaddr, list):
ips = [ipaddr]
else:
ips = ipaddr
elif j.application.config.exists('osis.ip'):
ips = j.application.config.getList('osis.ip')
else:
ips = [ j.application.config.get('grid.master.ip') ]
connections = [ (ip, port) for ip in ips ]
key = "%s_%s_%s" % (connections, user, passwd)
if key in self.osisConnections:
return self.osisConnections[key]
if user==None or user=="node":
user="node"
passwd=j.application.config.get("grid.node.machineguid")
elif user=="root" and not passwd:
if j.application.config.exists("osis.superadmin.passwd"):
passwd=j.application.config.get("osis.superadmin.passwd")
else:
raise RuntimeError("Osis superadmin passwd has not been defined on this node, please put in #hrd (osis.superadmin.passwd) or use argument 'passwd'.")
with j.logger.nostdout():
#client = j.core.zdaemon.getZDaemonHAClient(connections, category="osis", user=user, passwd=passwd,ssl=ssl,sendformat="j", returnformat="j",gevent=gevent)
client= j.servers.geventws.getHAClient(connections, user=user, passwd=passwd,category="osis")
self.osisConnections[key] = client
return client
def getClientByInstance(self, instance=None, ssl=False, gevent=False,die=True):
if instance is None:
if hasattr(j.application, 'instanceconfig'):
instance = j.application.instanceconfig.get('osis.connection')
else:
instance = 'main'
osisjp=j.packages.findNewest(name="osis_client",domain="jumpscale")
osisjp.load(instance=instance)
if osisjp.isInstalled():
hrd=osisjp.hrd_instance
ipaddr=hrd.getList("osis.client.addr")
port=int(hrd.get("osis.client.port"))
user=hrd.get("osis.client.login")
passwd=hrd.get("osis.client.passwd")
return self.getClient(ipaddr=ipaddr, port=port, user=user, passwd=passwd, ssl=ssl, gevent=gevent)
if die:
j.events.inputerror_critical("Could not find osis_client with instance:%s, could not load osis,"%instance)
def getClientForNamespace(self, namespace, client=None):
if client==None:
client = self.getClientByInstance('main')
return NameSpaceClient(client, namespace)
def getClientForCategory(self, client, namespace, category):
"""
how to use
client=j.core.osis.getClientByInstance('main')
client4node=j.core.osis.getClientForCategory(client,"system","node")
"""
if client==None:
raise RuntimeError("Client cannot be None: getClientForCategory %s/%s"%(namespace, category))
return OSISClientForCat(client, namespace, category)
def getOsisBaseObjectClass(self):
return OSISBaseObject
def getOSISBaseObjectComplexType(self):
return OSISBaseObjectComplexType
def getOsisImplementationParentClass(self, namespacename):
"""
        return the parent class for an osis implementation (the implementation from which each namespace & category inherits)
"""
implpath = j.system.fs.joinPaths("logic", namespacename, "OSIS_parent.py")
classs = self._loadModuleClass(implpath)
return classs
def _generateOsisModelClassFromSpec(self,namespace,specpath,modelName="",classpath=""):
"""
generate class files for spec (can be more than 1)
generated in classpath/modelName/OsisGeneratedRootObject.py
and also classpath/modelName/model.py
@return classpath
"""
import JumpScale.baselib.specparser
j.core.specparser.parseSpecs(specpath, appname="osismodel", actorname=namespace)
# spec = j.core.specparser.getModelSpec(namespace, category, "root")
modelNames = j.core.specparser.getModelNames("osismodel", namespace)
if classpath=="":
classpath=j.system.fs.joinPaths(j.dirs.varDir,"code","osismodel",namespace)
extpath=j.system.fs.getDirName(inspect.getfile(self.getClient))
templpath=j.system.fs.joinPaths(extpath,"_templates","osiscomplextypes")
j.system.fs.copyDirTree(templpath, classpath, keepsymlinks=False, eraseDestination=False, \
skipProtectedDirs=False, overwriteFiles=False, applyHrdOnDestPaths=None)
if len(modelNames) > 0:
for modelName in modelNames:
modelspec = j.core.specparser.getModelSpec("osismodel", namespace, modelName)
modeltags = j.core.tags.getObject(modelspec.tags)
# # will generate the tasklets
# modelHasTasklets = modeltags.labelExists("tasklets")
# if modelHasTasklets:
# j.core.codegenerator.generate(modelspec, "osis", codepath=actorpath, returnClass=False, args=args)
# if spec.hasTasklets:
# self.loadOsisTasklets(actorobject, actorpath, modelName=modelspec.name)
code = j.core.codegenerator.getCodeJSModel("osismodel", namespace, modelName)
if modelspec.tags == None:
modelspec.tags = ""
index = j.core.tags.getObject(modelspec.tags).labelExists("index")
tags = j.core.tags.getObject(modelspec.tags)
classnameGenerated="JSModel_%s_%s_%s"%("osismodel", namespace, modelName)
classnameNew="%s_%s"%(namespace,modelName)
classnameNew2="%s_%s_osismodelbase"%(namespace,modelName)
code=code.replace(classnameGenerated,classnameNew2)
classpathForModel=j.system.fs.joinPaths(classpath,modelName)
j.system.fs.createDir(classpathForModel)
classpath3=j.system.fs.joinPaths(classpathForModel,"%s_osismodelbase.py"%classnameNew)
j.system.fs.writeFile(filename=classpath3,contents=code)
mpath=j.system.fs.joinPaths(classpathForModel,"model.py")
if not j.system.fs.exists(path=mpath):
j.system.fs.copyFile(j.system.fs.joinPaths(classpath,"model_template.py"),mpath)
content=j.system.fs.fileGetContents(mpath)
content=content.replace("$modelbase","%s"%classnameNew)
j.system.fs.writeFile(filename=mpath,contents=content)
return classpath
def generateOsisModelDefaults(self,namespace,specpath=""):
import JumpScale.portal.codegentools
if specpath=="":
specpath=j.system.fs.joinPaths("logic", namespace, "model.spec")
basepathspec=j.system.fs.getDirName(specpath)
if j.system.fs.exists(path=specpath):
self._generateOsisModelClassFromSpec(namespace,specpath=basepathspec,classpath=basepathspec)
def getModelTemplate(self):
extpath=j.system.fs.getDirName(inspect.getfile(self.getClient))
return j.system.fs.joinPaths(extpath,"_templates","model_template.py")
def getOsisModelClass(self,namespace,category,specpath=""):
"""
returns class generated from spec file or from model.py file
"""
key="%s_%s"%(namespace,category)
if not self.osisModels.has_key(key):
# #need to check if there is a specfile or we go from model.py
if specpath=="":
specpath=j.system.fs.joinPaths("logic", namespace, "model.spec")
basepathspec=j.system.fs.getDirName(specpath)
basepath=j.system.fs.joinPaths(basepathspec,category)
modelpath=j.system.fs.joinPaths(basepath,"model.py")
if j.system.fs.exists(path=modelpath):
klass= j.system.fs.fileGetContents(modelpath)
name=""
for line in klass.split("\n"):
if line.find("(OsisBaseObject")<>-1 and line.find("class ")<>-1:
name=line.split("(")[0].lstrip("class").strip()
if name=="":
raise RuntimeError("could not find: class $modelName(OsisBaseObject) in model class file, should always be there")
sys.path.append(basepath)
module = imp.load_source(key,modelpath)
self.osisModels[key]=module.__dict__[name]
else:
raise RuntimeError("Could not find model.py in %s"%basepath)
return self.osisModels[key]
def _loadModuleClass(self, path):
        '''Load the Python module from disk under a module name derived from its path'''
modname = "osis_%s"%path.replace("/","_").replace("logic_","")[:-3]
# while modname in sys.modules:
# modname = generate_module_name()
module = imp.load_source(modname, path)
# find main classname of module
# classes=[item for item in module.__dict__.keys() if (item<>"q" and item[0]<>"_")]
# if len(classes)<>1:
# j.errorconditionhandler.raiseBug(message="there must be only 1 class implemented in %s"%path,category="osis.init")
# classname=classes[0]
# return module.__dict__[classname]
try:
return module.mainclass
except Exception,e:
raise RuntimeError("Could not load module on %s, could not find 'mainclass', check code on path. Error:%s"% (path,e))
"""swapi_explorer URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import include, path
urlpatterns = [
path("admin/", admin.site.urls),
path("", include("swapi_explorer.explorer.urls")),
]
from blinker import signal
pre_request = signal('pre-request')
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2018 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import Iterable, defaultdict
from datetime import datetime
from operator import attrgetter
import numpy as np
from ..core import Entity, TileableEntity, ChunkData, Chunk, TileableData, is_eager_mode, build_mode, Serializable
from ..tiles import handler
from ..serialize import ProviderType, ValueType, DataTypeField, ListField, TupleField, \
BoolField, StringField, AnyField
from ..compat import Enum
from ..utils import log_unhandled, on_serialize_shape, on_deserialize_shape
from .utils import get_chunk_slices
import logging
logger = logging.getLogger(__name__)
class TensorOrder(Enum):
# C order
C_ORDER = 'C'
# Fortran order
F_ORDER = 'F'
class TensorChunkData(ChunkData):
__slots__ = ()
# required fields
_shape = TupleField('shape', ValueType.int64,
on_serialize=on_serialize_shape, on_deserialize=on_deserialize_shape)
_order = StringField('order', on_serialize=attrgetter('value'), on_deserialize=TensorOrder)
# optional fields
_dtype = DataTypeField('dtype')
def __init__(self, op=None, index=None, shape=None, dtype=None, order=None, **kw):
super(TensorChunkData, self).__init__(_op=op, _index=index, _shape=shape,
_dtype=dtype, _order=order, **kw)
@classmethod
def cls(cls, provider):
if provider.type == ProviderType.protobuf:
from ..serialize.protos.tensor_pb2 import TensorChunkDef
return TensorChunkDef
return super(TensorChunkData, cls).cls(provider)
@property
def params(self):
        # params returns the properties that are useful to rebuild a new chunk
return {
'shape': self.shape,
'dtype': self.dtype,
'order': self.order,
'index': self.index,
}
def __len__(self):
try:
return self.shape[0]
except IndexError:
if build_mode().is_build_mode:
return 0
raise TypeError('len() of unsized object')
@property
def shape(self):
return getattr(self, '_shape', None)
@property
def ndim(self):
return len(self.shape)
@property
def size(self):
return np.prod(self.shape).item()
@property
def dtype(self):
return getattr(self, '_dtype', None) or self.op.dtype
@property
def order(self):
return getattr(self, '_order', None)
@property
def nbytes(self):
return np.prod(self.shape) * self.dtype.itemsize
class TensorChunk(Chunk):
__slots__ = ()
_allow_data_type_ = (TensorChunkData,)
def __len__(self):
return len(self._data)
class TensorData(TileableData):
__slots__ = ()
# required fields
_order = StringField('order', on_serialize=attrgetter('value'), on_deserialize=TensorOrder)
# optional fields
_dtype = DataTypeField('dtype')
_chunks = ListField('chunks', ValueType.reference(TensorChunkData),
on_serialize=lambda x: [it.data for it in x] if x is not None else x,
on_deserialize=lambda x: [TensorChunk(it) for it in x] if x is not None else x)
def __init__(self, op=None, shape=None, dtype=None, order=None, nsplits=None, chunks=None, **kw):
super(TensorData, self).__init__(_op=op, _shape=shape, _dtype=dtype, _order=order,
_nsplits=nsplits, _chunks=chunks, **kw)
@classmethod
def cls(cls, provider):
if provider.type == ProviderType.protobuf:
from ..serialize.protos.tensor_pb2 import TensorDef
return TensorDef
return super(TensorData, cls).cls(provider)
def __str__(self):
if is_eager_mode():
return 'Tensor(op={0}, shape={1}, data=\n{2})'.format(self.op.__class__.__name__,
self.shape, str(self.fetch()))
else:
return 'Tensor(op={0}, shape={1})'.format(self.op.__class__.__name__, self.shape)
def __repr__(self):
if is_eager_mode():
return 'Tensor <op={0}, shape={1}, key={2}, data=\n{3}>'.format(self.op.__class__.__name__,
self.shape, self.key,
repr(self.fetch()))
else:
return 'Tensor <op={0}, shape={1}, key={2}>'.format(self.op.__class__.__name__,
self.shape, self.key)
@property
def params(self):
        # params returns the properties that are useful to rebuild a new tileable object
return {
'shape': self.shape,
'dtype': self.dtype,
'order': self.order
}
@property
def flags(self):
c_order = True if self.ndim <= 1 else self.order == TensorOrder.C_ORDER
f_order = True if self.ndim <= 1 else self.order == TensorOrder.F_ORDER
return {
'C_CONTIGUOUS': c_order,
'F_CONTIGUOUS': f_order
}
@property
def real(self):
from .arithmetic import real
return real(self)
@property
def imag(self):
from .arithmetic import imag
return imag(self)
@property
def dtype(self):
return getattr(self, '_dtype', None) or self.op.dtype
@property
def order(self):
return getattr(self, '_order', None)
@property
def nbytes(self):
return np.prod(self.shape) * self.dtype.itemsize
def get_chunk_slices(self, idx):
return get_chunk_slices(self.nsplits, idx)
def is_scalar(self):
return self.ndim == 0
isscalar = is_scalar
def tosparse(self):
if self.issparse():
return self
from .datasource import fromdense
return fromdense(self)
def todense(self):
if not self.issparse():
return self
from .datasource import fromsparse
return fromsparse(self)
def transpose(self, *axes):
from .base import transpose
if len(axes) == 1 and isinstance(axes[0], Iterable):
axes = axes[0]
return transpose(self, axes)
@property
def T(self):
return self.transpose()
def reshape(self, shape, *shapes, **kw):
from .reshape import reshape
order = kw.pop('order', 'C')
if kw:
raise TypeError(
"'{0}' is an invalid keyword argument for this function".format(tuple(kw)[0]))
if isinstance(shape, Iterable):
shape = tuple(shape)
else:
shape = (shape,)
shape += shapes
return reshape(self, shape, order=order)
def _equals(self, o):
return self is o
def totiledb(self, uri, ctx=None, key=None, timestamp=None):
from .datastore import totiledb
return totiledb(uri, self, ctx=ctx, key=key, timestamp=timestamp)
@staticmethod
def from_dataframe(in_df):
from .datasource import from_dataframe
return from_dataframe(in_df)
def to_dataframe(self, *args, **kwargs):
from ..dataframe.datasource.from_tensor import from_tensor
return from_tensor(self, *args, **kwargs)
@property
def flat(self):
return flatiter(self)
class Tensor(TileableEntity):
__slots__ = ()
_allow_data_type_ = (TensorData,)
def __len__(self):
return len(self._data)
def tiles(self):
return handler.tiles(self)
def single_tiles(self):
return handler.single_tiles(self)
@property
def shape(self):
return self.data.shape
@shape.setter
def shape(self, new_shape):
self._data = self._data.reshape(new_shape).data
def _update_shape(self, new_shape):
self._data._update_shape(new_shape)
@property
def real(self):
return self.data.real
@real.setter
def real(self, new_real):
from .arithmetic.setreal import set_real
self._data = set_real(self._data, new_real).data
@property
def imag(self):
return self.data.imag
@imag.setter
def imag(self, new_imag):
from .arithmetic.setimag import set_imag
self._data = set_imag(self._data, new_imag).data
def __array__(self, dtype=None):
if is_eager_mode():
return np.asarray(self.fetch(), dtype=dtype)
else:
return np.asarray(self.execute(), dtype=dtype)
def __array_function__(self, func, types, args, kwargs):
from .. import tensor as module
for submodule in func.__module__.split('.')[1:]:
try:
module = getattr(module, submodule)
except AttributeError:
return NotImplemented
if not hasattr(module, func.__name__):
return NotImplemented
mars_func = getattr(module, func.__name__)
if mars_func is func:
# avoid Numpy func
return NotImplemented
return mars_func(*args, **kwargs)
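    # Illustrative effect of the dispatch above (assuming the corresponding mars.tensor
    # functions exist): np.sum(t) on a Tensor resolves func.__module__ == 'numpy' and forwards
    # to mars.tensor.sum, np.linalg.svd(t) forwards to mars.tensor.linalg.svd, and any numpy
    # function without a mars counterpart returns NotImplemented.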
def transpose(self, *axes):
"""
Returns a view of the tensor with axes transposed.
For a 1-D tensor, this has no effect. (To change between column and
row vectors, first cast the 1-D tensor into a matrix object.)
For a 2-D tensor, this is the usual matrix transpose.
For an n-D tensor, if axes are given, their order indicates how the
axes are permuted (see Examples). If axes are not provided and
``a.shape = (i[0], i[1], ... i[n-2], i[n-1])``, then
``a.transpose().shape = (i[n-1], i[n-2], ... i[1], i[0])``.
Parameters
----------
axes : None, tuple of ints, or `n` ints
* None or no argument: reverses the order of the axes.
* tuple of ints: `i` in the `j`-th place in the tuple means `a`'s
`i`-th axis becomes `a.transpose()`'s `j`-th axis.
* `n` ints: same as an n-tuple of the same ints (this form is
intended simply as a "convenience" alternative to the tuple form)
Returns
-------
out : Tensor
View of `a`, with axes suitably permuted.
See Also
--------
Tensor.T : Tensor property returning the tensor transposed.
Examples
--------
>>> import mars.tensor as mt
>>> a = mt.array([[1, 2], [3, 4]])
>>> a.execute()
array([[1, 2],
[3, 4]])
>>> a.transpose().execute()
array([[1, 3],
[2, 4]])
>>> a.transpose((1, 0))
array([[1, 3],
[2, 4]])
>>> a.transpose(1, 0).execute()
array([[1, 3],
[2, 4]])
"""
return self._data.transpose(*axes)
@property
def T(self):
"""
Same as self.transpose(), except that self is returned if
self.ndim < 2.
Examples
--------
>>> import mars.tensor as mt
>>> x = mt.array([[1.,2.],[3.,4.]])
>>> x.execute()
array([[ 1., 2.],
[ 3., 4.]])
>>> x.T.execute()
array([[ 1., 3.],
[ 2., 4.]])
>>> x = mt.array([1.,2.,3.,4.])
>>> x.execute()
array([ 1., 2., 3., 4.])
>>> x.T.execute()
array([ 1., 2., 3., 4.])
"""
return self._data.T
def totiledb(self, uri, ctx=None, key=None, timestamp=None):
return self._data.totiledb(uri, ctx=ctx, key=key, timestamp=timestamp)
def copy(self, order='C'):
return super(Tensor, self).copy().astype(self.dtype, order=order, copy=False)
@property
def flat(self):
"""
Flat iterator object to iterate over arrays.
A `flatiter` iterator is returned by ``x.flat`` for any tensor `x`.
It allows iterating over the tensor as if it were a 1-D array,
either in a for-loop or by calling its `next` method.
Iteration is done in row-major, C-style order (the last
index varying the fastest). The iterator can also be indexed using
basic slicing or advanced indexing.
See Also
--------
Tensor.flat : Return a flat iterator over a tensor.
Tensor.flatten : Returns a flattened copy of a tensor.
Examples
--------
>>> import mars.tensor as mt
>>> x = mt.arange(6).reshape(2, 3)
>>> fl = x.flat
>>> fl[2:4].execute()
array([2, 3])
"""
return self._data.flat
def from_dataframe(self, in_df):
return self._data.from_dataframe(in_df)
def to_dataframe(self, *args, **kwargs):
return self._data.to_dataframe(*args, **kwargs)
def execute(self, session=None, **kw):
return self._data.execute(session, **kw)
class SparseTensor(Tensor):
__slots__ = ()
class flatiter(object):
def __init__(self, tensor):
# flatten creates a copy
self._flatten_tensor = tensor.flatten()
# ravel creates a view
self._ravel_tensor = tensor.ravel()
def __getitem__(self, item):
# a.flat[item] create a copy
return self._flatten_tensor[item]
def __setitem__(self, key, value):
# a.flat[item] = value will apply changes to original tensor
self._ravel_tensor[key] = value
class Indexes(Serializable):
_indexes = AnyField('indexes')
def __init__(self, indexes=None, **kw):
self._indexes = indexes
super(Indexes, self).__init__(**kw)
@property
def indexes(self):
return self._indexes
class MutableTensorData(TensorData):
__slots__ = ()
# required fields
_name = StringField('name')
_compression = BoolField("compression")
_chunk_eps = ListField('chunk_eps')
def __init__(self, name=None, op=None, shape=None, dtype=None, key=None, chunk_eps=None,
nsplits=None, chunks=None, **kw):
super(MutableTensorData, self).__init__(op=op, shape=shape, dtype=dtype, nsplits=nsplits,
chunks=chunks, _name=name, _key=key, _chunk_eps=chunk_eps, **kw)
@classmethod
def cls(cls, provider):
return super(MutableTensorData, cls).cls(provider)
def __str__(self):
return 'MutableTensor(op={0}, name={1}, shape={2})'.format(self.op.__class__.__name__,
self.name,
self.shape)
def __repr__(self):
return 'MutableTensor <op={0}, name={1}, shape={2}, key={3}>'.format(self.op.__class__.__name__,
self.name,
self.shape,
self.key)
@property
def params(self):
        # params returns the properties that are useful to rebuild a new tileable object
return {
'shape': self.shape,
'dtype': self.dtype,
'name': self.name,
'compression': self.compression,
"chunk_eps": self.chunk_eps,
}
@property
def name(self):
return getattr(self, '_name', None)
@property
def compression(self):
return getattr(self, '_compression', None)
@property
def chunk_eps(self):
return getattr(self, '_chunk_eps', None)
class MutableTensor(Entity):
__slots__ = ("_chunk_to_endpoint", "_chunk_buffers", "_record_type", "_buffer_size")
_allow_data_type_ = (MutableTensorData,)
def __init__(self, *args, **kwargs):
super(MutableTensor, self).__init__(*args, **kwargs)
self._chunk_buffers = defaultdict(lambda: [])
self._record_type = np.dtype([("index", np.uint32), ("ts", np.dtype('datetime64[ns]')), ("value", self.dtype)])
if self.chunks:
self._buffer_size = np.prod(self.chunks[0].shape)
else:
            # MutableTensor doesn't hold chunks in LocalSession, thus we don't care about the buffer size
self._buffer_size = 0
if self._data.chunk_eps is not None:
self._chunk_to_endpoint = dict((c.key, ep) for c, ep in zip(self.chunks, self._data.chunk_eps))
else:
self._chunk_to_endpoint = dict()
def __len__(self):
return len(self._data)
@property
def name(self):
return self._data.name
@property
def chunk_to_endpoint(self):
return self._chunk_to_endpoint
def __setitem__(self, index, value):
from ..session import Session
session = Session.default_or_local()
return session.write_mutable_tensor(self, index, value)
def seal(self):
from ..session import Session
session = Session.default_or_local()
return session.seal(self)
@log_unhandled
def _do_write(self, tensor_index, value):
        ''' Notes [buffer management of mutable tensor]:
        Write operations on a mutable tensor are buffered at the client. Every chunk has a
        corresponding buffer of the form
            {chunk_key: [(index, ts, value)]}
        Every time we write to a chunk, we append the new operation records to that list.
        At the end of a write, if the buffer size exceeds `buffer_size`, the buffer is sent
        to the corresponding worker.
        The insights behind the above design are:
        1. `append` on a (small) list is fast
        2. Since we try to flush the (affected) buffers to workers at the end of every write,
           the buffer size is guaranteed to be less than 2 * chunk_size.
        '''
from .indexing.core import process_index, calc_shape
from .indexing.setitem import TensorIndex
from .utils import setitem_as_records
tensor_index = process_index(self.ndim, tensor_index)
output_shape = calc_shape(self.shape, tensor_index)
index_tensor_op = TensorIndex(dtype=self.dtype, sparse=False, indexes=tensor_index)
index_tensor = index_tensor_op.new_tensor([self], tuple(output_shape)).single_tiles()
output_chunks = index_tensor.chunks
is_scalar = np.isscalar(value) or isinstance(value, tuple) and self.dtype.fields
if not is_scalar:
value = np.broadcast_to(value, output_shape).astype(self.dtype)
nsplits_acc = [np.cumsum((0,) + tuple(c.shape[i] for c in output_chunks
if all(idx == 0 for j, idx in enumerate(c.index) if j != i)))
for i in range(len(output_chunks[0].shape))]
now = np.datetime64(datetime.now())
affected_chunk_keys = []
for output_chunk in output_chunks:
records = self._chunk_buffers[output_chunk.op.input.key]
records += setitem_as_records(nsplits_acc, output_chunk, value, now, is_scalar=is_scalar)
affected_chunk_keys.append(output_chunk.op.input.key)
# Try to flush affected chunks
return self._do_flush(self._buffer_size, affected_chunk_keys)
@log_unhandled
def _do_flush(self, buffer_size_limit=1, affected_chunk_keys=None):
chunk_records_to_send = []
affected_chunk_keys = affected_chunk_keys or self._chunk_buffers.keys()
for chunk_key in affected_chunk_keys:
records = self._chunk_buffers[chunk_key]
if len(records) >= buffer_size_limit:
chunk_records_to_send.append((chunk_key, self._chunk_to_endpoint[chunk_key],
np.array(records, dtype=self._record_type)))
self._chunk_buffers[chunk_key] = []
return chunk_records_to_send
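# Illustrative shape of the client-side buffers described in MutableTensor._do_write above
# (chunk keys and endpoints are hypothetical): one record list per chunk key, flushed to that
# chunk's worker endpoint once the list reaches self._buffer_size records:
#     self._chunk_buffers     == {'chunk-key-0': [(3, numpy.datetime64(...), 1.5), ...]}
#     self._chunk_to_endpoint == {'chunk-key-0': '127.0.0.1:12345'}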
def mutable_tensor(name, shape=None, dtype=np.float_, fill_value=None, chunk_size=None):
"""
Create or get a mutable tensor using the local or default session.
When `shape` is `None`, it will try to get the mutable tensor with name `name`. Otherwise,
it will try to create a mutable tensor using the provided `name` and `shape`.
Parameters
----------
name : str
Name of the mutable tensor.
shape : int or sequence of ints
Shape of the new mutable tensor, e.g., ``(2, 3)`` or ``2``.
dtype : data-type, optional
The desired data-type for the mutable tensor, e.g., `mt.int8`. Default is `mt.float_`.
chunk_size: int or tuple of ints, optional
Specifies chunk size for each dimension.
fill_value: scalar, optional
        The created mutable tensor will be filled with `fill_value` by default; if the parameter is None,
        the newly created mutable tensor will be initialized with `np.zeros`. See also `numpy.full`.
"""
from ..session import Session
session = Session.default_or_local()
if shape is None:
return session.get_mutable_tensor(name)
else:
return session.create_mutable_tensor(name, shape=shape, dtype=dtype,
fill_value=fill_value, chunk_size=chunk_size)
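# Illustrative usage sketch (requires a running Mars session; the name and values are arbitrary):
#     t = mutable_tensor('demo', shape=(4, 4), dtype=np.float_, chunk_size=2)
#     t[0, 1] = 3.0      # buffered client-side, flushed to the owning worker
#     t.seal()           # seal the tensor once writing is finished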
TENSOR_TYPE = (Tensor, TensorData)
CHUNK_TYPE = (TensorChunk, TensorChunkData)
from .cars import car_bp
import unittest
import os
from classy_blocks.util import grading_calculator as gc
from classy_blocks.classes import grading
from classy_blocks.classes.grading import Grading
# numbers are calculated with the calculator that all of this is 'borrowed' from:
# https://openfoamwiki.net/index.php/Scripts/blockMesh_grading_calculation
# with a few differences:
# - scipy.optimize.<whatever> can be used here instead of barbarian bisection
# - all floats are converted to integers by rounding down (only matters for border cases)
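# For reference, the geometric-series relations these functions solve (consistent with the
# test values below) are:
#     length          = start_size * (c2c_expansion**count - 1) / (c2c_expansion - 1)   (count * start_size when c2c_expansion == 1)
#     end_size        = start_size * c2c_expansion**(count - 1)
#     total_expansion = end_size / start_size = c2c_expansion**(count - 1)
# e.g. count=10 and c2c_expansion=1.1 give total_expansion = 1.1**9 ~= 2.3579, as asserted below.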
class TestGradingCalculator(unittest.TestCase):
def test_get_start_size__count__c2c_expansion(self):
# valid cases
self.assertEqual(gc.get_start_size__count__c2c_expansion(1, 10, 1), 0.1)
self.assertAlmostEqual(gc.get_start_size__count__c2c_expansion(1, 10, 1.1), 0.06274539488, places=5)
# invalid cases
with self.assertRaises(AssertionError):
gc.get_start_size__count__c2c_expansion(0, 10, 1)
with self.assertRaises(AssertionError):
gc.get_start_size__count__c2c_expansion(1, 0.5, 1)
def test_get_start_size__end_size__total_expansion(self):
self.assertEqual(gc.get_start_size__end_size__total_expansion(1, 0.1, 1), 0.1)
with self.assertRaises(AssertionError):
gc.get_start_size__end_size__total_expansion(0, 0.1, 1)
with self.assertRaises(AssertionError):
gc.get_start_size__end_size__total_expansion(1, 0.1, 0)
def test_get_end_size__start_size__total_expansion(self):
self.assertEqual(gc.get_end_size__start_size__total_expansion(1, 0.1, 10), 1)
with self.assertRaises(AssertionError):
gc.get_end_size__start_size__total_expansion(-1, 0.1, 0)
def test_get_count__start__size__c2c_expansion(self):
# valid cases
self.assertEqual(gc.get_count__start_size__c2c_expansion(1, 1, 1), 1)
self.assertEqual(gc.get_count__start_size__c2c_expansion(1, 0.1, 1), 10)
self.assertEqual(gc.get_count__start_size__c2c_expansion(1, 0.1, 1.1), 7)
# border cases
self.assertEqual(gc.get_count__start_size__c2c_expansion(1, 2, 1), 1)
self.assertEqual(gc.get_count__start_size__c2c_expansion(1, 1, 2), 1)
# invalid cases
with self.assertRaises(AssertionError):
gc.get_count__start_size__c2c_expansion(0, 0.1, 1.1) # length < 0
with self.assertRaises(AssertionError):
gc.get_count__start_size__c2c_expansion(1, 0, 1.1) # start_size = 0
with self.assertRaises(ValueError):
gc.get_count__start_size__c2c_expansion(1, 0.95, 0) # c2c_expansion < 1
def test_get_count__end_size__c2c_expansion(self):
# valid cases
self.assertEqual(gc.get_count__end_size__c2c_expansion(1, 0.1, 1), 10)
self.assertEqual(gc.get_count__end_size__c2c_expansion(1, 0.1, 1.1), 25)
self.assertEqual(gc.get_count__end_size__c2c_expansion(1, 0.1, 0.9), 7)
# border cases
self.assertEqual(gc.get_count__end_size__c2c_expansion(1, 1, 1), 1)
self.assertEqual(gc.get_count__end_size__c2c_expansion(1, 1, 2), 1)
# invalid cases
with self.assertRaises(ValueError):
gc.get_count__end_size__c2c_expansion(1, 0.1, 1.5)
def test_get_count__total_expansion__c2c_expansion(self):
# valid cases
self.assertEqual(gc.get_count__total_expansion__c2c_expansion(1, 3, 1.1), 12)
# border cases
self.assertEqual(gc.get_count__total_expansion__c2c_expansion(1, 1, 1.1), 1)
# invalid cases
with self.assertRaises(AssertionError):
gc.get_count__total_expansion__c2c_expansion(1, 1, 1)
with self.assertRaises(AssertionError):
gc.get_count__total_expansion__c2c_expansion(1, -1, 1.1)
def test_get_count__total_expansion__start_size(self):
# valid cases
self.assertEqual(gc.get_count__total_expansion__start_size(1, 1, 0.1), 10)
self.assertEqual(gc.get_count__total_expansion__start_size(1, 2, 0.1), 6)
self.assertEqual(gc.get_count__total_expansion__start_size(1, 8, 0.1), 2)
# border cases
self.assertEqual(gc.get_count__total_expansion__start_size(1, 0.9, 0.5), 2)
self.assertEqual(gc.get_count__total_expansion__start_size(1, 0.3, 1), 1)
def test_get_c2c_expansion__count__start_size(self):
# valid cases
self.assertEqual(gc.get_c2c_expansion__count__start_size(1, 10, 0.1), 1)
self.assertEqual(gc.get_c2c_expansion__count__start_size(1, 2, 0.1), 9)
self.assertAlmostEqual(gc.get_c2c_expansion__count__start_size(1, 5, 0.1), 1.352395572, places=5)
self.assertEqual(gc.get_c2c_expansion__count__start_size(1, 2, 0.5), 1)
self.assertAlmostEqual(gc.get_c2c_expansion__count__start_size(1, 10, 0.05), 1.1469127, places=5)
# border cases
self.assertEqual(gc.get_c2c_expansion__count__start_size(1, 1, 0.1), 1)
self.assertAlmostEqual(gc.get_c2c_expansion__count__start_size(1, 20, 0.1), 0.9181099911, places=5)
# invalid cases
with self.assertRaises(AssertionError):
gc.get_c2c_expansion__count__start_size(0, 1, 0.1) # length = 0
with self.assertRaises(AssertionError):
gc.get_c2c_expansion__count__start_size(1, 0, 0.1) # count < 1
with self.assertRaises(AssertionError):
gc.get_c2c_expansion__count__start_size(1, 10, 1.1) # start_size > length
with self.assertRaises(AssertionError):
gc.get_c2c_expansion__count__start_size(1, 10, 0) # start_size = 0
with self.assertRaises(ValueError):
gc.get_c2c_expansion__count__start_size(1, 10, 0.9)
def test_get_c2c_expansion__count__end_size(self):
# valid cases
self.assertEqual(gc.get_c2c_expansion__count__end_size(1, 10, 0.1), 1)
self.assertAlmostEqual(gc.get_c2c_expansion__count__end_size(1, 10, 0.01), 0.6784573173, places=5)
self.assertAlmostEqual(gc.get_c2c_expansion__count__end_size(1, 10, 0.2), 1.202420088, places=5)
# border cases
self.assertEqual(gc.get_c2c_expansion__count__end_size(1, 1, 1), 1)
# invalid cases
with self.assertRaises(AssertionError):
gc.get_c2c_expansion__count__end_size(1, 0.5, 1)
with self.assertRaises(AssertionError):
gc.get_c2c_expansion__count__end_size(1, 10, -0.5)
with self.assertRaises(ValueError):
gc.get_c2c_expansion__count__end_size(1, 10, 1)
def test_get_c2c_expansion__count__total_expansion(self):
# valid cases
self.assertAlmostEqual(gc.get_c2c_expansion__count__total_expansion(1, 10, 5), 1.195813175, places=5)
self.assertAlmostEqual(gc.get_c2c_expansion__count__total_expansion(1, 10, 0.5), 0.9258747123, places=5)
# border cases
self.assertEqual(gc.get_c2c_expansion__count__total_expansion(1, 10, 1), 1)
# invalid cases
with self.assertRaises(AssertionError):
gc.get_c2c_expansion__count__total_expansion(1, 1, 1)
def test_get_total_expansion__count__c2c_expansion(self):
# valid cases
self.assertEqual(gc.get_total_expansion__count__c2c_expansion(1, 10, 1), 1)
self.assertEqual(gc.get_total_expansion__count__c2c_expansion(1, 1, 1), 1)
self.assertAlmostEqual(gc.get_total_expansion__count__c2c_expansion(1, 10, 1.1), 2.3579476, places=5)
# border cases
self.assertEqual(gc.get_total_expansion__count__c2c_expansion(1, 1, 1), 1)
# invalid cases
with self.assertRaises(AssertionError):
gc.get_total_expansion__count__c2c_expansion(1, 0.5, 1)
def test_get_total_expansion__start_size__end_size(self):
self.assertEqual(gc.get_total_expansion__start_size__end_size(1, 1, 1), 1)
self.assertAlmostEqual(gc.get_total_expansion__start_size__end_size(1, 0.1, 0.01), 0.1)
self.assertAlmostEqual(gc.get_total_expansion__start_size__end_size(1, 0.01, 0.1), 10)
with self.assertRaises(AssertionError):
gc.get_total_expansion__start_size__end_size(1, 0, 0.1)
class TestGrading(unittest.TestCase):
def setUp(self):
self.g = Grading()
def test_calculator_functions(self):
expected_functions = [
# return_value | param1 | param2 (param0 = length)
['c2c_expansion', ['count', 'end_size'], gc.get_c2c_expansion__count__end_size],
['c2c_expansion', ['count', 'start_size'], gc.get_c2c_expansion__count__start_size],
['c2c_expansion', ['count', 'total_expansion'], gc.get_c2c_expansion__count__total_expansion],
['count', ['end_size', 'c2c_expansion'], gc.get_count__end_size__c2c_expansion],
['count', ['start_size', 'c2c_expansion'], gc.get_count__start_size__c2c_expansion],
['count', ['total_expansion', 'c2c_expansion'], gc.get_count__total_expansion__c2c_expansion],
['count', ['total_expansion', 'start_size'], gc.get_count__total_expansion__start_size],
['end_size', ['start_size', 'total_expansion'], gc.get_end_size__start_size__total_expansion],
['start_size', ['count', 'c2c_expansion'], gc.get_start_size__count__c2c_expansion],
['start_size', ['end_size', 'total_expansion'], gc.get_start_size__end_size__total_expansion],
['total_expansion', ['count', 'c2c_expansion'], gc.get_total_expansion__count__c2c_expansion],
['total_expansion', ['start_size', 'end_size'], gc.get_total_expansion__start_size__end_size]
]
self.assertListEqual(expected_functions, grading.functions)
def test_calculate(self):
test_cases = [
# [{keys}, count, total_expansion]; length=1 for all cases
[{'count': 10, 'total_expansion': 5}, 10, 5],
[{'count': 10, 'c2c_expansion': 1.1}, 10, 2.357947691],
[{'count': 10, 'c2c_expansion': 0.9}, 10, 0.387420489],
[{'count': 10, 'start_size': 0.2}, 10, 0.1903283012],
[{'count': 10, 'end_size': 0.2}, 10, 5.254123465509412],
[{'count': 10, 'end_size': 0.05}, 10, 0.2912203517],
[{'total_expansion': 5, 'c2c_expansion': 1.1}, 17, 5],
[{'total_expansion': 0.2, 'c2c_expansion': 0.9}, 16, 0.2],
[{'total_expansion': 0.2, 'start_size': 0.1}, 19, 0.2],
[{'total_expansion': 5, 'start_size': 0.1}, 3, 5],
[{'total_expansion': 5, 'end_size': 0.5}, 3, 5],
[{'total_expansion': 0.2, 'end_size': 0.1}, 3, 0.2],
[{'c2c_expansion': 1.1, 'start_size': 0.1}, 7, 1.771561],
[{'c2c_expansion': 0.95, 'start_size': 0.1}, 13, 0.540360087662636],
[{'c2c_expansion': 1.1, 'end_size': 0.1}, 25, 9.849732676],
[{'c2c_expansion': 0.95, 'end_size': 0.1}, 8, 0.6983372961],
[{'start_size': 0.1, 'end_size': 0.05}, 13, 0.5],
[{'start_size': 0.05, 'end_size': 0.1}, 13, 2],
]
for t in test_cases:
results = grading.calculate(1, t[0])
self.assertEqual(results[0], t[1])
self.assertAlmostEqual(results[1], t[2], places=5)
def add_division(self, length_ratio, count_ratio, total_expansion):
self.g.divisions.append([
length_ratio, count_ratio, total_expansion
])
def test_output_empty(self):
self.assertEqual(str(self.g), 'Undefined')
def test_output_single(self):
self.add_division(1, 1, 3)
self.assertEqual(str(self.g), '3')
def test_output_multi(self):
self.add_division(0.25, 0.4, 2)
self.add_division(0.5, 0.2, 1)
self.add_division(0.25, 0.4, 0.5)
expected_output = "(" + os.linesep + \
"\t(0.25 0.4 2)" + os.linesep + \
"\t(0.5 0.2 1)" + os.linesep + \
"\t(0.25 0.4 0.5)" + os.linesep + \
")"
self.assertEqual(str(self.g), expected_output)
def test_copy(self):
""" check that copy doesn't spoil the original """
self.add_division(1, 1, 3)
h = self.g.copy()
h.divisions[0][2] = 5
self.assertEqual(self.g.divisions[0][2], 3)
def test_copy_invert_simple(self):
self.add_division(1, 1, 5)
h = self.g.copy(invert=True)
self.assertEqual(self.g.divisions[0][2], 5)
self.assertEqual(h.divisions[0][2], 0.2)
def test_add_division_fail(self):
with self.assertRaises(AssertionError):
self.g.length = 0
self.g.add_division(count=10)
self.g.length = 1
with self.assertRaises(ValueError):
# when using only 1 parameter, c2c_expansion is assumed 1;
# when specifying that as well, another parameter must be provided
self.g.add_division(c2c_expansion=1.1)
with self.assertRaises(AssertionError):
# specified total_expansion and c2c_expansion=1 aren't compatible
self.g.add_division(total_expansion=5)
def test_add_division_1(self):
""" double grading, set start_size and c2c_expansion """
self.g.set_block_size(2)
self.g.add_division(0.5, 0.1, 1.1)
self.g.add_division(0.5, 0.1, 1.1, invert=True)
self.assertListEqual(
self.g.divisions,
[
[0.5, 7, 1.7715610000000008],
[0.5, 7, 0.5644739300537772]
]
)
def test_add_division_2(self):
""" single grading, set c2c_expansion and count """
self.g.set_block_size(1)
self.g.add_division(1, c2c_expansion=1.1, count=10)
self.assertListEqual(
self.g.divisions,
[[1, 10, 2.357947691000002]]
)
def test_add_division_3(self):
""" single grading, set count and start_size """
self.g.set_block_size(1)
self.g.add_division(1, count=10, start_size=0.05)
self.assertListEqual(
self.g.divisions,
[[1, 10, 3.433788027752166]]
        )
 | python | 14,150 |
import pandas as pd
import scipy
from sklearn.metrics.pairwise import cosine_similarity
def standardize(row):
return (row - row.mean()) / (row.max() - row.min())
def get_similar_movies(movie_name, user_rating):
    # centre the rating around 2.5 (the scale midpoint), so ratings below it push similar titles down the ranking
similar_score = item_similarity_df[movie_name] * (user_rating - 2.5)
similar_score = similar_score.sort_values(ascending=False)
return similar_score
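# Sketch of the weighting above (assuming a 0-5 rating scale): a 5-star rating scales
# every similarity by +2.5, a 1-star rating by -1.5, so disliked movies push their
# look-alikes down the ranking instead of up.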
if __name__ == '__main__':
ratings = pd.read_csv('user_example.csv', index_col=0)
ratings = ratings.fillna(0)
ratings_std = ratings.apply(standardize)
print(ratings_std)
print('--------------------------------------------------------')
# taking a transpose since we want similarity between items which need to be in rows
item_similarity = cosine_similarity(ratings_std.T)
print(item_similarity)
print('--------------------------------------------------------')
item_similarity_df = pd.DataFrame(item_similarity, index=ratings.columns, columns=ratings.columns)
print(item_similarity_df)
print('--------------------------------------------------------')
print(get_similar_movies('romantic3', 1))
print('--------------------------------------------------------')
action_lover = [('action1', 5), ('romantic2', 1), ('romantic3', 1)]
similar_movies = pd.DataFrame()
for movie, rating in action_lover:
        # DataFrame.append was removed in pandas 2.0; concatenate a one-row frame instead
        similar_movies = pd.concat([similar_movies, get_similar_movies(movie, rating).to_frame().T],
                                   ignore_index=True)
print(similar_movies.head())
print('--------------------------------------------------------')
print(similar_movies.sum().sort_values(ascending=False))
""""
HOW THIS IS SUPPOSED TO WORK
This algorithm is supposed to compare the movies
of two different users, and if like similar movies
recommend to each other movies they haven't watched yet.
Example:
Mike likes: Avatar, Avengers, Dark Knight
Michelangelo likes: Dark Knight, Spider-man, Avengers
then the engine should recommend Mike to watch the movie Spider-man
and Michelangelo to watch the movie Avatar, or something like that
"""
| python | 2,125 |
import numpy as np
import pylab
#Evaluate the linear regression
def compute_cost(X, y, theta):
'''
    Compute the cost for linear regression.
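    The quantity computed is the standard squared-error cost (a sketch of the
    code below):
        J(theta) = 1/(2m) * sum_i (x_i . theta - y_i)^2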
'''
#Number of training samples
m = y.size
predictions = X.dot(theta).flatten()
sqErrors = (predictions - y) ** 2
J = (1.0 / (2 * m)) * sqErrors.sum()
return J
def gradient_descent(X, y, theta, alpha, parada):
'''
    Performs gradient descent to learn theta with learning rate alpha,
    iterating until the cost falls below the stopping threshold `parada`.
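    Each iteration applies the simultaneous update (a sketch of the loop body below):
        theta_j <- theta_j - alpha * (1/m) * sum_i (x_i . theta - y_i) * x_ij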
'''
m = y.size
custo_atual = 9999999999
i = 0
    J_history = []  # cost after each iteration (a list avoids the fixed 4000-step cap)
while custo_atual > parada:
i = i + 1
predictions = X.dot(theta).flatten()
errors_x1 = (predictions - y) * X[:, 0]
errors_x2 = (predictions - y) * X[:, 1]
theta[0][0] = theta[0][0] - alpha * (1.0 / m) * errors_x1.sum()
theta[1][0] = theta[1][0] - alpha * (1.0 / m) * errors_x2.sum()
custo_atual = compute_cost(X, y, theta)
        J_history.append(custo_atual)
    print('number of iterations:', i)
return theta, custo_atual, J_history
#Load the dataset
data = np.genfromtxt('height.csv', delimiter=',')
X = data[:, 0]
y = data[:, 1]
#Some gradient descent settings
alpha = 0.01
parada = 0.001
m = y.size
#Add a column of ones to X (interception data)
it = np.ones(shape=(m, 2))
it[:, 1] = X
#Initialize theta parameters
theta = np.zeros(shape=(2, 1))
pylab.plot( X, y, 'o')
pylab.title('Age vs Height')
pylab.xlabel('Age')
pylab.ylabel('Height')
pylab.show()
theta, custo, J_history = gradient_descent(it, y, theta, alpha, parada)
print('\nthetas', theta, '\ncost', custo)
def minha_funcao(x, theta):
return theta[0][0] + theta[1][0]*x
print(J_history)
pylab.plot(J_history)
pylab.title('Error decreasing with each iteration')
pylab.xlabel('Iteration')
pylab.ylabel('Error')
pylab.show()
pylab.plot(X, minha_funcao(X, theta), X, y, 'o')
pylab.title('Age vs Height')
pylab.xlabel('Age')
pylab.ylabel('Height')
pylab.show()
 | python | 2,147 |
# -*- coding: utf-8 -*-
"""
Post-processing of WEST L and H modes
J.Hillairet
03/09/2018
ICRH Antenna radial position in WEST : 2890-3060 (170mm).
LPA radial position in WEST : 2880-3080 (200mm)
LH Antenna radial position in WEST : 2910-3060 (150mm)
"""
import matplotlib.pyplot as plt
import numpy as np
import scipy.io as sio
# There is three profiles
# for three Averaged Line Density (LAD)
R1_H, ne1_H = np.loadtxt('Ne_prof_WEST_Hmode_01_LAD6_Rsep_293.txt', skiprows=1, unpack=True)
R2_H, ne2_H = np.loadtxt('Ne_prof_WEST_Hmode_01_LAD9_Rsep_293.txt', skiprows=1, unpack=True)
R3_H, ne3_H = np.loadtxt('Ne_prof_WEST_Hmode_01_LAD12_Rsep_293.txt', skiprows=1, unpack=True)
# These profiles are defined vs the radial position (in metres)
# In HFSS, the plasma profiles should start at x=0, so we reverse the abscissa
# from a given radius
def x(R, R_x0=2.93):
    return -R + R_x0
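# Quick check of the mapping (illustrative values): with R_x0 = 2.93 m (the separatrix),
# x(2.93) -> 0.0 and x(2.88) -> 0.05, i.e. 5 cm inside the separatrix.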
plt.subplots()
plt.plot(x(R1_H), ne1_H)
plt.plot(x(R2_H), ne2_H)
plt.plot(x(R3_H), ne3_H)
# reverse the array in order to have increasing values of x for HFSS compatibility
np.savetxt('HFSS_density_profile_LAD6.txt', np.flipud(np.array([x(R1_H), ne1_H]).T))
# export ascii files profiles for HFSS
#
#
#
#figure(1)
#plot(R1_H, ne1_H, R2_H, ne2_H, R3_H, ne3_H, lw=2)
#xlabel('R [m]')
#ylabel('$n_e$ [$m^{-3}$]')
#grid(True)
#legend(('$\overline{n}_e=6.10^{19} m^{-2}$', '$\overline{n}_e=9.10^{19} m^{-2}$', '$\overline{n}_e=12.10^{19} m^{-2}$'))
#title('WEST H-mode Density profile')
#axvline(x=2.93, ymin=0, ymax=2e20, linestyle='--', color='k') # separatrix
#
## ICRH limiter radial location
##gca().add_patch(Rectangle((2.890, 0), 0.170, 1.5e20, facecolor='r', alpha=0.2)) # (x, y), width, height
#
## Proposed Antenna Limiter Radial Location
#axvline(x=2.95, ymin=0, ymax=2e20, linestyle='-.', color='k')
#axvline(x=2.975, ymin=0, ymax=2e20, linestyle='-.', color='k')
#axvline(x=3.00, ymin=0, ymax=2e20, linestyle='-.', color='k')
#
#
#
##
## L-mode
##
#R1_L, ne1_L = np.loadtxt('Ne_prof_WEST_Lmode_02_LAD2_4_Rsep_293.txt', skiprows=1, unpack=True)
#R2_L, ne2_L = np.loadtxt('Ne_prof_WEST_Lmode_02_LAD5_4_Rsep_293.txt', skiprows=1, unpack=True)
#R3_L, ne3_L = np.loadtxt('Ne_prof_WEST_Lmode_02_LAD8_4_Rsep_293.txt', skiprows=1, unpack=True)
#
#fig2 = figure(2)
#plot(R1_L, ne1_L, R2_L, ne2_L, R3_L, ne3_L, lw=2)
#xlabel('R [m]')
#ylabel('$n_e$ [$m^{-3}$]')
#grid(True)
#legend(('$\overline{n}_e=2.10^{19} m^{-2}$', '$\overline{n}_e=5.10^{19} m^{-2}$', '$\overline{n}_e=8.10^{19} m^{-2}$'))
#title('WEST L-mode Density profile')
#
## ICRH limiter radial location
#gca().add_patch(Rectangle((2.890, 0), 0.170, 1.5e20, facecolor='r', alpha=0.2)) # (x, y), width, height
#
#axvline(x=2.93, ymin=0, ymax=2e20, linestyle='--', color='k') # sepratrix
#
#
## The profiles are exported for TOPICA evaluation,
## and corresponds to the larger antenna-separatrix distance;
## Then Daniele take care of the other two positions removing 2.5cm and 5cm from the low density side of the profile.
#R_limiter = 3.0 # maximum radial location
## Find the index in the arrays corresponding to R=R_limiter
#idx=np.where(R1_H == R_limiter)
#R_export = R1_H[:idx[0][0]+1]
#ne_LAD6 = ne1_H[:idx[0][0]+1]
#ne_LAD9 = ne2_H[:idx[0][0]+1]
#ne_LAD12 = ne3_H[:idx[0][0]+1]
#
#figure(3)
#plot(R_export, ne_LAD6, R_export, ne_LAD9, R_export, ne_LAD12)
#
#np.savetxt('TOPICA_WEST_H-mode_ne_LAD6_Rsep_2.93m.txt', np.c_[R_export, ne_LAD6],
# header=''' WEST H-mode density profile - 20/06/2014 \n Line Average Density = 6e19 m^-2 \n R [m] \t ne [m-3]''' )
#np.savetxt('TOPICA_WEST_H-mode_ne_LAD9_Rsep_2.93m.txt', np.c_[R_export, ne_LAD9],
# header=''' WEST H-mode density profile - 20/06/2014 \n Line Average Density = 9e19 m^-2 \n R [m] \t ne [m-3]''' )
#np.savetxt('TOPICA_WEST_H-mode_ne_LAD12_Rsep_2.93m.txt', np.c_[R_export, ne_LAD12],
# header=''' WEST H-mode density profile - 20/06/2014 \n Line Average Density = 12e19 m^-2 \n R [m] \t ne [m-3]''' )
#
#
## Te profile
#import scipy as sp
## H-mode Te profile from CRONOS
#data = sio.loadmat('Te_WEST_MPHIMNHH_HR_eq_data.mat')
#R_Te = data['RR']
#Te = data['Te'].transpose()
#
#figure(5)
#plot(R_Te, Te)
## Take the same value than the min to fill the SOL up to R=3.0 m
#R_Te = np.append(R_Te, 3)
#Te = np.append(Te, np.min(Te))
#
#plot(R_Te, Te)
#
#Te_resample = np.interp(R_export, R_Te, Te)
#np.savetxt('TOPICA_WEST_H-mode_Te.txt', np.c_[R_export, Te_resample],
#           header=''' WEST H-mode temperature profile - 20/06/2014 \n R [m] \t Te [eV]''' )
 | python | 4,521 |
"""
Deprecated:
Not used. Integrated with SpaceConnector.
"""
import logging
from google.protobuf.json_format import MessageToDict
from spaceone.core.connector import BaseConnector
from spaceone.core import pygrpc
from spaceone.core.utils import parse_grpc_endpoint
from spaceone.core.error import *
__all__ = ['PluginConnector']
_LOGGER = logging.getLogger(__name__)
class PluginConnector(BaseConnector):
def __init__(self, transaction, config):
super().__init__(transaction, config)
self._check_config()
self._init_client()
def _init_client(self):
for version, uri in self.config['endpoint'].items():
e = parse_grpc_endpoint(uri)
self.client = pygrpc.client(endpoint=e['endpoint'], ssl_enabled=e['ssl_enabled'])
def _check_config(self):
if 'endpoint' not in self.config:
raise ERROR_CONNECTOR_CONFIGURATION(backend=self.__class__.__name__)
if len(self.config['endpoint']) > 1:
raise ERROR_CONNECTOR_CONFIGURATION(backend=self.__class__.__name__)
def get_plugin_endpoint(self, plugin_id, version, domain_id, upgrade_mode='AUTO'):
response = self.client.Plugin.get_plugin_endpoint({
'plugin_id': plugin_id,
'version': version,
'upgrade_mode': upgrade_mode,
'domain_id': domain_id
}, metadata=self.transaction.get_connection_meta())
return response
@staticmethod
def _change_message(message):
return MessageToDict(message, preserving_proto_field_name=True)
| python | 1,572 |
# Copyright 2019 Lorenzo Cabrini
#
# Use of this source code is governed by an MIT-style
# license that can be found in the LICENSE file or at
# https://opensource.org/licenses/MIT.
from .service import Service
class MariaDB(Service):
def __init__(self, host, **kwargs):
super().__init__(host, **kwargs)
def connect(self, **kwargs):
pass
def backup(self, **kwargs):
pass
| python | 411 |
"""Tests for pixel alogrithms"""
# pylint: disable=no-name-in-module,redefined-outer-name,no-value-for-parameter
# pyright: reportGeneralTypeIssues=false
import numpy as np
import pytest
from seasmon_xr.ops import (
autocorr,
autocorr_1d,
autocorr_tyx,
lroo,
ws2dgu,
ws2doptv,
ws2doptvp,
ws2doptvplc,
ws2dpgu,
)
from seasmon_xr.ops.spi import brentq, gammafit, spifun
@pytest.fixture
def ts():
"""Testdata"""
np.random.seed(42)
x = np.random.gamma(1, size=10)
return x
def test_lroo(ts):
x_lroo = lroo(np.array((ts > 0.9) * 1, dtype="uint8"))
assert x_lroo == 3
def pearson_reference(X, Y):
return ((X - X.mean()) * (Y - Y.mean())).mean() / (X.std() * Y.std())
def autocorr_1d_reference(x, nodata=None):
if nodata is not None:
_x = x.astype("float64")
_x[x == nodata] = np.nan
x = _x
X = x[:-1]
Y = x[1:]
if np.isnan(x).any():
X, Y = X.copy(), Y.copy()
X[np.isnan(X)] = np.nanmean(X)
Y[np.isnan(Y)] = np.nanmean(Y)
return pearson_reference(X, Y)
def test_autocorr(ts):
ac = autocorr(ts.reshape(1, 1, -1))
np.testing.assert_almost_equal(ac, 0.00398337)
np.testing.assert_almost_equal(autocorr_1d(ts), 0.00398337)
np.testing.assert_almost_equal(autocorr_1d_reference(ts), 0.00398337)
np.testing.assert_almost_equal(autocorr_tyx(ts.reshape(-1, 1, 1)), 0.00398337)
def test_autocorr_nodata(ts_ndvi):
nodata, ts = ts_ndvi
rr = autocorr_1d(ts, nodata)
rr_ref = autocorr_1d_reference(ts, nodata)
assert rr == pytest.approx(rr_ref, rel=1e-3)
def test_brentq():
x = brentq(xa=0.6446262296476516, xb=1.5041278691778537, s=0.5278852360624721)
assert x == pytest.approx(1.083449238500003)
def test_gammafit(ts):
parameters = gammafit(ts)
assert parameters == pytest.approx((1.083449238500003, 0.9478709674697126))
def test_spi(ts):
xspi = spifun(ts.reshape(1, 1, -1))
assert xspi.shape == (1, 1, 10)
np.testing.assert_array_equal(
xspi[0, 0, :],
[-382.0, 1654.0, 588.0, 207.0, -1097.0, -1098.0, -1677.0, 1094.0, 213.0, 514.0],
)
def test_spi_nofit(ts):
xspi = spifun(ts.reshape(1, 1, -1), a=1, b=2)
assert xspi.shape == (1, 1, 10)
np.testing.assert_array_equal(
xspi[0, 0, :],
[
-809.0,
765.0,
-44.0,
-341.0,
-1396.0,
-1396.0,
-1889.0,
343.0,
-336.0,
-101.0,
],
)
def test_spi_selfit(ts):
xspi = spifun(ts.reshape(1, 1, -1), cal_start=0, cal_stop=3)
assert xspi.shape == (1, 1, 10)
np.testing.assert_array_equal(
xspi[0, 0, :],
[
-1211.0,
1236.0,
-32.0,
-492.0,
-2099.0,
-2099.0,
-2833.0,
572.0,
-484.0,
-120.0,
],
)
def test_spi_selfit_2(ts):
cal_start = 2
cal_stop = 8
a, b = gammafit(ts[cal_start:cal_stop])
xspi_ref = spifun(ts.reshape(1, 1, -1), a=a, b=b)
xspi = spifun(ts.reshape(1, 1, -1), cal_start=cal_start, cal_stop=cal_stop)
np.testing.assert_equal(xspi, xspi_ref)
def test_ws2dgu(ts):
_ts = ts * 10
z = ws2dgu(_ts, 10, 0)
np.testing.assert_array_equal(z, [15, 14, 12, 9, 8, 7, 7, 9, 10, 12])
def test_ws2dpgu(ts):
_ts = ts * 10
z = ws2dpgu(_ts, 10, 0, 0.9)
np.testing.assert_array_equal(z, [26, 24, 22, 20, 18, 17, 16, 15, 15, 14])
def test_ws2doptv(ts):
_ts = ts * 10
z, l = ws2doptv(_ts, 0, np.arange(-2, 2))
np.testing.assert_array_equal(z, [10, 21, 16, 9, 3, 2, 5, 13, 12, 12])
assert l == pytest.approx(0.31622776601683794)
def test_ws2doptvp(ts):
_ts = ts * 10
z, l = ws2doptvp(_ts, 0, 0.9, np.arange(-2, 2))
np.testing.assert_array_equal(z, [13, 28, 19, 9, 3, 2, 7, 19, 15, 12])
assert l == pytest.approx(0.03162277660168379)
def test_ws2doptvplc(ts):
_ts = (ts * 10).astype("int16")
z, l = ws2doptvplc(_ts, 0, 0.9, 0.9)
np.testing.assert_array_equal(z, [12, 28, 19, 9, 3, 4, 13, 19, 14, 12])
assert l == pytest.approx(0.03162277660168379)
| python | 4,219 |
from django.test import TestCase
from django.forms.models import inlineformset_factory
from regressiontests.inline_formsets.models import Poet, Poem, School, Parent, Child
class DeletionTests(TestCase):
def test_deletion(self):
PoemFormSet = inlineformset_factory(Poet, Poem, can_delete=True)
poet = Poet.objects.create(name='test')
poem = poet.poem_set.create(name='test poem')
data = {
'poem_set-TOTAL_FORMS': u'1',
'poem_set-INITIAL_FORMS': u'1',
'poem_set-MAX_NUM_FORMS': u'0',
'poem_set-0-id': str(poem.pk),
'poem_set-0-poet': str(poet.pk),
'poem_set-0-name': u'test',
'poem_set-0-DELETE': u'on',
}
formset = PoemFormSet(data, instance=poet)
formset.save()
self.failUnless(formset.is_valid())
self.assertEqual(Poem.objects.count(), 0)
def test_add_form_deletion_when_invalid(self):
"""
Make sure that an add form that is filled out, but marked for deletion
doesn't cause validation errors.
"""
PoemFormSet = inlineformset_factory(Poet, Poem, can_delete=True)
poet = Poet.objects.create(name='test')
data = {
'poem_set-TOTAL_FORMS': u'1',
'poem_set-INITIAL_FORMS': u'0',
'poem_set-MAX_NUM_FORMS': u'0',
'poem_set-0-id': u'',
'poem_set-0-poem': u'1',
'poem_set-0-name': u'x' * 1000,
}
formset = PoemFormSet(data, instance=poet)
# Make sure this form doesn't pass validation.
self.assertEqual(formset.is_valid(), False)
self.assertEqual(Poem.objects.count(), 0)
# Then make sure that it *does* pass validation and delete the object,
# even though the data isn't actually valid.
data['poem_set-0-DELETE'] = 'on'
formset = PoemFormSet(data, instance=poet)
self.assertEqual(formset.is_valid(), True)
formset.save()
self.assertEqual(Poem.objects.count(), 0)
def test_change_form_deletion_when_invalid(self):
"""
Make sure that a change form that is filled out, but marked for deletion
doesn't cause validation errors.
"""
PoemFormSet = inlineformset_factory(Poet, Poem, can_delete=True)
poet = Poet.objects.create(name='test')
poet.poem_set.create(name='test poem')
data = {
'poem_set-TOTAL_FORMS': u'1',
'poem_set-INITIAL_FORMS': u'1',
'poem_set-MAX_NUM_FORMS': u'0',
'poem_set-0-id': u'1',
'poem_set-0-poem': u'1',
'poem_set-0-name': u'x' * 1000,
}
formset = PoemFormSet(data, instance=poet)
# Make sure this form doesn't pass validation.
self.assertEqual(formset.is_valid(), False)
self.assertEqual(Poem.objects.count(), 1)
# Then make sure that it *does* pass validation and delete the object,
# even though the data isn't actually valid.
data['poem_set-0-DELETE'] = 'on'
formset = PoemFormSet(data, instance=poet)
self.assertEqual(formset.is_valid(), True)
formset.save()
self.assertEqual(Poem.objects.count(), 0)
def test_save_new(self):
"""
Make sure inlineformsets respect commit=False
regression for #10750
"""
# exclude some required field from the forms
ChildFormSet = inlineformset_factory(School, Child, exclude=['father', 'mother'])
school = School.objects.create(name=u'test')
mother = Parent.objects.create(name=u'mother')
father = Parent.objects.create(name=u'father')
data = {
'child_set-TOTAL_FORMS': u'1',
'child_set-INITIAL_FORMS': u'0',
'child_set-MAX_NUM_FORMS': u'0',
'child_set-0-name': u'child',
}
formset = ChildFormSet(data, instance=school)
self.assertEqual(formset.is_valid(), True)
objects = formset.save(commit=False)
for obj in objects:
obj.mother = mother
obj.father = father
obj.save()
self.assertEqual(school.child_set.count(), 1)
| python | 4,212 |
import os
import sys
print "*** Simple Planets - As easy as ABC! ***"
name = str(raw_input('Enter run name: '))
steps = int(raw_input('Enter number of pmc steps: '))
eps = float(raw_input('Enter initial tolerance size: '))
min_part = int(raw_input('Enter sample size: '))
n_procs = int(raw_input('Enter number of cores (max 20): '))
print "Running {:}, steps = {:}, epsilon = {:}, samples = {:}".format(name,
steps,
eps,
min_part)
os.makedirs('RUNS/{:}'.format(name))
os.makedirs('RUNS/{:}/KNOWN'.format(name))
os.makedirs('RUNS/{:}/SCIENCE'.format(name))
pbs_head = '''
#!/bin/bash
#
#PBS -N {0:}
#PBS -M [email protected]
#PBS -m abe
#PBS -A ebf11_collab
#PBS -l pmem={1:}gb
#PBS -l nodes=1:ppn={1:}
#PBS -l walltime=048:00:00
#PBS -o runs/
#PBS -e runs/
#PBS -j oe
#
cd $PBS_O_WORKDIR'''.format(name, n_procs)
science = 'pbs_scripts/{:}_science.pbs'.format(name)
known = 'pbs_scripts/{:}_known.pbs'.format(name)
sci_file = file(science, 'w')
print >> sci_file, pbs_head
print >> sci_file, """python simpleplanets_kepler.py {:} {:} {:} {:} {:} False
""".format(name, steps, eps, min_part, n_procs)
print >> sci_file, """python testandplot.py RUNS/{0}/SCIENCE/{0}_{1}samples_{2}.pkl
""".format(name, min_part, steps-1)
sci_file.close()
known_file = file(known, 'w')
print >> known_file, pbs_head
print >> known_file, """python simpleplanets_kepler.py {:} {:} {:} {:} {:} True
""".format(name, steps, eps, min_part, n_procs)
print >> known_file, """python testandplot.py RUNS/{0}/KNOWN/{0}_{1}samples_{2}.pkl
""".format(name, min_part, steps-1)
known_file.close()
os.system('echo "steps = {1:}, epsilon = {2:}, samples = {3:}" > RUNS/{0:}/{0:}_log.txt'.format(name,
steps,
eps,
min_part))
os.system(' git status -u none >> RUNS/{0:}/{0:}_log.txt'.format(name))
os.system(' git rev-parse HEAD >> RUNS/{0:}/{0:}_log.txt'.format(name))
if len(sys.argv) > 1 and sys.argv[1] == 'local':
os.system("python simpleplanets_kepler.py {:} {:} {:} {:} False".format(name, steps, eps, min_part))
os.system("python simpleplanets_kepler.py {:} {:} {:} {:} True".format(name, steps, eps, min_part))
os.system("python testandplot.py RUNS/{0}/SCIENCE/{0}_{1}samples_{2}.pkl".format(name, min_part, steps-1))
os.system("python testandplot.py RUNS/{0}/KNOWN/{0}_{1}samples_{2}.pkl".format(name, min_part, steps-1))
else:
os.system("qsub {:}".format(known))
os.system("qsub {:}".format(science))
| python | 2,902 |
# coding: utf-8
"""pypool_pump package allows to compute the duration of the swiming pool
filtering.
"""
from .__version__ import VERSION, __version__
from .run import Run
from datetime import timedelta, datetime
from typing import List
class FilteringDuration(object):
"""Root class with common parts"""
    def __init__(self, percentage: float = 100, schedule_config: dict = None) -> None:
self._computed_filtering_duration: float = None
self._modifier_pecentage: float = percentage
self._total_duration = None
        self._schedule_config = schedule_config or {}
def duration(self) -> float:
#TODO: rename method
"""Filtering duration in hours
If modifier have been set, they will be applied to the computed filtering
duration.
Maximum duration is always 24 hours.
"""
self._total_duration: float = max(
min(self._computed_filtering_duration * self._modifier_pecentage / 100, 24), 0
)
return self._total_duration
def update_schedule(self,pivot_time:datetime) -> List[Run]:
# TODO: Add protection on total duration and schedule config
# TODO: define strategy if total duration + break > 24
first_start = pivot_time - timedelta(hours=(self._total_duration + self._schedule_config['break_duration']) / 3)
first_duration = self._total_duration / 3
second_start = pivot_time + timedelta(hours=2/3 * self._schedule_config['break_duration'])
second_duration = 2 * first_duration
return [Run(first_start, first_duration), Run(second_start, second_duration)]
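    # Worked example of the split above (numbers are illustrative, not from the package):
    # with total_duration = 9 h, break_duration = 1 h and pivot_time = 14:00, the first
    # run starts at 14:00 - (9 + 1)/3 h = 10:40 and lasts 3 h, the second starts at
    # 14:00 + 2/3 h = 14:40 and lasts 6 h, giving 9 h of filtering around a 1 h break.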
class DumbFilteringDuration(FilteringDuration):
"""Dumb duration calulation method with taking temperature/2"""
def duration(self, pool_temperature: float) -> float:
"""Filtering duration in hours"""
self._computed_filtering_duration = pool_temperature / 2
return super().duration()
class BasicFilteringDuration(FilteringDuration):
"""Basic duration calculation method with:
- 0 below 10°C
- temperature/3 between 10°C and 14°C
- temperature/2 between 14°C and 30°C
- continuous filtration above 30°C
"""
def duration(self, pool_temperature: float) -> float:
"""Filtering duration in hours"""
if pool_temperature < 10:
# No need to filter below 10°C.
self._computed_filtering_duration = 0
elif pool_temperature < 14:
# between 10 and 14 we can reduce filtering
self._computed_filtering_duration = pool_temperature / 3
elif pool_temperature >= 30:
            # Above 30°C it is recommended to filter continuously.
self._computed_filtering_duration = 24
else:
self._computed_filtering_duration = pool_temperature / 2
return super().duration()
class AbacusFilteringDuration(FilteringDuration):
"""Advanced calculation method using an abacus.
D = a*T^3 + b*T^2 + c*T +d
T is forced at a 10°C minimum
Formula discovered here: https://github.com/scadinot/pool
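    As a rough check of the coefficients used below: at T = 25°C the polynomial
    gives 0.00335*25^3 - 0.14953*25^2 + 2.43489*25 - 10.72859, i.e. about 9.0 hours
    (before the percentage modifier and the 0-24 h clamp are applied).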
"""
def duration(self, pool_temperature: float) -> float:
"""Filtering duration in hours"""
# Force temperature at a 10°C minimum to ensure minimum filtration.
temperature: float = max(pool_temperature, 10)
self._computed_filtering_duration = (
0.00335 * temperature ** 3
- 0.14953 * temperature ** 2
+ 2.43489 * temperature
- 10.72859
)
return super().duration()
class PumpCaracteristicFilteringDuration(FilteringDuration):
"""Advanced calculatin method using the caracteristic of your water pump and your
pool.
"""
def __init__(
self, pool_volume: float, pump_flow: float, percentage: float = 100
) -> None:
self.pool_volume = pool_volume
self.pump_flow = pump_flow
super().__init__(percentage)
def duration(
self, pool_temperature: float, number_of_bathers: float = None, schedule_config:dict = {}
) -> float:
"""Filtering duration in hours"""
cycle_duration: float = self.pool_volume / self.pump_flow
if pool_temperature > 25:
self._computed_filtering_duration = 3 * cycle_duration
# TODO: +2 hours if > 28°c ?
elif pool_temperature > 20:
self._computed_filtering_duration = 2 * cycle_duration
elif pool_temperature > 15:
self._computed_filtering_duration = 1 * cycle_duration
elif pool_temperature > 10:
self._computed_filtering_duration = 0.5 * cycle_duration
else:
self._computed_filtering_duration = 0
if number_of_bathers is not None:
bather_modifier: float = number_of_bathers / self.pump_flow * 2
self._computed_filtering_duration = (
self._computed_filtering_duration + bather_modifier
)
return super().duration()
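        # Illustration with assumed numbers (not package defaults): a 50 m3 pool with a
        # 10 m3/h pump gives cycle_duration = 5 h, so 22°C water calls for 2 cycles = 10 h,
        # and each bather adds 2/10 h = 12 min through the bather modifier.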
# TODO: pump characteristic.
# TODO: add a modifier for the number of bathers.
# TODO: add a modifier for heavy or stormy weather.
| python | 5,164 |
def backward_string_by_word(text: str) -> str:
    """Reverse each space-separated word of ``text`` while keeping the word order."""
    return " ".join(word[::-1] for word in text.split(" "))
 | python | 102 |
from DNAtool import *
print(DNAtool("AATCCGG"))
print(count_nuc("ATTTTA"))
transcriptionDNA("ACT")
GCpercentage("ACGA")
AminoAcidSearch("ALU")
| python | 159 |
"""
This module contains these Primitives classes: `Polyline` and `Primitives`.
"""
from __future__ import absolute_import # noreorder
import math
import os
import time
from collections import OrderedDict
from pyaedt.application.Variables import Variable
from pyaedt.generic.constants import PLANE
from pyaedt.generic.general_methods import _retry_ntimes
from pyaedt.generic.general_methods import is_number
from pyaedt.generic.general_methods import pyaedt_function_handler
from pyaedt.modeler.GeometryOperators import GeometryOperators
from pyaedt.modeler.Object3d import _dim_arg
from pyaedt.modeler.Object3d import _uname
from pyaedt.modeler.Object3d import EdgePrimitive
from pyaedt.modeler.Object3d import FacePrimitive
from pyaedt.modeler.Object3d import Object3d
from pyaedt.modules.MaterialLib import Material
from pyaedt.modeler.object3dlayout import Point
default_materials = {
"Icepak": "air",
"HFSS": "vacuum",
"Maxwell 3D": "vacuum",
"Maxwell 2D": "vacuum",
"2D Extractor": "copper",
"Q3D Extractor": "copper",
"HFSS 3D Layout": "copper",
"Mechanical": "copper",
}
aedt_wait_time = 0.1
class PolylineSegment:
"""Creates and manipulates a segment of a polyline.
Parameters
----------
type : str
Type of the object. Choices are ``"Line"``, ``"Arc"``, ``"Spline"``,
and ``"AngularArc"``.
num_seg : int, optional
Number of segments for the types ``"Arc"``, ``"Spline"``, and
``"AngularArc"``. The default is ``0``. For the type
``Line``, this parameter is ignored.
num_points : int, optional
Number of control points for the type ``Spline``. For other
types, this parameter
is defined automatically.
arc_angle : float or str, optional
Sweep angle in radians or a valid value string. For example,
``"35deg"`` or ``"Specific
to type AngularArc"``.
arc_center : list or str, optional
List of values in model units or a valid value string. For
example, a list of ``[x, y, z]`` coordinates or ``"Specific to
type AngularArc"``.
arc_plane : str, int optional
Plane in which the arc sweep is performed in the active
coordinate system ``"XY"``, ``"YZ"`` or ``"ZX"``. The default is
``None``, in which case the plane is determined automatically
by the first coordinate for which the starting point and
center point have the same value.
Examples
--------
See :class:`pyaedt.Primitives.Polyline`.
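    A minimal sketch of building segments to pass to ``create_polyline``
    (illustrative values; the keyword names are the constructor arguments above):
    >>> from pyaedt.modeler.Primitives import PolylineSegment
    >>> line = PolylineSegment("Line")
    >>> arc = PolylineSegment("AngularArc", arc_angle="30deg",
    ...                       arc_center=[0, 0, 0], arc_plane="XY")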
"""
def __init__(self, type, num_seg=0, num_points=0, arc_angle=0, arc_center=None, arc_plane=None):
valid_types = ["Line", "Arc", "Spline", "AngularArc"]
assert type in valid_types, "Segment type must be in {}.".format(valid_types)
self.type = type
if type != "Line":
self.num_seg = num_seg
if type == "Line":
self.num_points = 2
if type == "Spline":
self.num_points = num_points
if "Arc" in type:
self.num_points = 3
if type == "AngularArc":
self.arc_angle = arc_angle
if not arc_center:
arc_center = [0, 0, 0]
assert len(arc_center) == 3, "Arc center must be a list of length 3."
self.arc_center = arc_center
if isinstance(arc_plane, int):
if arc_plane == PLANE.XY:
self.arc_plane = "XY"
elif arc_plane == PLANE.ZX:
self.arc_plane = "ZX"
else:
self.arc_plane = "YZ"
else:
self.arc_plane = arc_plane
class Polyline(Object3d):
"""Creates and manipulates a polyline.
The constructor for this class is intended to be called from the
:func:`pyaedt.modeler.Primitives.Primitives.create_polyline` method.
The documentation is provided there.
The returned Polyline object exposes the methods for manipulating the polyline.
Parameters
----------
primitives : :class:`pyaedt.modeler.Primitives3D.Primitives3D`
Pointer to the parent Primitives object.
src_object : optional
The default is ``None``.
position_list : list, optional
List of positions in the ``[x, y, z]`` format. The default is ``None``.
segment_type : str or list, optional
Define the list of segment types.
Valid arguments are ``"Line"``, ``"Arc"``, ``"Spline"``, ``"AngularArc"``.
The default is ``None``.
cover_surface : bool, optional
The default is ``False``.
close_surface : bool, optional
The default is ``False``.
name : str, optional
The default is ``None``.
matname : str, optional
Name of the material. The default is ``None``.
xsection_type : str, optional
Type of the cross-section. Options are ``"Line"``, ``"Circle"``, ``"Rectangle"``,
and ``"Isosceles Trapezoid"``. The default is ``None``.
xsection_orient : str, optional
Direction of the normal vector to the width of the cross-section.
Options are ``"X"``, ``"Y"``, ``"Z"``, and ``"Auto"``. The
default is ``None``.
xsection_width : float or str, optional
Width or diameter of the cross-section for all types. The default is
``1``.
xsection_topwidth : float or str, optional
Top width of the cross-section for the type ``"Isosceles Trapezoid"`` only.
The default is ``1``.
xsection_height : float or str, optional
Height of the cross-section for the types ``"Rectangle"`` and ``"Isosceles
Trapezoid"`` only. The default is ``1``.
xsection_num_seg : int, optional
Number of segments in the cross-section surface for the types ``"Circle"``,
``"Rectangle"`` and ``"Isosceles Trapezoid"``. The default is ``0``.
The value must be ``0`` or greater than ``2``.
xsection_bend_type : str, optional
Type of the bend. The default is ``None``, in which case the bend type
is set to ``"Corner"``. For the type ``"Circle"``, the bend type
should be set to ``"Curved"``.
"""
def __init__(
self,
primitives,
src_object=None,
position_list=None,
segment_type=None,
cover_surface=False,
close_surface=False,
name=None,
matname=None,
xsection_type=None,
xsection_orient=None,
xsection_width=1,
xsection_topwidth=1,
xsection_height=1,
xsection_num_seg=0,
xsection_bend_type=None,
non_model=False,
):
self._primitives = primitives
if src_object:
self.__dict__ = src_object.__dict__.copy()
if name:
                self._m_name = name  # name supplied by the caller takes precedence
else:
self._id = src_object.id
self._m_name = src_object.name
else:
self._xsection = self._primitives._crosssection_arguments(
type=xsection_type,
orient=xsection_orient,
width=xsection_width,
topwidth=xsection_topwidth,
height=xsection_height,
num_seg=xsection_num_seg,
bend_type=xsection_bend_type,
)
self._positions = [i for i in position_list]
# When close surface or cover_surface are set to True, ensure the start point and end point are coincident,
# and insert a line segment to achieve this if necessary
if cover_surface:
close_surface = True
self._is_closed = close_surface
self._is_covered = cover_surface
self._segment_types = None
if segment_type:
if isinstance(segment_type, (list, tuple)):
# self._segment_types = copy(segment_type)
self._segment_types = [i for i in segment_type]
else:
self._segment_types = segment_type
varg1 = self._point_segment_string_array()
if non_model:
flag = "NonModel#"
else:
flag = ""
varg2 = self._primitives._default_object_attributes(name=name, matname=matname, flags=flag)
new_object_name = _retry_ntimes(10, self.m_Editor.CreatePolyline, varg1, varg2)
Object3d.__init__(self, primitives, name=new_object_name)
self._primitives.objects[self.id] = self
self._primitives.object_id_dict[self.name] = self.id
@property
def start_point(self):
"""List of the ``[x, y, z]`` coordinates for the starting point in the polyline
object in the object's coordinate system.
Returns
-------
list
List of the ``[x, y, z]`` coordinates for the starting point in the polyline
object.
"""
return self.vertices[0].position
@property
def end_point(self):
"""List of the ``[x, y, z]`` coordinates for the ending point in the polyline
object in the object's coordinate system.
Returns
-------
list
List of the ``[x, y, z]`` coordinates for the ending point in the polyline
object.
References
----------
>>> oEditor.GetVertexIDsFromObject
>>> oEditor.GetVertexPosition
"""
return self.vertices[-1].position
@property
def points(self):
"""Polyline Points."""
return self._positions
@property
def vertex_positions(self):
"""List of the ``[x, y, z]`` coordinates for all vertex positions in the
polyline object in the object's coordinate system.
Returns
-------
list
List of the ``[x, y, z]`` coordinates for all vertex positions in the
polyline object.
References
----------
>>> oEditor.GetVertexIDsFromObject
>>> oEditor.GetVertexPosition
"""
id_list = self._primitives.get_object_vertices(partID=self.id)
position_list = [self._primitives.get_vertex_position(id) for id in id_list]
return position_list
@pyaedt_function_handler()
def _pl_point(self, pt):
pt_data = ["NAME:PLPoint"]
pt_data.append("X:=")
pt_data.append(_dim_arg(pt[0], self._primitives.model_units))
pt_data.append("Y:=")
pt_data.append(_dim_arg(pt[1], self._primitives.model_units))
pt_data.append("Z:=")
pt_data.append(_dim_arg(pt[2], self._primitives.model_units))
return pt_data
@pyaedt_function_handler()
def _point_segment_string_array(self):
"""Retrieve the parameter arrays for specifying the points and segments of a polyline
used in the :class:`pyaedt.modeler.Primitives.Polyline` constructor.
Returns
-------
list
"""
position_list = self._positions
segment_types = self._segment_types
assert (
len(position_list) > 0
), "The ``position_list`` argument must be a list of positions with at least one point."
if not segment_types:
segment_types = [PolylineSegment("Line")] * (len(position_list) - 1)
elif isinstance(segment_types, str):
segment_types = [PolylineSegment(segment_types, num_points=len(position_list))]
elif isinstance(segment_types, PolylineSegment):
segment_types = [segment_types]
elif isinstance(segment_types, list):
# Convert all string-type entries in the segment_types list to PolylineSegments
for ind, seg in enumerate(segment_types):
if isinstance(seg, str):
segment_types[ind] = PolylineSegment(seg)
else:
raise ("Invalid segment_types input of type {}".format(type(segment_types)))
# Add a closing point if needed #TODO check for all combinations
varg1 = ["NAME:PolylineParameters"]
varg1.append("IsPolylineCovered:=")
varg1.append(self._is_covered)
varg1.append("IsPolylineClosed:=")
varg1.append(self._is_closed)
# PointsArray
points_str = ["NAME:PolylinePoints"]
points_str.append(self._pl_point(position_list[0]))
# Segments Array
segment_str = ["NAME:PolylineSegments"]
pos_count = 0
vertex_count = 0
index_count = 0
while vertex_count <= len(segment_types):
try:
current_segment = None
if vertex_count == len(segment_types):
if self._is_closed:
# Check the special case of a closed polyline needing an additional Line segment
if position_list[0] != position_list[-1]:
position_list.append(position_list[0])
current_segment = PolylineSegment("Line")
else:
break
else:
current_segment = segment_types[vertex_count]
except IndexError:
raise ("Number of segments inconsistent with the number of points!")
if current_segment:
seg_str = self._segment_array(
current_segment, start_index=index_count, start_point=position_list[pos_count]
)
segment_str.append(seg_str)
pos_count_incr = 0
for i in range(1, current_segment.num_points):
if current_segment.type == "AngularArc":
points_str.append(self._pl_point(current_segment.extra_points[i - 1]))
index_count += 1
else:
if (pos_count + i) == len(position_list):
if current_segment.type == "Arc" and self._is_closed:
position_list.append(position_list[0])
else:
err_str = "Insufficient points in position_list to complete the specified segment list"
raise IndexError(err_str)
points_str.append(self._pl_point(position_list[pos_count + i]))
pos_count_incr += 1
index_count += 1
pos_count += pos_count_incr
vertex_count += 1
else:
break
varg1.append(points_str)
varg1.append(segment_str)
# Poly Line Cross Section
varg1.append(self._xsection)
return varg1
@pyaedt_function_handler()
def _segment_array(self, segment_data, start_index=0, start_point=None):
"""Retrieve a property array for a polyline segment for use in the
:class:`pyaedt.modeler.Primitives.Polyline` constructor.
Parameters
----------
segment_data : :class:`pyaedt.modeler.Primitives.PolylineSegment` or str
Pointer to the calling object that provides additional functionality
or a string with the segment type ``Line`` or ``Arc``.
start_index : int, string
Starting vertex index of the segment within a compound polyline. The
default is ``0``.
start_point : list, optional
Position of the first point for type ``AngularArc``. The default is
``None``. Float values are considered in model units.
Returns
        -------
list
List of properties defining a polyline segment.
"""
if isinstance(segment_data, str):
segment_data = PolylineSegment(segment_data)
seg = [
"NAME:PLSegment",
"SegmentType:=",
segment_data.type,
"StartIndex:=",
start_index,
"NoOfPoints:=",
segment_data.num_points,
]
if segment_data.type != "Line":
seg += ["NoOfSegments:=", "{}".format(segment_data.num_seg)]
if segment_data.type == "AngularArc":
# from start-point and angle, calculate the mid- and end-points
# Also identify the plane of the arc ("YZ", "ZX", "XY")
plane_axes = {"YZ": [1, 2], "ZX": [2, 0], "XY": [0, 1]}
assert start_point, "Start-point must be defined for an AngularArc Segment"
c_xyz = self._primitives.value_in_object_units(segment_data.arc_center)
p0_xyz = self._primitives.value_in_object_units(start_point)
if segment_data.arc_plane:
# Accept the user input for the plane of rotation - let the modeler fail if invalid
plane_def = (segment_data.arc_plane, plane_axes[segment_data.arc_plane])
else:
# Compare the numeric values of start-point and center-point to determine the orientation plane
if c_xyz[0] == p0_xyz[0]:
plane_def = ("YZ", plane_axes["YZ"])
elif c_xyz[1] == p0_xyz[1]:
plane_def = ("ZX", plane_axes["ZX"])
elif c_xyz[2] == p0_xyz[2]:
plane_def = ("XY", plane_axes["XY"])
else:
raise ("Start point and arc-center do not lie on a common base plane.")
mod_units = self._primitives.model_units
seg += [
"ArcAngle:=",
segment_data.arc_angle,
"ArcCenterX:=",
"{}".format(_dim_arg(segment_data.arc_center[0], mod_units)),
"ArcCenterY:=",
"{}".format(_dim_arg(segment_data.arc_center[1], mod_units)),
"ArcCenterZ:=",
"{}".format(_dim_arg(segment_data.arc_center[2], mod_units)),
"ArcPlane:=",
plane_def[0],
]
# Calculate the extra two points of the angular arc in the alpha-beta plane
alph_index = plane_def[1][0]
beta_index = plane_def[1][1]
c_alph = c_xyz[alph_index]
c_beta = c_xyz[beta_index]
p0_alph = p0_xyz[alph_index] - c_alph
p0_beta = p0_xyz[beta_index] - c_beta
# rotate to generate the new points
arc_ang_rad = self._primitives._app.evaluate_expression(segment_data.arc_angle)
rot_angle = arc_ang_rad * 0.5
p1_alph = p0_alph * math.cos(rot_angle) + p0_beta * math.sin(rot_angle)
p1_beta = p0_beta * math.cos(rot_angle) - p0_alph * math.sin(rot_angle)
p2_alph = p1_alph * math.cos(rot_angle) + p1_beta * math.sin(rot_angle)
p2_beta = p1_beta * math.cos(rot_angle) - p1_alph * math.sin(rot_angle)
# Generate the 2 new points in XYZ
p1 = list(p0_xyz)
p1[alph_index] = p1_alph + c_alph
            p1[beta_index] = p1_beta + c_beta
p2 = list(p0_xyz)
p2[alph_index] = p2_alph + c_alph
p2[beta_index] = p2_beta + c_beta
segment_data.extra_points = [p1, p2]
return seg
@pyaedt_function_handler()
def clone(self):
"""Clone a polyline object.
Returns
-------
pyaedt.modeler.Primitives.Polyline
Polyline object that was created.
References
----------
>>> oEditor.Copy
>>> oEditor.Paste
Examples
--------
>>> primitives = self.aedtapp.modeler
>>> P1 = modeler.create_polyline([[0, 1, 2], [0, 2, 3], [2, 1, 4]])
>>> P2 = P1.clone()
"""
vArg1 = ["NAME:Selections", "Selections:=", self.name]
self._primitives.oeditor.Copy(vArg1)
self._primitives.oeditor.Paste()
return self._add_new_polyline()
@pyaedt_function_handler()
def _add_new_polyline(self):
new_objects = self._primitives.find_new_objects()
assert len(new_objects) == 1
new_name = new_objects[0]
new_polyline = Polyline(self._primitives, src_object=self, name=new_name)
new_polyline._id = None
self._primitives.objects[new_polyline.id] = new_polyline
self._primitives.object_id_dict[new_name] = new_polyline.id
return new_polyline
@pyaedt_function_handler()
def remove_vertex(self, position, abstol=1e-9):
"""Remove a vertex from an existing polyline by position.
You must enter the exact position of the vertex as a list
of ``[x, y, z]`` coordinates in the object's coordinate system.
Parameters
----------
position : list
List of ``[x, y, z]`` coordinates specifying the vertex to remove.
abstol : float, optional
Absolute tolerance of the comparison of a specified position to the
vertex positions. The default is ``1e-9``.
Returns
-------
bool
``True`` when successful, ``False`` when failed.
References
----------
>>> oEditor.DeletePolylinePoint
Examples
--------
Use floating point values for the vertex positions.
>>> P = modeler.create_polyline([[0, 1, 2], [0, 2, 3], [2, 1, 4]])
>>> P.remove_vertex([0, 1, 2])
Use string expressions for the vertex position.
>>> P = modeler.create_polyline([[0, 1, 2], [0, 2, 3], [2, 1, 4]])
>>> P.remove_vertex(["0mm", "1mm", "2mm"])
Use string expressions for the vertex position and include an absolute
tolerance when searching for the vertex to be removed.
>>> P = modeler.create_polyline([[0, 1, 2], [0, 2, 3], [2, 1, 4]])
>>> P.remove_vertex(["0mm", "1mm", "2mm"], abstol=1e-6)
"""
found_vertex = False
if self._primitives._app._is_object_oriented_enabled():
obj = self._primitives.oeditor.GetChildObject(self._m_name).GetChildObject("CreatePolyline:1")
segments = obj.GetChildNames()
seg_id = 0
for seg in segments:
point = obj.GetChildObject(seg).GetPropValue("Point1")
p = self._primitives.value_in_object_units([point[1], point[3], point[5]])
pos_xyz = self._primitives.value_in_object_units(position)
found_vertex = GeometryOperators.points_distance(p, pos_xyz) <= abstol
if found_vertex:
at_start = True
break
point = obj.GetChildObject(seg).GetPropValue("Point2")
p = self._primitives.value_in_object_units([point[1], point[3], point[5]])
found_vertex = GeometryOperators.points_distance(p, pos_xyz) <= abstol
if found_vertex:
at_start = False
break
seg_id += 1
else: # pragma: no cover
pos_xyz = self._primitives.value_in_object_units(position)
for ind, vertex_pos in enumerate(self.vertex_positions):
# compare the specified point with the vertex data using an absolute tolerance
# (default of math.isclose is 1e-9 which should be ok in almost all cases)
found_vertex = GeometryOperators.points_distance(vertex_pos, pos_xyz) <= abstol
if found_vertex:
if ind == len(self.vertex_positions) - 1:
seg_id = ind - 1
at_start = True
else:
seg_id = ind
at_start = False
break
assert found_vertex, "Specified vertex {} not found in polyline {}.".format(position, self._m_name)
self._primitives.oeditor.DeletePolylinePoint(
[
"NAME:Delete Point",
"Selections:=",
self._m_name + ":CreatePolyline:1",
"Segment Indices:=",
[seg_id],
"At Start:=",
at_start,
]
)
return True
@pyaedt_function_handler()
def remove_edges(self, edge_id):
"""Remove a vertex from an existing polyline by position.
You must enter the exact position of the vertex as a list
of ``[x, y, z]`` coordinates in the object's coordinate system.
Parameters
----------
edge_id : int or list of int
One or more edge IDs within the total number of edges within the polyline.
Returns
-------
bool
``True`` when successful, ``False`` when failed.
References
----------
>>> oEditor.DeletePolylinePoint
Examples
--------
>>> P = modeler.create_polyline([[0, 1, 2], [0, 2, 3], [2, 1, 4]])
>>> P.remove_edges(edge_id=0)
"""
if isinstance(edge_id, int):
edge_id = [edge_id]
try:
self._primitives.oeditor.DeletePolylinePoint(
[
"NAME:Delete Point",
"Selections:=",
self.name + ":CreatePolyline:1",
"Segment Indices:=",
edge_id,
"At Start:=",
True,
]
)
except:
raise ValueError("Invalid edge ID {} is specified on polyline {}.".format(edge_id, self.name))
return True
@pyaedt_function_handler()
def set_crosssection_properties(
self, type=None, orient=None, width=0, topwidth=0, height=0, num_seg=0, bend_type=None
):
"""Set the properties of an existing polyline object.
Parameters
----------
type : str, optional
Types of the cross-sections. Options are ``"Line"``, ``"Circle"``, ``"Rectangle"``,
and ``"Isosceles Trapezoid"``. The default is ``None``.
orient : str, optional
Direction of the normal vector to the width of the cross-section.
Options are ``"X"``, ``"Y"``, ``"Z"``, and ``"Auto"``. The default
is ``None``, which sets the orientation to ``"Auto"``.
width : float or str, optional
Width or diameter of the cross-section for all types. The default is
``0``.
topwidth : float or str
Top width of the cross-section for the type ``"Isosceles Trapezoid"``
only. The default is ``0``.
height : float or str
Height of the cross-section for the types ``"Rectangle"`` and `"Isosceles
Trapezoid"`` only. The default is ``0``.
num_seg : int, optional
Number of segments in the cross-section surface for the types ``"Circle"``,
``"Rectangle"``, and ``"Isosceles Trapezoid"``. The default is ``0``.
The value must be ``0`` or greater than ``2``.
bend_type : str, optional
Type of the bend. The default is ``None``, in which case the bend type
is set to ``"Corner"``. For the type ``"Circle"``, the bend type should be
set to ``"Curved"``.
Returns
-------
bool
``True`` when successful, ``False`` when failed.
References
----------
>>> oEditor.ChangeProperty
Examples
--------
>>> P = modeler.create_polyline([[0, 1, 2], [0, 2, 3], [2, 1, 4]])
>>> P.set_crosssection_properties(type="Circle", width="1mm")
"""
# Set the default section type to "None"
section_type = type
if not section_type:
section_type = "None"
# Set the default orientation to "Auto"
section_orient = orient
if not section_orient:
section_orient = "Auto"
# Set the default bend-type to "Corner"
section_bend = bend_type
if not section_bend:
section_bend = "Corner"
# Ensure number-of segments is valid
if num_seg:
assert num_seg > 2, "Number of segments for a cross-section must be 0 or greater than 2."
model_units = self._primitives.model_units
arg1 = ["NAME:AllTabs"]
arg2 = ["NAME:Geometry3DCmdTab", ["NAME:PropServers", self._m_name + ":CreatePolyline:1"]]
arg3 = ["NAME:ChangedProps"]
arg3.append(["NAME:Type", "Value:=", section_type])
arg3.append(["NAME:Orientation", "Value:=", section_orient])
arg3.append(["NAME:Bend Type", "Value:=", section_bend])
arg3.append(["NAME:Width/Diameter", "Value:=", _dim_arg(width, model_units)])
if section_type == "Rectangle":
arg3.append(["NAME:Height", "Value:=", _dim_arg(height, model_units)])
elif section_type == "Circle":
arg3.append(["NAME:Number of Segments", "Value:=", num_seg])
elif section_type == "Isosceles Trapezoid":
arg3.append(["NAME:Top Width", "Value:=", _dim_arg(topwidth, model_units)])
arg3.append(["NAME:Height", "Value:=", _dim_arg(height, model_units)])
arg2.append(arg3)
arg1.append(arg2)
self._primitives.oeditor.ChangeProperty(arg1)
self._update()
return True
@pyaedt_function_handler()
def insert_segment(self, position_list, segment=None, segment_number=0):
"""Add a segment to an existing polyline.
Parameters
----------
position_list : list
List of positions of the points that define the segment to insert.
Either the starting point or ending point of the segment list must
match one of the vertices of the existing polyline.
segment : str or :class:`pyaedt.modeler.Primitives.PolylineSegment`
Definition of the segment to insert. For the types ``"Line"`` and ``"Arc"``,
use their string values ``"Line"`` and ``"Arc"``. For the types ``"AngularArc"``
and ``"Spline"``, use the :class:`pyaedt.modeler.Primitives.PolylineSegment`
object to define the segment precisely.
Returns
-------
bool
``True`` when successful, ``False`` when failed.
References
----------
>>> oEditor.InsertPolylineSegment
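        Examples
        --------
        A rough sketch (illustrative coordinates; ``modeler`` is the app modeler,
        as in the other examples of this class):
        >>> P = modeler.create_polyline([[0, 1, 2], [0, 2, 3], [2, 1, 4]])
        >>> P.insert_segment([[2, 1, 4], [3, 1, 4]], segment="Line")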
"""
# Check for a valid number of points
num_points = len(position_list)
# define the segment type from the number of points given
if not segment:
if num_points == 2:
segment = PolylineSegment("Line")
elif num_points == 3:
segment = PolylineSegment("Arc")
else:
segment = PolylineSegment("Spline", num_points=num_points)
else:
if isinstance(segment, str):
segment = PolylineSegment(segment)
num_points = segment.num_points
# Check whether start-point of the segment is in the existing vertices
start_point = self._primitives.value_in_object_units(position_list[0])
# End point does not exist e.g. for an AngularArc
try:
end_point = self._primitives.value_in_object_units(position_list[num_points - 1])
except:
end_point = []
segment_id = 1
segment_index = 0
num_vertices = len(self.vertices)
for vertex in self.vertices:
if vertex.position == end_point:
if vertex.id == self.vertices[0].id:
if segment_id > 0:
segment_id -= 1
at_start = True
break
# If start_point=[0, 0, 0] (a list of integers provided by the user), it won't be equal to vertex.position
# that returns a list of float: [0., 0., 0.]. Thus we cast start_point as a list of floats.
elif vertex.position == [float(x) for x in start_point]:
at_start = False
if segment_index > 0:
segment_index -= 1
break
segment_index += 1
id_v = 0
if isinstance(self._segment_types, list):
s_types = [i for i in self._segment_types]
else:
s_types = [self._segment_types]
for el in s_types:
            if isinstance(el, PolylineSegment):
id_v += el.num_seg - 1
if id_v > segment_index:
id_v -= el.num_seg - 1
break
segment_index -= id_v
assert segment_index < num_vertices, "Vertex for the insert is not found."
type = segment.type
varg1 = ["NAME:Insert Polyline Segment="]
varg1.append("Selections:=")
varg1.append(self._m_name + ":CreatePolyline:1")
varg1.append("Segment Indices:=")
varg1.append([segment_index])
varg1.append("At Start:=")
varg1.append(at_start)
varg1.append("SegmentType:=")
varg1.append(type)
# Points and segment data
varg2 = ["NAME:PolylinePoints"]
if segment.type == "Line" or segment.type == "Spline" or segment.type == "Arc":
for pt in position_list[0:num_points]:
varg2.append(self._pl_point(pt))
varg1.append(varg2)
elif segment.type == "AngularArc":
seg_str = self._segment_array(segment, start_point=start_point)
varg2.append(self._pl_point(start_point))
varg2.append(self._pl_point(segment.extra_points[0]))
varg2.append(self._pl_point(segment.extra_points[1]))
varg1.append(varg2)
varg1 += seg_str[9:]
self._primitives.oeditor.InsertPolylineSegment(varg1)
if segment.type == "Spline":
varg1 = ["NAME:AllTabs"]
varg2 = ["NAME:Geometry3DPolylineTab"]
varg3 = ["NAME:PropServers"]
varg3.append(self._m_name + ":CreatePolyline:1" + ":Segment" + str(segment_id))
varg2.append(varg3)
varg4 = ["NAME:ChangedProps"]
varg5 = ["NAME:Number of Segments"]
varg5.append("Value:=")
varg5.append(str(segment_number))
varg4.append(varg5)
varg2.append(varg4)
varg1.append(varg2)
self._primitives.oeditor.ChangeProperty(varg1)
return True
class Primitives(object):
"""Provides common functionalities for primitives.
Parameters
----------
application : :class:`pyaedt.modeler.Model3D.Modeler3D`, :class:`pyaedt.modeler.Model2D.Modeler2D`
Pointer to the parent object.
Examples
--------
Basic usage demonstrated with an HFSS design:
>>> from pyaedt import Hfss
>>> aedtapp = Hfss()
>>> prim = aedtapp.modeler
"""
def __init__(self):
self.refresh()
@property
def _modeler(self):
return self._app.modeler
@property
def solid_objects(self):
"""List of all solid objects."""
return [self[name] for name in self.solid_names]
@property
def sheet_objects(self):
"""List of all sheet objects."""
return [self[name] for name in self.sheet_names]
@property
def line_objects(self):
"""List of all line objects."""
return [self[name] for name in self.line_names]
@property
def points(self):
"""List of points."""
return self._points
@property
def points_by_name(self):
"""Dictionary containing all points where the keys are the name of the points."""
return self._point_names
@property
def unclassified_objects(self):
"""List of all unclassified objects."""
self._refresh_unclassified()
return [self[name] for name in self._unclassified]
@property
def object_list(self):
"""List of all objects."""
self._refresh_object_types()
return [self[name] for name in self._all_object_names]
@property
def solid_names(self):
"""List of the names of all solid objects."""
self._refresh_solids()
return self._solids
@property
def sheet_names(self):
"""List of the names of all sheet objects."""
self._refresh_sheets()
return self._sheets
@property
def line_names(self):
"""List of the names of all line objects."""
self._refresh_lines()
return self._lines
@property
def unclassified_names(self):
"""List of the names of all unclassified objects."""
self._refresh_unclassified()
return self._unclassified
@property
def object_names(self):
"""List of the names of all objects."""
self._refresh_object_types()
return self._all_object_names
@property
def components_3d_names(self):
"""List of the names of all 3d components objects.
References
----------
>>> oEditor.Get3DComponentDefinitionNames
>>> oEditor.Get3DComponentInstanceNames
"""
obs3d = []
try:
comps3d = self.oeditor.Get3DComponentDefinitionNames()
for comp3d in comps3d:
obs3d += list(self.oeditor.Get3DComponentInstanceNames(comp3d))
except Exception:
obs3d = []
return obs3d
@property
def _oproject(self):
"""Project."""
return self._app.oproject
@property
def _odesign(self):
"""Design."""
return self._app._odesign
@property
def _materials(self):
"""Material Manager that is used to manage materials in the project.
Returns
-------
:class:`pyaedt.modules.MaterialLib.Materials`
Material Manager that is used to manage materials in the project.
"""
return self._app.materials
@property
def defaultmaterial(self):
"""Default material."""
return default_materials[self._app._design_type]
@property
def logger(self):
"""Logger."""
return self._app.logger
@property
def version(self):
"""Version."""
return self._app._aedt_version
@property
def modeler(self):
"""Modeler."""
return self._modeler
@property
def model_units(self):
"""Model units."""
return self.modeler.model_units
@property
def model_objects(self):
"""List of the names of all model objects."""
return self._get_model_objects(model=True)
@property
def non_model_objects(self):
"""List of names of all non-model objects."""
return self._get_model_objects(model=False)
@property
def model_consistency_report(self):
"""Summary of detected inconsistencies between the AEDT modeler and PyAEDT structures.
Returns
-------
dict
"""
obj_names = self.object_names
missing = []
for name in obj_names:
if name not in self.object_id_dict:
missing.append(name)
non_existent = []
for name in self.object_id_dict:
if name not in obj_names and name not in self.unclassified_names:
non_existent.append(name)
report = {"Missing Objects": missing, "Non-Existent Objects": non_existent}
return report
@pyaedt_function_handler()
def _change_geometry_property(self, vPropChange, names_list):
names = self._app.modeler.convert_to_selections(names_list, True)
vChangedProps = ["NAME:ChangedProps", vPropChange]
vPropServers = ["NAME:PropServers"]
for el in names:
vPropServers.append(el)
vGeo3d = ["NAME:Geometry3DAttributeTab", vPropServers, vChangedProps]
vOut = ["NAME:AllTabs", vGeo3d]
_retry_ntimes(10, self.oeditor.ChangeProperty, vOut)
if "NAME:Name" in vPropChange:
self.cleanup_objects()
return True
@pyaedt_function_handler()
def _change_point_property(self, vPropChange, names_list):
names = self._app.modeler.convert_to_selections(names_list, True)
vChangedProps = ["NAME:ChangedProps", vPropChange]
vPropServers = ["NAME:PropServers"]
for el in names:
vPropServers.append(el)
vGeo3d = ["NAME:Geometry3DPointTab", vPropServers, vChangedProps]
vOut = ["NAME:AllTabs", vGeo3d]
_retry_ntimes(10, self.oeditor.ChangeProperty, vOut)
if "NAME:Name" in vPropChange:
self.cleanup_objects()
return True
@pyaedt_function_handler()
def update_object(self, obj):
"""Update any :class:`pyaedt.modeler.Object3d.Object3d` derivatives
that have potentially been modified by a modeler operation.
Parameters
----------
obj : int, str, or :class:`pyaedt.modeler.Object3d.Object3d`
Object to be updated after a modeler operation.
Returns
-------
:class:`pyaedt.modeler.Object3d.Object3d`
Updated 3D object.
"""
o = self._resolve_object(obj)
o._update()
return o
@pyaedt_function_handler()
def value_in_object_units(self, value):
"""Convert one or more strings for numerical lengths to floating point values.
Parameters
----------
value : string or list of strings
One or more strings for numerical lengths. For example, ``"10mm"``
or ``["10mm", "12mm", "14mm"]``. When a a list is given, the entire
list is converted.
Returns
-------
float or list of floats
Defined in object units :attr:`pyaedt.modeler.Primitives.Polyline.object_units`
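Examples
--------
A minimal usage sketch, assuming ``aedtapp`` is an open application
instance (for example ``Hfss()``) whose model units are ``"mm"``; the
values are illustrative only.
>>> aedtapp.modeler.value_in_object_units("10mm")
10.0
>>> aedtapp.modeler.value_in_object_units(["10mm", "0.5mm"])
[10.0, 0.5]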
"""
# Convert to a list if a scalar is presented
scalar = False
if not isinstance(value, list):
value = [value]
scalar = True
numeric_list = []
for element in value:
if is_number(element):
num_val = element
elif isinstance(element, str):
# element is an existing variable
si_value = self._app.evaluate_expression(element)
v = Variable("{}meter".format(si_value))
v.rescale_to(self.model_units)
num_val = v.numeric_value
else:
raise ("Inputs to value_in_object_units must be strings or numbers.")
numeric_list.append(num_val)
if scalar:
return numeric_list[0]
else:
return numeric_list
@pyaedt_function_handler()
def does_object_exists(self, object):
""" "Check to see if an object exists.
Parameters
----------
object : str, int
Object name or object ID.
Returns
-------
bool
``True`` when successful, ``False`` when failed
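Examples
--------
A minimal sketch, assuming ``aedtapp`` is an open application instance
and ``"MyBox"`` is the name of an existing object (both names are
illustrative).
>>> aedtapp.modeler.does_object_exists("MyBox")
True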
"""
if isinstance(object, int):
if object in self.objects:
return True
else:
return False
else:
for el in self.objects:
if self.objects[el].name == object:
return True
return False
@pyaedt_function_handler()
def create_region(self, pad_percent=300):
"""Create an air region.
Parameters
----------
pad_percent : float or list of floats, optional
If a float, the same padding percentage is applied in all directions. The default is ``300``.
If a list of floats, the values are interpreted as padding percentages for ``["+X", "+Y", "+Z", "-X", "-Y", "-Z"]``.
Returns
-------
:class:`pyaedt.modeler.Object3d.Object3d`
Region object.
References
----------
>>> oEditor.CreateRegion
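Examples
--------
A minimal sketch, assuming ``aedtapp`` is an open application instance
and no region exists yet in the design; the padding values are
illustrative.
>>> region = aedtapp.modeler.create_region(pad_percent=[50, 50, 100, 50, 50, 100])
>>> region.name
'Region'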
"""
if "Region" in self.object_names:
return None
if is_number(pad_percent):
pad_percent = [pad_percent] * 6
arg = ["NAME:RegionParameters"]
p = ["+X", "+Y", "+Z", "-X", "-Y", "-Z"]
i = 0
for pval in p:
pvalstr = str(pval) + "PaddingType:="
qvalstr = str(pval) + "Padding:="
arg.append(pvalstr)
arg.append("Percentage Offset")
arg.append(qvalstr)
arg.append(str(pad_percent[i]))
i += 1
arg2 = [
"NAME:Attributes",
"Name:=",
"Region",
"Flags:=",
"Wireframe#",
"Color:=",
"(143 175 143)",
"Transparency:=",
0.75,
"PartCoordinateSystem:=",
"Global",
"UDMId:=",
"",
"MaterialValue:=",
'"air"',
"SurfaceMaterialValue:=",
'""',
"SolveInside:=",
True,
"IsMaterialEditable:=",
True,
"UseMaterialAppearance:=",
False,
"IsLightweight:=",
False,
]
self.oeditor.CreateRegion(arg, arg2)
return self._create_object("Region")
@pyaedt_function_handler()
def create_object_from_edge(self, edge):
"""Create a line object from an edge ID or from an
:class:`pyaedt.modeler.Object3d.EdgePrimitive` object.
Parameters
----------
edge : int or :class:`pyaedt.modeler.Object3d.EdgePrimitive`
Edge ID or :class:`pyaedt.modeler.Object3d.EdgePrimitive` object.
Returns
-------
:class:`pyaedt.modeler.Object3d.Object3d`
3D object.
References
----------
>>> oEditor.CreateObjectFromEdges
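Examples
--------
A minimal sketch, assuming a 3D design is open in ``aedtapp``; the
object name is illustrative.
>>> box = aedtapp.modeler.create_box([0, 0, 0], [10, 10, 10], name="MyBox")
>>> line = aedtapp.modeler.create_object_from_edge(box.edges[0])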
"""
if isinstance(edge, EdgePrimitive):
edge_id = edge.id
else:
edge_id = edge
obj = self._find_object_from_edge_id(edge_id)
if obj is not None:
varg1 = ["NAME:Selections"]
varg1.append("Selections:="), varg1.append(obj)
varg1.append("NewPartsModelFlag:="), varg1.append("Model")
varg2 = ["NAME:BodyFromEdgeToParameters"]
varg2.append("Edges:="), varg2.append([edge_id])
new_object_name = self.oeditor.CreateObjectFromEdges(varg1, ["NAME:Parameters", varg2])[0]
return self._create_object(new_object_name)
@pyaedt_function_handler()
def create_object_from_face(self, face):
"""Create an object from a face.
Parameters
----------
face : int or :class:`pyaedt.modeler.Object3d.FacePrimitive`
Face ID or :class:`pyaedt.modeler.Object3d.FacePrimitive` object.
Returns
-------
:class:`pyaedt.modeler.Object3d.Object3d`
3D object.
References
----------
>>> oEditor.CreateObjectFromFaces
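Examples
--------
A minimal sketch, assuming a 3D design is open in ``aedtapp``; the
object name is illustrative.
>>> box = aedtapp.modeler.create_box([0, 0, 0], [10, 10, 10], name="MyBox")
>>> sheet = aedtapp.modeler.create_object_from_face(box.faces[0])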
"""
face_id = face
if isinstance(face, FacePrimitive):
face_id = face.id
obj = self._find_object_from_face_id(face_id)
if obj is not None:
varg1 = ["NAME:Selections"]
varg1.append("Selections:="), varg1.append(obj)
varg1.append("NewPartsModelFlag:="), varg1.append("Model")
varg2 = ["NAME:BodyFromFaceToParameters"]
varg2.append("FacesToDetach:="), varg2.append([face_id])
new_object_name = self.oeditor.CreateObjectFromFaces(varg1, ["NAME:Parameters", varg2])[0]
return self._create_object(new_object_name)
@pyaedt_function_handler()
def create_polyline(
self,
position_list,
segment_type=None,
cover_surface=False,
close_surface=False,
name=None,
matname=None,
xsection_type=None,
xsection_orient=None,
xsection_width=1,
xsection_topwidth=1,
xsection_height=1,
xsection_num_seg=0,
xsection_bend_type=None,
non_model=False,
):
"""Draw a polyline object in the 3D modeler.
This method retrieves the
:class:`pyaedt.modeler.Primitives.Polyline` object, which has
additional methods for manipulating the polyline. For example,
you can use
:func:`pyaedt.modeler.Primitives.Polyline.insert_segment` to
insert a segment or
:attr:`pyaedt.modeler.Primitives.Polyline.id` to retrieve the
ID of the polyline object.
Parameters
----------
position_list : list
Array of positions of each point of the polyline. A
position is a list of 2D or 3D coordinates. Position
coordinate values can be numbers or valid AEDT string
expressions. For example, ``[0, 1, 2]``, ``["0mm", "5mm",
"1mm"]``, or ``["x1", "y1", "z1"]``.
segment_type : str, PolylineSegment, or list, optional
Type of the segment. The default is ``None``, in which case all
points are connected as ``"Line"`` segments. Valid string values
are ``"Line"`` and ``"Arc"``. A
:class:`pyaedt.modeler.Primitives.PolylineSegment` object can
describe a ``"Line"``, ``"Arc"``, ``"Spline"``, or ``"AngularArc"``
segment. A list of segment types (str or
:class:`pyaedt.modeler.Primitives.PolylineSegment`) is valid for a
compound polyline.
cover_surface : bool, optional
Whether to cover the polyline surface. The default is ``False``.
close_surface : bool, optional
Whether to close the polyline, which automatically joins the
ending point to the starting point. The default is ``False``.
name : str, optional
Name of the polyline. The default is ``None``.
matname : str, optional
Name of the material. The default is ``None``, in which case the
default material is assigned.
xsection_type : str, optional
Type of the cross-section. Options are ``"Line"``, ``"Circle"``,
``"Rectangle"``, and ``"Isosceles Trapezoid"``. The default is ``None``.
xsection_orient : str, optional
Direction of the normal vector to the width of the cross-section.
Options are ``"X"``, ``"Y"``, ``"Z"``, and ``"Auto"``. The default is
``None``, which sets the direction to ``"Auto"``.
xsection_width : float or str, optional
Width or diameter of the cross-section for all types. The
default is ``1``.
xsection_topwidth : float or str, optional
Top width of the cross-section for type ``"Isosceles Trapezoid"`` only.
The default is ``1``.
xsection_height : float or str
Height of the cross-section for type ``"Rectangle"`` or ``"Isosceles
Trapezoid"`` only. The default is ``1``.
xsection_num_seg : int, optional
Number of segments in the cross-section surface for type ``"Circle"``,
``"Rectangle"``, or ``"Isosceles Trapezoid"``. The default is ``0``. The
value must be ``0`` or greater than ``2``.
xsection_bend_type : str, optional
Type of the bend for the cross-section. The default is
``None``, in which case the bend type is set to
``"Corner"``. For the type ``"Circle"``, the bend type
should be set to ``"Curved"``.
non_model : bool, optional
Whether to create the polyline as a non-model object. The default is
``False``, in which case the polyline is created as a model object.
Returns
-------
pyaedt.modeler.Primitives.Polyline
Polyline object.
References
----------
>>> oEditor.CreatePolyline
Examples
--------
Set up the desktop environment.
>>> from pyaedt.desktop import Desktop
>>> from pyaedt.maxwell import Maxwell3d
>>> from pyaedt.modeler.Primitives import PolylineSegment
>>> desktop=Desktop(specified_version="2021.2", new_desktop_session=False)
>>> aedtapp = Maxwell3d()
>>> aedtapp.modeler.model_units = "mm"
>>> modeler = aedtapp.modeler
Define some test data points.
>>> test_points = [["0mm", "0mm", "0mm"], ["100mm", "20mm", "0mm"],
... ["71mm", "71mm", "0mm"], ["0mm", "100mm", "0mm"]]
The default behavior assumes that all points are to be
connected by line segments. Optionally specify the name.
>>> P1 = modeler.create_polyline(test_points, name="PL_line_segments")
Specify that the first segment is a line and the last three
points define a three-point arc.
>>> P2 = modeler.create_polyline(test_points, segment_type=["Line", "Arc"], name="PL_line_plus_arc")
Redraw the 3-point arc alone from the last three points and
additionally specify seven segments using ``PolylineSegment``.
>>> P3 = modeler.create_polyline(test_points[1:],
... segment_type=PolylineSegment(type="Arc", num_seg=7),
... name="PL_segmented_arc")
Specify that the four points form a spline and add a circular
cross-section with a diameter of 1 mm.
>>> P4 = modeler.create_polyline(test_points, segment_type="Spline", name="PL_spline",
... xsection_type="Circle", xsection_width="1mm")
Use the `PolylineSegment` object to specify more detail about
the individual segments. Create a center point arc starting
from the position ``test_points[1]``, rotating about the
center point position ``test_points[0]`` in the XY plane.
>>> start_point = test_points[1]
>>> center_point = test_points[0]
>>> segment_def = PolylineSegment(type="AngularArc", arc_center=center_point, arc_angle="90deg", arc_plane="XY")
>>> modeler.create_polyline(start_point, segment_type=segment_def, name="PL_center_point_arc")
Create a spline using a list of variables for the coordinates of the points.
>>> x0, y0, z0 = "0", "0", "1"
>>> x1, y1, z1 = "1", "3", "1"
>>> x2, y2, z2 = "2", "2", "1"
>>> P5 = modeler.create_polyline(position_list = [[x0, y0, z0], [x1, y1, z1], [x2, y2, z2]],
... segment_type="Spline", name="polyline_with_variables")
"""
new_polyline = Polyline(
primitives=self,
position_list=position_list,
segment_type=segment_type,
cover_surface=cover_surface,
close_surface=close_surface,
name=name,
matname=matname,
xsection_type=xsection_type,
xsection_orient=xsection_orient,
xsection_width=xsection_width,
xsection_topwidth=xsection_topwidth,
xsection_height=xsection_height,
xsection_num_seg=xsection_num_seg,
xsection_bend_type=xsection_bend_type,
non_model=non_model,
)
return new_polyline
@pyaedt_function_handler()
def create_spiral_on_face(self, face, poly_width, filling_factor=1.5):
"""Create a Spiral Polyline inside a face.
Parameters
----------
face : int or str or :class:`pyaedt.modeler.Object3d.FacePrimitive`
Face ID, object name, or face object on which the spiral is built.
poly_width : float
Width of the spiral polyline cross-section.
filling_factor : float, optional
Spacing factor between spiral turns, relative to ``poly_width``.
The default is ``1.5``.
Returns
-------
:class:`pyaedt.modeler.Primitives.Polyline`
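Examples
--------
A minimal sketch, assuming a 3D design is open in ``aedtapp``; the
object name and dimensions are illustrative.
>>> plate = aedtapp.modeler.create_box([0, 0, 0], [50, 50, 1], name="Plate")
>>> spiral = aedtapp.modeler.create_spiral_on_face(plate.top_face_z, poly_width=1)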
"""
# fmt: off
if isinstance(face, FacePrimitive):
face_id = face.id
elif isinstance(face, int):
face_id = face
else:
face_id = self.get_object_faces(face)[0]
vertices = self.get_face_vertices(face_id)
vertex_coordinates = []
for v in vertices:
vertex_coordinates.append(self.get_vertex_position(v))
centroid = self.get_face_center(face_id)
segments_lengths = []
for vc in vertex_coordinates:
segments_lengths.append(GeometryOperators.points_distance(vc, centroid))
n = math.floor(min(segments_lengths) / (poly_width * filling_factor))
if n % 2 == 0:
n_points = int(n / 2 - 1)
else:
n_points = int((n - 1) / 2)
if n_points < 1:
raise Exception("The given polyline width and filling factor are too large for the selected face.")
inner_points = []
for vc in vertex_coordinates:
temp = [[] for i in range(n_points)]
for i in range(3): # loop for x, y, z
delta = (centroid[i] - vc[i]) / (n_points + 1)
for j in range(1, n_points + 1):
temp[j - 1].append(vc[i] + delta * j)
inner_points.append(temp)
poly_points_list = []
for p in range(n_points):
for v in inner_points:
poly_points_list.append(v[p])
del poly_points_list[-1]
# fmt: on
return self.create_polyline(poly_points_list, xsection_type="Line", xsection_width=poly_width)
@pyaedt_function_handler()
def get_existing_polyline(self, object):
"""Retrieve a polyline object to manipulate it.
Parameters
----------
object : :class:`pyaedt.modeler.Object3d.Object3d`
An existing polyline object in the 3D Modeler.
Returns
-------
Polyline
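Examples
--------
A minimal sketch, assuming ``"MyPolyline"`` is the name of a polyline
already present in the design (the name is illustrative).
>>> obj = aedtapp.modeler["MyPolyline"]
>>> poly = aedtapp.modeler.get_existing_polyline(obj)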
"""
return Polyline(self, src_object=object)
@pyaedt_function_handler()
def create_udp(self, udp_dll_name, udp_parameters_list, upd_library="syslib", name=None, udp_type="Solid"):
"""Create a user-defined primitive (UDP).
Parameters
----------
udp_dll_name : str
Name of the UDP DLL.
udp_parameters_list : list
List of the UDP parameters.
upd_library : str, optional
Name of the UDP library. The default is ``"syslib"``.
name : str, optional
Name of the component. The default is ``None``.
udp_type : str, optional
Type of the UDP. The default is ``"Solid"``.
Returns
-------
:class:`pyaedt.modeler.Object3d.Object3d`
UDP object created.
References
----------
>>> oEditor.CreateUserDefinedPart
Examples
--------
>>> my_udp = self.aedtapp.modeler.create_udp(udp_dll_name="RMxprt/ClawPoleCore",
... udp_parameters_list=my_udpPairs,
... upd_library="syslib",
... udp_type="Solid")
<class 'pyaedt.modeler.Object3d.Object3d'>
"""
if ".dll" not in udp_dll_name:
vArg1 = [
"NAME:UserDefinedPrimitiveParameters",
"DllName:=",
udp_dll_name + ".dll",
"Library:=",
upd_library,
]
else:
vArg1 = ["NAME:UserDefinedPrimitiveParameters", "DllName:=", udp_dll_name, "Library:=", upd_library]
vArgParamVector = ["NAME:ParamVector"]
for pair in udp_parameters_list:
if isinstance(pair, list):
vArgParamVector.append(["NAME:Pair", "Name:=", pair[0], "Value:=", pair[1]])
else:
vArgParamVector.append(["NAME:Pair", "Name:=", pair.Name, "Value:=", pair.Value])
vArg1.append(vArgParamVector)
if name:
obj_name = name
else:
obj_name, ext = os.path.splitext(os.path.basename(udp_dll_name))
vArg2 = self._default_object_attributes(name=obj_name)
obj_name = self.oeditor.CreateUserDefinedPart(vArg1, vArg2)
return self._create_object(obj_name)
@pyaedt_function_handler()
def update_udp(self, object_name, operation_name, udp_parameters_list):
"""Update an existing geometrical object that was originally created using a user-defined primitive (UDP).
Parameters
----------
object_name : str
Name of the object to update.
operation_name : str
Name of the operation used to create the object.
udp_parameters_list : list
List of the UDP parameters to update and their value.
Returns
-------
bool
``True`` when successful.
References
----------
>>> oEditor.CreateUserDefinedPart
Examples
--------
>>> self.aedtapp.modeler.update_udp(object_name="ClawPoleCore",
... operation_name="CreateUserDefinedPart",
... udp_parameters_list=[["Length","110mm"], ["DiaGap","125mm"]])
True
"""
vArg1 = ["NAME:AllTabs"]
prop_servers = ["NAME:PropServers"]
prop_servers.append("{0}:{1}:1".format(object_name, operation_name))
cmd_tab = ["NAME:Geometry3DCmdTab"]
cmd_tab.append(prop_servers)
changed_props = ["NAME:ChangedProps"]
for pair in udp_parameters_list:
if isinstance(pair, list):
changed_props.append(["NAME:{0}".format(pair[0]), "Value:=", pair[1]])
else:
changed_props.append(["NAME:", pair.Name, "Value:=", pair.Value])
cmd_tab.append(changed_props)
vArg1.append(cmd_tab)
self.oeditor.ChangeProperty(vArg1)
return True
@pyaedt_function_handler()
def delete(self, objects=None):
"""Delete objects or groups.
Parameters
----------
objects : list, optional
List of objects or group names. The default is ``None``,
in which case all objects are deleted.
Returns
-------
bool
``True`` when successful, ``False`` when failed
References
----------
>>> oEditor.Delete
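Examples
--------
A minimal sketch, assuming the named objects exist in the design; the
names are illustrative.
>>> aedtapp.modeler.delete(["MyBox1", "MyBox2"])
True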
"""
if objects is None:
objects = self.object_names
objects = self._modeler.convert_to_selections(objects, return_list=True)
for el in objects[:]:  # iterate over a copy because items may be removed
if el not in self.object_names and not list(self.oeditor.GetObjectsInGroup(el)):
objects.remove(el)
if not objects:
self.logger.warning("No objects to delete")
return False
slice_size = min(100, len(objects))
num_objects = len(objects)
remaining = num_objects
while remaining > 0:
objs = objects[:slice_size]
objects_str = self._modeler.convert_to_selections(objs, return_list=False)
arg = ["NAME:Selections", "Selections:=", objects_str]
try:
self.oeditor.Delete(arg)
except:
self.logger.warning("Failed to delete {}.".format(objects_str))
remaining -= slice_size
if remaining > 0:
objects = objects[slice_size:]
self._refresh_object_types()
if len(objects) > 0:
self.cleanup_objects()
self.logger.info("Deleted {} Objects: {}.".format(num_objects, objects_str))
return True
@pyaedt_function_handler()
def delete_objects_containing(self, contained_string, case_sensitive=True):
"""Delete all objects with a given prefix.
Parameters
----------
contained_string : str
String contained in the names of the objects to delete.
case_sensitive : bool, optional
Whether the string search is case-sensitive. The default is ``True``.
Returns
-------
bool
``True`` when successful, ``False`` when failed
References
----------
>>> oEditor.Delete
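Examples
--------
A minimal sketch; the search string is illustrative.
>>> aedtapp.modeler.delete_objects_containing("port", case_sensitive=False)
True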
"""
objnames = self.object_id_dict
num_del = 0
for el in objnames:
if case_sensitive:
if contained_string in el:
self.delete(el)
num_del += 1
else:
if contained_string.lower() in el.lower():
self.delete(el)
num_del += 1
self.logger.info("Deleted %s objects", num_del)
return True
@pyaedt_function_handler()
def get_model_bounding_box(self):
"""Retrieve the model's bounding box.
Returns
-------
list
List of 6 float values ``[min_x, min_y, min_z, max_x, max_y, max_z]``
for the bounding box.
References
----------
>>> oEditor.GetModelBoundingBox
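Examples
--------
A minimal sketch, assuming at least one object exists in the design.
>>> bounding_box = aedtapp.modeler.get_model_bounding_box()
>>> len(bounding_box)
6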
"""
return self._app.modeler.get_model_bounding_box()
@pyaedt_function_handler()
def get_obj_id(self, objname):
"""Return the object ID from an object name.
Parameters
----------
objname : str
Name of the object.
Returns
-------
int
Object ID.
"""
if objname in self.object_id_dict:
return self.object_id_dict[objname]
return None
@pyaedt_function_handler()
def get_object_from_name(self, objname):
"""Return the object from an object name.
Parameters
----------
objname : str
Name of the object.
Returns
-------
:class:`pyaedt.modeler.Object3d.Object3d`
3D object returned.
"""
if objname in self.object_id_dict:
obj_id = self.get_obj_id(objname)
return self.objects[obj_id]
@pyaedt_function_handler()
def get_objects_w_string(self, stringname, case_sensitive=True):
"""Retrieve all objects with a given string in their names.
Parameters
----------
stringname : str
String to search object names for.
case_sensitive : bool, optional
Whether the string is case-sensitive. The default is ``True``.
Returns
-------
list
List of object names with the given string.
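Examples
--------
A minimal sketch; the search string and returned names are illustrative.
>>> aedtapp.modeler.get_objects_w_string("Box", case_sensitive=False)
['MyBox1', 'MyBox2']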
"""
list_objs = []
for el in self.objects:
if case_sensitive:
if stringname in self.objects[el].name:
list_objs.append(self.objects[el].name)
else:
if stringname.lower() in self.objects[el].name.lower():
list_objs.append(self.objects[el].name)
return list_objs
@pyaedt_function_handler()
def refresh(self):
"""Refresh this object."""
self._solids = []
self._sheets = []
self._lines = []
self._points = []
self._point_names = {}
self._unclassified = []
self._all_object_names = []
self.objects = {}
self.object_id_dict = {}
self._currentId = 0
self.refresh_all_ids()
# self._refresh_all_ids_from_aedt_file()
@pyaedt_function_handler()
def cleanup_objects(self):
"""Clean up objects that no longer exist in the modeler because
they were removed by previous operations.
This method also updates object IDs that may have changed via
a modeler operation such as :func:`pyaedt.modeler.Model3D.Modeler3D.unite`
or :func:`pyaedt.modeler.Model2D.Modeler2D.unite`.
Returns
-------
dict
Dictionary of updated object IDs.
"""
new_object_dict = {}
new_object_id_dict = {}
all_objects = self.object_names
all_unclassified = self.unclassified_objects
for old_id, obj in self.objects.items():
if obj.name in all_objects or obj.name in all_unclassified:
updated_id = obj.id # By calling the object property we get the new id
new_object_id_dict[obj.name] = updated_id
new_object_dict[updated_id] = obj
self.objects = new_object_dict
self.object_id_dict = new_object_id_dict
return new_object_id_dict
@pyaedt_function_handler()
def remove_point(self, name):
"""Remove a point.
Parameters
----------
name : str
Name of the point to be removed.
"""
self._points.remove(self.points_by_name[name])
del self.points_by_name[name]
@pyaedt_function_handler()
def find_new_objects(self):
"""Find any new objects in the modeler that were created
by previous operations.
Returns
-------
dict
Dictionary of new objects.
"""
new_objects = []
for obj_name in self.object_names:
if obj_name not in self.object_id_dict:
new_objects.append(obj_name)
return new_objects
@pyaedt_function_handler()
def add_new_objects(self):
"""Add objects that have been created in the modeler by
previous operations.
Returns
-------
list
List of added objects.
"""
added_objects = []
for obj_name in self.object_names:
if obj_name not in self.object_id_dict:
self._create_object(obj_name)
added_objects.append(obj_name)
return added_objects
# TODO Eliminate this - check about import_3d_cad
# Should no longer be a problem
@pyaedt_function_handler()
def refresh_all_ids(self):
"""Refresh all IDs."""
self.add_new_objects()
self.cleanup_objects()
return len(self.objects)
@pyaedt_function_handler()
def get_objects_by_material(self, materialname):
"""Retrieve a list of the IDs for objects of a specified material.
Parameters
----------
materialname : str
Name of the material.
Returns
-------
list
List of IDs for objects of the specified material.
References
----------
>>> oEditor.GetObjectsByMaterial
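Examples
--------
A minimal sketch, assuming ``"copper"`` is assigned to at least one
object in the design.
>>> ids = aedtapp.modeler.get_objects_by_material("copper")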
"""
obj_lst = []
for el in self.objects:
if (
self.objects[el].material_name == materialname
or self.objects[el].material_name == '"' + materialname + '"'
):
obj_lst.append(el)
return obj_lst
@pyaedt_function_handler()
def find_closest_edges(self, start_obj, end_obj, port_direction=0):
"""Retrieve the two closest edges that are not perpendicular for two objects.
Parameters
----------
start_obj : str
Name of the starting object.
end_obj : str
Name of the ending object.
port_direction : int, optional
Direction of the port to which to give edge precedence when more than two pairs
are at the same distance. For example, for a coax or microstrip, precedence is given
to the edges that are on the given axis direction. Options are ``0`` through ``5``,
corresponding to ``"XNeg"``, ``"YNeg"``, ``"ZNeg"``, ``"XPos"``, ``"YPos"``, and ``"ZPos"``.
The default is ``0``.
Returns
-------
list
List with two edges if present.
"""
start_obj = self._resolve_object(start_obj)
end_obj = self._resolve_object(end_obj)
edge_start_list = None
edge_stop_list = None
if port_direction == 0:
if start_obj.bottom_face_x:
edge_start_list = start_obj.bottom_face_x.edges
if end_obj.bottom_face_x:
edge_stop_list = end_obj.bottom_face_x.edges
elif port_direction == 3:
if start_obj.top_face_x:
edge_start_list = start_obj.top_face_x.edges
if end_obj.top_face_x:
edge_stop_list = end_obj.top_face_x.edges
elif port_direction == 1:
if start_obj.bottom_face_y:
edge_start_list = start_obj.bottom_face_y.edges
if end_obj.bottom_face_y:
edge_stop_list = end_obj.bottom_face_y.edges
elif port_direction == 4:
if start_obj.top_face_y:
edge_start_list = start_obj.top_face_y.edges
if end_obj.top_face_y:
edge_stop_list = end_obj.top_face_y.edges
elif port_direction == 2:
if start_obj.bottom_face_z:
edge_start_list = start_obj.bottom_face_z.edges
if end_obj.bottom_face_z:
edge_stop_list = end_obj.bottom_face_z.edges
elif port_direction == 5:
if start_obj.top_face_z:
edge_start_list = start_obj.top_face_z.edges
if end_obj.top_face_z:
edge_stop_list = end_obj.top_face_z.edges
if not edge_start_list:
edge_start_list = start_obj.edges
if not edge_stop_list:
edge_stop_list = end_obj.edges
mindist = 1e6
tol = 1e-12
pos_tol = 1e-6
edge_list = []
actual_point = None
is_parallel = False
for el in edge_start_list:
vertices_i = el.vertices
vertex1_i = None
vertex2_i = None
if len(vertices_i) == 2: # normal segment edge
vertex1_i = vertices_i[0].position
vertex2_i = vertices_i[1].position
start_midpoint = el.midpoint
elif len(vertices_i) == 1:
# TODO why do we need this ?
start_midpoint = vertices_i[0].position
else:
continue
for el1 in edge_stop_list:
vertices_j = el1.vertices
vertex1_j = None
vertex2_j = None
if len(vertices_j) == 2: # normal segment edge
vertex1_j = vertices_j[0].position
vertex2_j = vertices_j[1].position
end_midpoint = el1.midpoint
elif len(vertices_j) == 1:
end_midpoint = vertices_j[0].position
else:
continue
parallel_edges = False
vect = None
if vertex1_i and vertex1_j:
if (
abs(
GeometryOperators._v_dot(
GeometryOperators.v_points(vertex1_i, vertex2_i),
GeometryOperators.v_points(vertex1_j, vertex2_j),
)
)
< tol
):
continue  # skip perpendicular edges
if GeometryOperators.is_parallel(vertex1_i, vertex2_i, vertex1_j, vertex2_j):
parallel_edges = True
vert_dist_sum = GeometryOperators.arrays_positions_sum(
[vertex1_i, vertex2_i], [vertex1_j, vertex2_j]
)
vect = GeometryOperators.distance_vector(start_midpoint, vertex1_j, vertex2_j)
else:
vert_dist_sum = GeometryOperators.arrays_positions_sum([start_midpoint], [end_midpoint])
# dist = abs(_v_norm(vect))
if parallel_edges:
pd1 = GeometryOperators.points_distance(vertex1_i, vertex2_i)
pd2 = GeometryOperators.points_distance(vertex1_j, vertex2_j)
if pd1 < pd2 and not GeometryOperators.is_projection_inside(
vertex1_i, vertex2_i, vertex1_j, vertex2_j
):
continue
elif pd1 >= pd2 and not GeometryOperators.is_projection_inside(
vertex1_j, vertex2_j, vertex1_i, vertex2_i
):
continue
if actual_point is None:
edge_list = [el, el1]
is_parallel = parallel_edges
actual_point = GeometryOperators.find_point_on_plane([start_midpoint, end_midpoint], port_direction)
mindist = vert_dist_sum
else:
new_point = GeometryOperators.find_point_on_plane([start_midpoint, end_midpoint], port_direction)
if (port_direction <= 2 and new_point - actual_point < 0) or (
port_direction > 2 and actual_point - new_point < 0
):
edge_list = [el, el1]
is_parallel = parallel_edges
actual_point = new_point
mindist = vert_dist_sum
elif port_direction <= 2 and new_point - actual_point < tol and vert_dist_sum - mindist < pos_tol:
edge_list = [el, el1]
is_parallel = parallel_edges
actual_point = new_point
mindist = vert_dist_sum
elif port_direction > 2 and actual_point - new_point < tol and vert_dist_sum - mindist < pos_tol:
edge_list = [el, el1]
is_parallel = parallel_edges
actual_point = new_point
mindist = vert_dist_sum
return edge_list, is_parallel
@pyaedt_function_handler()
def get_equivalent_parallel_edges(self, edgelist, portonplane=True, axisdir=0, startobj="", endobject=""):
"""Create two new edges that are parallel and equal to the smallest edge given a parallel couple of edges.
Parameters
----------
edgelist : list
List of two parallel edges.
portonplane : bool, optional
Whether edges are to be on the plane orthogonal to the axis direction.
The default is ``True``.
axisdir : int, optional
Axis direction. Choices are ``0`` through ``5``. The default is ``0``.
startobj : str, optional
Name of the starting object. The default is ``""``.
endobject : str, optional
Name of the ending object. The default is ``""``.
Returns
-------
list
List of two created edges.
"""
if isinstance(edgelist[0], str):
edgelist[0] = self.get_object_from_name(edgelist[0])
if isinstance(edgelist[1], str):
edgelist[1] = self.get_object_from_name(edgelist[1])
l1 = edgelist[0].length
l2 = edgelist[1].length
if l1 < l2:
orig_edge = edgelist[0]
dest_edge = edgelist[1]
else:
orig_edge = edgelist[1]
dest_edge = edgelist[0]
first_edge = self.create_object_from_edge(orig_edge)
second_edge = self.create_object_from_edge(orig_edge)
ver1 = orig_edge.vertices
ver2 = dest_edge.vertices
if len(ver2) == 2:
p = ver1[0].position
a1 = ver2[0].position
a2 = ver2[1].position
vect = GeometryOperators.distance_vector(p, a1, a2)
if portonplane:
vect[divmod(axisdir, 3)[1]] = 0
# TODO: can we avoid this translate operation - is there another way to check ?
self.modeler.move(second_edge, vect)
p_check = second_edge.vertices[0].position
p_check2 = second_edge.vertices[1].position
# elif len(ver2) == 1: # for circular edges with one vertex
# p_check = first_edge.vertices[0].position
# p_check2 = second_edge.vertices[0].position
else:
self.delete(first_edge)
self.delete(second_edge)
return False
obj_check = self.get_bodynames_from_position(p_check)
obj_check2 = self.get_bodynames_from_position(p_check2)
# if (startobj in obj_check and endobject in obj_check2) or (startobj in obj_check2 and endobject in obj_check):
if (startobj in obj_check or endobject in obj_check) and (startobj in obj_check2 or endobject in obj_check2):
if l1 < l2:
return_edges = [first_edge, second_edge]
else:
return_edges = [second_edge, first_edge]
return return_edges
else:
self.delete(second_edge)
self.delete(first_edge)
return None
@pyaedt_function_handler()
def get_object_faces(self, partId):
"""Retrieve the face IDs of a given object ID or object name.
Parameters
----------
partId : int or str
Object ID or object name.
Returns
-------
list
List of face IDs.
References
----------
>>> oEditor.GetFaceIDs
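Examples
--------
A minimal sketch, assuming ``"MyBox"`` is the name of an existing
object (the name is illustrative).
>>> face_ids = aedtapp.modeler.get_object_faces("MyBox")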
"""
oFaceIDs = []
if isinstance(partId, str) and partId in self.object_id_dict:
oFaceIDs = self.oeditor.GetFaceIDs(partId)
oFaceIDs = [int(i) for i in oFaceIDs]
elif partId in self.objects:
o = self.objects[partId]
name = o.name
oFaceIDs = self.oeditor.GetFaceIDs(name)
oFaceIDs = [int(i) for i in oFaceIDs]
return oFaceIDs
@pyaedt_function_handler()
def get_object_edges(self, partId):
"""Retrieve the edge IDs of a given object ID or object name.
Parameters
----------
partId : int or str
Object ID or object name.
Returns
-------
list
List of edge IDs.
References
----------
>>> oEditor.GetEdgeIDsFromObject
"""
oEdgeIDs = []
if isinstance(partId, str) and partId in self.object_id_dict:
oEdgeIDs = self.oeditor.GetEdgeIDsFromObject(partId)
oEdgeIDs = [int(i) for i in oEdgeIDs]
elif partId in self.objects:
o = self.objects[partId]
oEdgeIDs = self.oeditor.GetEdgeIDsFromObject(o.name)
oEdgeIDs = [int(i) for i in oEdgeIDs]
return oEdgeIDs
@pyaedt_function_handler()
def get_face_edges(self, partId):
"""Retrieve the edge IDs of a given face name or face ID.
Parameters
----------
partId : int or str
Object ID or object name.
Returns
-------
list
List of edge IDs.
References
----------
>>> oEditor.GetEdgeIDsFromFace
"""
oEdgeIDs = self.oeditor.GetEdgeIDsFromFace(partId)
oEdgeIDs = [int(i) for i in oEdgeIDs]
return oEdgeIDs
@pyaedt_function_handler()
def get_object_vertices(self, partID):
"""Retrieve the vertex IDs of a given object name or object ID.
Parameters
----------
partID : int or str
Object ID or object name.
Returns
-------
list
List of vertex IDs.
References
----------
>>> oEditor.GetVertexIDsFromObject
"""
oVertexIDs = []
if isinstance(partID, str) and partID in self.object_id_dict:
oVertexIDs = self.oeditor.GetVertexIDsFromObject(partID)
oVertexIDs = [int(i) for i in oVertexIDs]
elif partID in self.objects:
o = self.objects[partID]
oVertexIDs = self.oeditor.GetVertexIDsFromObject(o.name)
oVertexIDs = [int(i) for i in oVertexIDs]
return oVertexIDs
@pyaedt_function_handler()
def get_face_vertices(self, face_id):
"""Retrieve the vertex IDs of a given face ID or face name.
Parameters
----------
face_id : int or str
Face ID or sheet object name. Face IDs are available
using the methods :func:`pyaedt.modeler.Primitives3D.Primitives3D.get_object_faces`
or :func:`pyaedt.modeler.Primitives2D.Primitives2D.get_object_faces`.
Returns
-------
list
List of vertex IDs.
References
----------
>>> oEditor.GetVertexIDsFromFace
"""
try:
oVertexIDs = self.oeditor.GetVertexIDsFromFace(face_id)
except:
oVertexIDs = []
else:
oVertexIDs = [int(i) for i in oVertexIDs]
return oVertexIDs
@pyaedt_function_handler()
def get_edge_length(self, edgeID):
"""Get the length of an edge.
Parameters
----------
edgeID : int
ID of the edge.
Returns
-------
float
Edge length.
"""
vertexID = self.get_edge_vertices(edgeID)
if len(vertexID) < 2:
return 0
pos1 = self.get_vertex_position(vertexID[0])
pos2 = self.get_vertex_position(vertexID[1])
length = GeometryOperators.points_distance(pos1, pos2)
return length
@pyaedt_function_handler()
def get_edge_vertices(self, edgeID):
"""Retrieve the vertex IDs of a given edge ID or edge name.
Parameters
----------
edgeID : int, str
Edge ID or edge name, which is available using the
methods :func:`pyaedt.modeler.Primitives3D.Primitives3D.get_object_edges`
or :func:`pyaedt.modeler.Primitives2D.Primitives2D.get_object_edges`.
Returns
-------
list
List of vertex IDs.
References
----------
>>> oEditor.GetVertexIDsFromEdge
"""
try:
oVertexIDs = self.oeditor.GetVertexIDsFromEdge(edgeID)
except:
oVertexIDs = []
else:
oVertexIDs = [int(i) for i in oVertexIDs]
return oVertexIDs
@pyaedt_function_handler()
def get_vertex_position(self, vertex_id):
"""Retrieve a vector of vertex coordinates.
Parameters
----------
vertex_id : int or str
ID or name of the vertex.
Returns
-------
list
List of ``[x, y, z]`` coordinates indicating the position.
References
----------
>>> oEditor.GetVertexPosition
"""
try:
pos = self.oeditor.GetVertexPosition(vertex_id)
except:
position = []
else:
position = [float(i) for i in pos]
return position
@pyaedt_function_handler()
def get_face_area(self, face_id):
"""Retrieve the area of a given face ID.
Parameters
----------
face_id : int
ID of the face.
Returns
-------
float
Value for the face area.
References
----------
>>> oEditor.GetFaceArea
"""
area = self.oeditor.GetFaceArea(face_id)
return area
@pyaedt_function_handler()
def get_face_center(self, face_id):
"""Retrieve the center position for a given planar face ID.
Parameters
----------
face_id : int
ID of the face.
Returns
-------
list
A list of ``[x, y, z]`` coordinates for the
planar face center position.
References
----------
>>> oEditor.GetFaceCenter
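Examples
--------
A minimal sketch, assuming a 3D design is open in ``aedtapp``; the
object name is illustrative.
>>> box = aedtapp.modeler.create_box([0, 0, 0], [10, 10, 10], name="MyBox")
>>> center = aedtapp.modeler.get_face_center(box.faces[0].id)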
"""
try:
c = self.oeditor.GetFaceCenter(face_id)
except:
self.logger.warning("Non Planar Faces doesn't provide any Face Center")
return False
center = [float(i) for i in c]
return center
@pyaedt_function_handler()
def get_mid_points_on_dir(self, sheet, axisdir):
"""Retrieve midpoints on a given axis direction.
Parameters
----------
sheet : str or int
Name or ID of the sheet object.
axisdir : int
Axis direction. Choices are ``0`` through ``5``.
Returns
-------
tuple
Two midpoint coordinate lists ``[x, y, z]`` at the extremes of the sheet edges along the axis direction.
"""
edgesid = self.get_object_edges(sheet)
id = divmod(axisdir, 3)[1]
midpoint_array = []
for ed in edgesid:
midpoint_array.append(self.get_edge_midpoint(ed))
point0 = []
point1 = []
for el in midpoint_array:
if not point0:
point0 = el
point1 = el
elif axisdir < 3 and el[id] < point0[id] or axisdir > 2 and el[id] > point0[id]:
point0 = el
elif axisdir < 3 and el[id] > point1[id] or axisdir > 2 and el[id] < point1[id]:
point1 = el
return point0, point1
@pyaedt_function_handler()
def get_edge_midpoint(self, partID):
"""Retrieve the midpoint coordinates of a given edge ID or edge name.
Parameters
----------
partID : int or str
Object ID or object name.
Returns
-------
list
List of midpoint coordinates. For an edge with a single vertex, such as
a circle, the vertex position is returned. An empty list is returned if
the midpoint cannot be evaluated.
"""
if isinstance(partID, str) and partID in self.object_id_dict:
partID = self.object_id_dict[partID]
if partID in self.objects and self.objects[partID].object_type == "Line":
vertices = self.get_object_vertices(partID)
else:
try:
vertices = self.get_edge_vertices(partID)
except:
vertices = []
if len(vertices) == 2:
vertex1 = self.get_vertex_position(vertices[0])
vertex2 = self.get_vertex_position(vertices[1])
midpoint = GeometryOperators.get_mid_point(vertex1, vertex2)
return list(midpoint)
elif len(vertices) == 1:
return list(self.get_vertex_position(vertices[0]))
else:
return []
@pyaedt_function_handler()
def get_bodynames_from_position(self, position, units=None):
"""Retrieve the names of the objects that are in contact with a given point.
Parameters
----------
position : list
List of ``[x, y, z]`` coordinates for the point.
units : str, optional
Units, such as ``"m"``. The default is ``None``, in which case the
model units are used.
Returns
-------
list
List of object names.
References
----------
>>> oEditor.GetBodyNamesByPosition
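Examples
--------
A minimal sketch; the coordinates are illustrative and must touch at
least one object for the list to be non-empty.
>>> bodies = aedtapp.modeler.get_bodynames_from_position([0, 0, 0])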
"""
XCenter, YCenter, ZCenter = self._pos_with_arg(position, units)
vArg1 = ["NAME:Parameters"]
vArg1.append("XPosition:="), vArg1.append(XCenter)
vArg1.append("YPosition:="), vArg1.append(YCenter)
vArg1.append("ZPosition:="), vArg1.append(ZCenter)
list_of_bodies = list(self.oeditor.GetBodyNamesByPosition(vArg1))
return list_of_bodies
@pyaedt_function_handler()
def get_edgeid_from_position(self, position, obj_name=None, units=None):
"""Get an edge ID from a position.
Parameters
----------
position : list
List of ``[x, y, z]`` coordinates for the position.
obj_name : str, optional
Name of the object. The default is ``None``, in which case all
objects are searched.
units : str, optional
Units for the position, such as ``"m"``. The default is ``None``,
in which case the model units are used.
Returns
-------
int
Edge ID of the first object touching this position.
"""
if isinstance(obj_name, str):
object_list = [obj_name]
else:
object_list = self.object_names
edgeID = -1
XCenter, YCenter, ZCenter = self._pos_with_arg(position, units)
vArg1 = ["NAME:EdgeParameters"]
vArg1.append("BodyName:="), vArg1.append("")
vArg1.append("XPosition:="), vArg1.append(XCenter)
vArg1.append("YPosition:="), vArg1.append(YCenter)
vArg1.append("ZPosition:="), vArg1.append(ZCenter)
for obj in object_list:
vArg1[2] = obj
try:
edgeID = int(self.oeditor.GetEdgeByPosition(vArg1))
return edgeID
except:
pass
@pyaedt_function_handler()
def get_edgeids_from_vertexid(self, vertexid, obj_name):
"""Retrieve edge IDs for a vertex ID.
Parameters
----------
vertexid : int
Vertex ID.
obj_name :
Name of the object.
Returns
-------
List
List of edge IDs for the vertex ID.
References
----------
>>> oEditor.GetEdgeIDsFromObject
>>> oEditor.GetVertexIDsFromEdge
"""
edgeID = []
edges = self.get_object_edges(obj_name)
for edge in edges:
vertx = self.get_edge_vertices(edge)
if vertexid in vertx:
edgeID.append(edge)
return edgeID
@pyaedt_function_handler()
def get_faceid_from_position(self, position, obj_name=None, units=None):
"""Retrieve a face ID from a position.
Parameters
----------
position : list
List of ``[x, y, z]`` coordinates for the position.
obj_name : str, optional
Name of the object. The default is ``None``, in which case all
objects are searched.
units : str, optional
Units, such as ``"m"``. The default is ``None``, in which case the
model units are used.
Returns
-------
int
Face ID of the first object touching this position.
References
----------
>>> oEditor.GetFaceByPosition
"""
if isinstance(obj_name, str):
object_list = [obj_name]
else:
object_list = self.object_names
XCenter, YCenter, ZCenter = self._pos_with_arg(position, units)
vArg1 = ["NAME:FaceParameters"]
vArg1.append("BodyName:="), vArg1.append("")
vArg1.append("XPosition:="), vArg1.append(XCenter)
vArg1.append("YPosition:="), vArg1.append(YCenter)
vArg1.append("ZPosition:="), vArg1.append(ZCenter)
for obj in object_list:
vArg1[2] = obj
try:
face_id = self.oeditor.GetFaceByPosition(vArg1)
return face_id
except:
# Not Found, keep looking
pass
@pyaedt_function_handler()
def get_edges_on_bounding_box(self, sheets, return_colinear=True, tol=1e-6):
"""Retrieve the edges of the sheets passed in the input that are lying on the bounding box.
This method creates new lines for the detected edges and returns the IDs of these lines.
If required, only colinear edges are returned.
Parameters
----------
sheets : int, str, or list
ID or name for one or more sheets.
return_colinear : bool, optional
Whether to return only colinear edges. The default is ``True``.
If ``False``, all edges on the bounding box are returned.
tol : float, optional
Geometric tolerance. The default is ``1e-6``.
Returns
-------
list
List of edge IDs lying on the bounding box.
"""
port_sheets = self._modeler.convert_to_selections(sheets, return_list=True)
bb = self._modeler.get_model_bounding_box()
candidate_edges = []
for p in port_sheets:
edges = self[p].edges
for edge in edges:
vertices = edge.vertices
v_flag = False
for vertex in vertices:
v = vertex.position
xyz_flag = 0
if abs(v[0] - bb[0]) < tol or abs(v[0] - bb[3]) < tol:
xyz_flag += 1
if abs(v[1] - bb[1]) < tol or abs(v[1] - bb[4]) < tol:
xyz_flag += 1
if abs(v[2] - bb[2]) < tol or abs(v[2] - bb[5]) < tol:
xyz_flag += 1
if xyz_flag >= 2:
v_flag = True
else:
v_flag = False
break
if v_flag:
candidate_edges.append(edge)
if not return_colinear:
return candidate_edges
selected_edges = []
for i, edge_i in enumerate(candidate_edges[:-1]):
vertex1_i = edge_i.vertices[0].position
midpoint_i = edge_i.midpoint
for j, edge_j in enumerate(candidate_edges[i + 1 :]):
midpoint_j = edge_j.midpoint
area = GeometryOperators.get_triangle_area(midpoint_i, midpoint_j, vertex1_i)
if area < tol**2:
selected_edges.extend([edge_i, edge_j])
break
selected_edges = list(set(selected_edges))
for edge in selected_edges:
self.create_object_from_edge(edge)
time.sleep(aedt_wait_time)
return selected_edges
@pyaedt_function_handler()
def get_edges_for_circuit_port_from_sheet(
self, sheet, XY_plane=True, YZ_plane=True, XZ_plane=True, allow_perpendicular=False, tol=1e-6
):
"""Retrieve two edge IDs that are suitable for a circuit port from a sheet.
One edge belongs to the sheet passed in the input, and the second edge
is the closest coplanar edge to the first edge (aligned to the XY, YZ,
or XZ plane). This method creates new lines for the detected edges and returns
the IDs of these lines.
This method accepts one or more sheet objects as input,
while the method :func:`Primitives.get_edges_for_circuit_port`
accepts a face ID.
Parameters
----------
sheet : int, str, or list
ID or name for one or more sheets.
XY_plane : bool, optional
Whether the edge pair is allowed to lie on the XY plane.
The default is ``True``.
YZ_plane : bool, optional
Whether the edge pair is allowed to lie on the YZ plane.
The default is ``True``.
XZ_plane : bool, optional
Whether the edge pair is allowed to lie on the XZ plane.
The default is ``True``.
allow_perpendicular : bool, optional
Whether perpendicular edge pairs are allowed.
The default is ``False``.
tol : float, optional
Geometric tolerance. The default is ``1e-6``.
Returns
-------
list
List of edge IDs.
"""
tol2 = tol**2
port_sheet = self._modeler.convert_to_selections(sheet, return_list=True)
if len(port_sheet) > 1:
return []
else:
port_sheet = port_sheet[0]
port_edges = self.get_object_edges(port_sheet)
# find the bodies to exclude
port_sheet_midpoint = self.get_face_center(self.get_object_faces(port_sheet)[0])
point = self._modeler.Position(*port_sheet_midpoint)
list_of_bodies = self.get_bodynames_from_position(point)
# select all edges
all_edges = []
solids = [s for s in self.solid_names if s not in list_of_bodies]
for solid in solids:
edges = self.get_object_edges(solid)
all_edges.extend(edges)
all_edges = list(set(all_edges)) # remove duplicates
# select edges coplanar to port edges (aligned to XY, YZ, or XZ plane)
ux = [1.0, 0.0, 0.0]
uy = [0.0, 1.0, 0.0]
uz = [0.0, 0.0, 1.0]
midpoints = {}
candidate_edges = []
for ei in port_edges:
vertices_i = self.get_edge_vertices(ei)
if len(vertices_i) == 1: # maybe a circle
vertex1_i = self.get_vertex_position(vertices_i[0])
area_i = self.get_face_area(self.get_object_faces(port_sheet)[0])
if area_i is None or area_i < tol2: # degenerated face
continue
center_i = self.get_face_center(self.get_object_faces(port_sheet)[0])
if not center_i: # non planar face
continue
radius_i = GeometryOperators.points_distance(vertex1_i, center_i)
area_i_eval = math.pi * radius_i**2
if abs(area_i - area_i_eval) < tol2: # it is a circle
vertex2_i = center_i
midpoints[ei] = center_i
else: # not a circle
continue
elif len(vertices_i) == 2: # normal segment edge
vertex1_i = self.get_vertex_position(vertices_i[0])
vertex2_i = self.get_vertex_position(vertices_i[1])
midpoints[ei] = self.get_edge_midpoint(ei)
else: # undetermined edge --> skip
continue
for ej in all_edges:
vertices_j = self.get_edge_vertices(ej)
if len(vertices_j) == 1: # edge is an arc, not supported
continue
elif len(vertices_j) == 2: # normal segment edge
vertex1_j = self.get_vertex_position(vertices_j[0])
vertex2_j = self.get_vertex_position(vertices_j[1])
else: # undetermined edge --> skip
continue
if (
not allow_perpendicular
and abs(
GeometryOperators._v_dot(
GeometryOperators.v_points(vertex1_i, vertex2_i),
GeometryOperators.v_points(vertex1_j, vertex2_j),
)
)
< tol
):
continue
normal1 = GeometryOperators.v_cross(
GeometryOperators.v_points(vertex1_i, vertex2_i), GeometryOperators.v_points(vertex1_i, vertex1_j)
)
normal1_norm = GeometryOperators.v_norm(normal1)
if YZ_plane and abs(abs(GeometryOperators._v_dot(normal1, ux)) - normal1_norm) < tol:
pass
elif XZ_plane and abs(abs(GeometryOperators._v_dot(normal1, uy)) - normal1_norm) < tol:
pass
elif XY_plane and abs(abs(GeometryOperators._v_dot(normal1, uz)) - normal1_norm) < tol:
pass
else:
continue
vec1 = GeometryOperators.v_points(vertex1_i, vertex2_j)
if abs(GeometryOperators._v_dot(normal1, vec1)) < tol2: # the 4th point is coplanar
candidate_edges.append(ej)
minimum_distance = tol**-1
selected_edges = []
for ei in midpoints:
midpoint_i = midpoints[ei]
for ej in candidate_edges:
midpoint_j = self.get_edge_midpoint(ej)
d = GeometryOperators.points_distance(midpoint_i, midpoint_j)
if d < minimum_distance:
minimum_distance = d
selected_edges = [ei, ej]
if selected_edges:
new_edge1 = self.create_object_from_edge(selected_edges[0])
time.sleep(aedt_wait_time)
new_edge2 = self.create_object_from_edge(selected_edges[1])
return selected_edges
else:
return []
@pyaedt_function_handler()
def get_edges_for_circuit_port(
self, face_id, XY_plane=True, YZ_plane=True, XZ_plane=True, allow_perpendicular=False, tol=1e-6
):
"""Retrieve two edge IDs suitable for the circuit port.
One edge belongs to the face ID passed in the input, and the second edge
is the closest coplanar edge to the first edge (aligned to the XY, YZ,
or XZ plane). This method creates new lines for the detected edges and returns
the IDs of these lines.
This method accepts a face ID in the input, while the `get_edges_for_circuit_port_from_sheet`
method accepts one or more sheet objects.
Parameters
----------
face_id :
ID of the face.
XY_plane : bool, optional
Whether the edge pair is allowed to lie on the XY plane.
The default is ``True``.
YZ_plane : bool, optional
Whether the edge pair is allowed to lie on the YZ plane.
The default is ``True``.
XZ_plane : bool, optional
Whether the edge pair is allowed to lie on the XZ plane.
The default is ``True``.
allow_perpendicular : bool, optional
Whether perpendicular edge pairs are allowed.
The default is ``False``.
tol : float, optional
Geometric tolerance. The default is ``1e-6``.
Returns
-------
list
List of edge IDs.
"""
tol2 = tol**2
port_edges = self.get_face_edges(face_id)
# find the bodies to exclude
port_sheet_midpoint = self.get_face_center(face_id)
point = self._modeler.Position(port_sheet_midpoint)
list_of_bodies = self.get_bodynames_from_position(point)
# select all edges
all_edges = []
solids = [s for s in self.solid_names if s not in list_of_bodies]
for solid in solids:
edges = self.get_object_edges(solid)
all_edges.extend(edges)
all_edges = list(set(all_edges)) # remove duplicates
# select edges coplanar to port edges (aligned to XY, YZ, or XZ plane)
ux = [1.0, 0.0, 0.0]
uy = [0.0, 1.0, 0.0]
uz = [0.0, 0.0, 1.0]
midpoints = {}
candidate_edges = []
for ei in port_edges:
vertices_i = self.get_edge_vertices(ei)
if len(vertices_i) == 1: # maybe a circle
vertex1_i = self.get_vertex_position(vertices_i[0])
area_i = self.get_face_area(face_id)
if area_i is None or area_i < tol2: # degenerated face
continue
center_i = self.get_face_center(face_id)
if not center_i: # non planar face
continue
radius_i = GeometryOperators.points_distance(vertex1_i, center_i)
area_i_eval = math.pi * radius_i**2
if abs(area_i - area_i_eval) < tol2: # it is a circle
vertex2_i = center_i
midpoints[ei] = center_i
else: # not a circle
continue
elif len(vertices_i) == 2: # normal segment edge
vertex1_i = self.get_vertex_position(vertices_i[0])
vertex2_i = self.get_vertex_position(vertices_i[1])
midpoints[ei] = self.get_edge_midpoint(ei)
else: # undetermined edge --> skip
continue
for ej in all_edges:
vertices_j = self.get_edge_vertices(ej)
if len(vertices_j) == 1: # edge is an arc, not supported
continue
elif len(vertices_j) == 2: # normal segment edge
vertex1_j = self.get_vertex_position(vertices_j[0])
vertex2_j = self.get_vertex_position(vertices_j[1])
else: # undetermined edge --> skip
continue
if (
not allow_perpendicular
and abs(
GeometryOperators._v_dot(
GeometryOperators.v_points(vertex1_i, vertex2_i),
GeometryOperators.v_points(vertex1_j, vertex2_j),
)
)
< tol
):
continue
normal1 = GeometryOperators.v_cross(
GeometryOperators.v_points(vertex1_i, vertex2_i), GeometryOperators.v_points(vertex1_i, vertex1_j)
)
normal1_norm = GeometryOperators.v_norm(normal1)
if YZ_plane and abs(abs(GeometryOperators._v_dot(normal1, ux)) - normal1_norm) < tol:
pass
elif XZ_plane and abs(abs(GeometryOperators._v_dot(normal1, uy)) - normal1_norm) < tol:
pass
elif XY_plane and abs(abs(GeometryOperators._v_dot(normal1, uz)) - normal1_norm) < tol:
pass
else:
continue
vec1 = GeometryOperators.v_points(vertex1_i, vertex2_j)
if abs(GeometryOperators._v_dot(normal1, vec1)) < tol2: # the 4th point is coplanar
candidate_edges.append(ej)
minimum_distance = tol**-1
selected_edges = []
for ei in midpoints:
midpoint_i = midpoints[ei]
for ej in candidate_edges:
midpoint_j = self.get_edge_midpoint(ej)
d = GeometryOperators.points_distance(midpoint_i, midpoint_j)
if d < minimum_distance:
minimum_distance = d
selected_edges = [ei, ej]
if selected_edges:
new_edge1 = self.create_object_from_edge(selected_edges[0])
time.sleep(aedt_wait_time)
new_edge2 = self.create_object_from_edge(selected_edges[1])
return selected_edges
else:
return []
@pyaedt_function_handler()
def get_closest_edgeid_to_position(self, position, units=None):
"""Get the edge ID closest to a given position.
Parameters
----------
position : list
List of ``[x,y,z]`` coordinates for the position.
units : str, optional
Units for the position, such as ``"m"``. The default is ``None``, in which case the model units are used.
Returns
-------
int
Edge ID of the edge closest to this position.
"""
if isinstance(position, list):
position = self.modeler.Position(position)
bodies = self.get_bodynames_from_position(position, units)
# the function searches in all bodies, not efficient
face_id = self.get_faceid_from_position(position, obj_name=bodies[0], units=units)
edges = self.get_face_edges(face_id)
distance = 1e6
selected_edge = None
for edge in edges:
midpoint = self.get_edge_midpoint(edge)
if self.model_units == "mm" and units == "meter":
midpoint = [i / 1000 for i in midpoint]
elif self.model_units == "meter" and units == "mm":
midpoint = [i * 1000 for i in midpoint]
d = GeometryOperators.points_distance(midpoint, [position.X, position.Y, position.Z])
if d < distance:
selected_edge = int(edge)
distance = d
return selected_edge
@pyaedt_function_handler()
def _resolve_object(self, object):
if isinstance(object, Object3d):
return object
else:
return self[object]
@pyaedt_function_handler()
def _get_model_objects(self, model=True):
"""Retrieve all model objects.
Parameters
----------
model : bool, optional
Whether to retrieve all model objects. The default is ``True``. When ``False``,
all non-model objects are retrieved.
Returns
-------
list
List of retrieved objects.
"""
list_objs = []
for id, obj in self.objects.items():
if obj.model == model:
list_objs.append(obj.name)
return list_objs
@pyaedt_function_handler()
def _check_material(self, matname, defaultmatname):
"""Check for a material name.
If a material name exists, it is assigned. Otherwise, the material
specified as the default is assigned.
Parameters
----------
matname : str
Name of the material.
defaultmatname : str
Name of the default material to assign if ``matname`` does not exist.
Returns
-------
tuple
Material name and a Boolean that is ``True`` when the material
is a dielectric.
"""
if isinstance(matname, Material):
if self._app._design_type == "HFSS":
return matname.name, matname.is_dielectric()
else:
return matname.name, True
if matname:
if self._app.materials[matname]:
if self._app._design_type == "HFSS":
return self._app.materials[matname].name, self._app.materials[matname].is_dielectric()
else:
return self._app.materials[matname].name, True
else:
self.logger.warning("Material %s doesn not exists. Assigning default material", matname)
if self._app._design_type == "HFSS":
return defaultmatname, self._app.materials.material_keys[defaultmatname].is_dielectric()
else:
return defaultmatname, True
@pyaedt_function_handler()
def _refresh_solids(self):
test = list(self.oeditor.GetObjectsInGroup("Solids"))
if test is None or test is False:
assert False, "Get Solids is failing"
elif test is True:
self._solids = []  # In IronPython True is returned when no solids are present
else:
self._solids = list(test)
self._all_object_names = self._solids + self._sheets + self._lines + self._points
@pyaedt_function_handler()
def _refresh_sheets(self):
test = list(self.oeditor.GetObjectsInGroup("Sheets"))
if test is None or test is False:
assert False, "Get Sheets is failing"
elif test is True:
self._sheets = [] # In IronPython True is returned when no sheets are present
else:
self._sheets = list(test)
self._all_object_names = self._solids + self._sheets + self._lines + self._points
@pyaedt_function_handler()
def _refresh_lines(self):
test = list(self.oeditor.GetObjectsInGroup("Lines"))
if test is None or test is False:
assert False, "Get Lines is failing"
elif test is True:
self._lines = [] # In IronPython True is returned when no lines are present
else:
self._lines = list(test)
self._all_object_names = self._solids + self._sheets + self._lines + self._points
@pyaedt_function_handler()
def _refresh_points(self):
test = list(self.oeditor.GetObjectsInGroup("Points"))
if test is None or test is False:
assert False, "Get Points is failing"
elif test is True:
self._points = [] # In IronPython True is returned when no points are present
else:
self._points = list(test)
self._all_object_names = self._solids + self._sheets + self._lines + self._points
@pyaedt_function_handler()
def _refresh_unclassified(self):
test = _retry_ntimes(10, self.oeditor.GetObjectsInGroup, "Unclassified")
if test is None or test is False:
self._unclassified = []
self.logger.debug("Unclassified is failing")
elif test is True:
self._unclassified = [] # In IronPython True is returned when no unclassified are present
else:
self._unclassified = list(test)
@pyaedt_function_handler()
def _refresh_object_types(self):
self._refresh_solids()
self._refresh_sheets()
self._refresh_lines()
self._refresh_points()
self._all_object_names = self._solids + self._sheets + self._lines + self._points
@pyaedt_function_handler()
def _create_object(self, name):
o = Object3d(self, name)
new_id = o.id
self.objects[new_id] = o
self.object_id_dict[o.name] = new_id
return o
@pyaedt_function_handler()
def _create_point(self, name):
point = Point(self, name)
self._point_names[name] = point
self._points.append(point)
return point
@pyaedt_function_handler()
def _refresh_all_ids_from_aedt_file(self):
if not self._app.design_properties or "ModelSetup" not in self._app.design_properties:
return False
try:
groups = self._app.design_properties["ModelSetup"]["GeometryCore"]["GeometryOperations"]["Groups"]["Group"]
except KeyError:
groups = []
if not isinstance(groups, list):
groups = [groups]
try:
self._app.design_properties["ModelSetup"]["GeometryCore"]["GeometryOperations"]["ToplevelParts"][
"GeometryPart"
]
except KeyError:
return 0
for el in self._app.design_properties["ModelSetup"]["GeometryCore"]["GeometryOperations"]["ToplevelParts"][
"GeometryPart"
]:
if isinstance(el, (OrderedDict, dict)):
attribs = el["Attributes"]
else:
attribs = self._app.design_properties["ModelSetup"]["GeometryCore"]["GeometryOperations"][
"ToplevelParts"
]["GeometryPart"]["Attributes"]
if attribs["Name"] in self._all_object_names:
o = self._create_object(name=attribs["Name"])
o._part_coordinate_system = attribs["PartCoordinateSystem"]
if "NonModel" in attribs["Flags"]:
o._model = False
else:
o._model = True
if "Wireframe" in attribs["Flags"]:
o._wireframe = True
else:
o._wireframe = False
groupname = ""
for group in groups:
if attribs["GroupId"] == group["GroupID"]:
groupname = group["Attributes"]["Name"]
o._m_groupName = groupname
try:
o._color = tuple(int(x) for x in attribs["Color"][1:-1].split(" "))
except:
o._color = None
o._surface_material = attribs.get("SurfaceMaterialValue", None)
if o._surface_material:
o._surface_material = o._surface_material[1:-1].lower()
if "MaterialValue" in attribs:
o._material_name = attribs["MaterialValue"][1:-1].lower()
else:
o._material_name = attribs.get("MaterialName", None)
o._is_updated = True
return len(self.objects)
@pyaedt_function_handler()
def _default_object_attributes(self, name=None, matname=None, flags=""):
if not matname:
matname = self.defaultmaterial
material, is_dielectric = self._check_material(matname, self.defaultmaterial)
solve_inside = False
if is_dielectric:
solve_inside = True
if not name:
name = _uname()
try:
color = str(tuple(self._app.materials.material_keys[material].material_appearance)).replace(",", " ")
except:
color = "(132 132 193)"
if material in ["vacuum", "air", "glass", "water_distilled", "water_fresh", "water_sea"]:
transparency = 0.8
else:
transparency = 0.2
args = [
"NAME:Attributes",
"Name:=",
name,
"Flags:=",
flags,
"Color:=",
color,
"Transparency:=",
transparency,
"PartCoordinateSystem:=",
"Global",
"SolveInside:=",
solve_inside,
]
if self.version >= "2019.3":
args += [
"MaterialValue:=",
chr(34) + material + chr(34),
"UDMId:=",
"",
"SurfaceMaterialValue:=",
chr(34) + "Steel-oxidised-surface" + chr(34),
]
else:
args += ["MaterialName:=", material]
if self.version >= "2021.2":
args += [
"ShellElement:=",
False,
"ShellElementThickness:=",
"0mm",
"IsMaterialEditable:=",
True,
"UseMaterialAppearance:=",
False,
"IsLightweight:=",
False,
]
return args
@pyaedt_function_handler()
def _crosssection_arguments(self, type, orient, width, topwidth, height, num_seg, bend_type=None):
"""Generate the properties array for the polyline cross-section."""
arg_str = ["NAME:PolylineXSection"]
# Set the default section type to "None"
section_type = type
if not section_type:
section_type = "None"
# Set the default orientation to "Auto"
section_orient = orient
if not section_orient:
section_orient = "Auto"
# Set the default bend-type to "Corner"
section_bend = bend_type
if not section_bend:
section_bend = "Corner"
# Ensure number-of segments is valid
if num_seg:
assert num_seg > 2, "Number of segments for a cross-section must be 0 or greater than 2."
model_units = self.model_units
arg_str += ["XSectionType:=", section_type]
arg_str += ["XSectionOrient:=", section_orient]
arg_str += ["XSectionWidth:=", _dim_arg(width, model_units)]
arg_str += ["XSectionTopWidth:=", _dim_arg(topwidth, model_units)]
arg_str += ["XSectionHeight:=", _dim_arg(height, model_units)]
arg_str += ["XSectionNumSegments:=", "{}".format(num_seg)]
arg_str += ["XSectionBendType:=", section_bend]
return arg_str
@pyaedt_function_handler()
def _arg_with_dim(self, value, units=None):
if isinstance(value, str):
val = value
else:
if units is None:
units = self.model_units
val = "{0}{1}".format(value, units)
return val
@pyaedt_function_handler()
def _pos_with_arg(self, pos, units=None):
xpos = self._arg_with_dim(pos[0], units)
if len(pos) < 2:
ypos = self._arg_with_dim(0, units)
else:
ypos = self._arg_with_dim(pos[1], units)
if len(pos) < 3:
zpos = self._arg_with_dim(0, units)
else:
zpos = self._arg_with_dim(pos[2], units)
return xpos, ypos, zpos
@pyaedt_function_handler()
def _str_list(self, theList):
szList = ""
for id in theList:
o = self.objects[id]
if len(szList):
szList += ","
szList += str(o.name)
return szList
@pyaedt_function_handler()
def _find_object_from_edge_id(self, lval):
objList = []
objListSheets = self.sheet_names
if len(objListSheets) > 0:
objList.extend(objListSheets)
objListSolids = self.solid_names
if len(objListSolids) > 0:
objList.extend(objListSolids)
for obj in objList:
val = _retry_ntimes(10, self.oeditor.GetEdgeIDsFromObject, obj)
if not (isinstance(val, bool)) and str(lval) in list(val):
return obj
return None
@pyaedt_function_handler()
def _find_object_from_face_id(self, lval):
if self.oeditor is not None:
objList = []
objListSheets = self.sheet_names
if len(objListSheets) > 0:
objList.extend(objListSheets)
objListSolids = self.solid_names
if len(objListSolids) > 0:
objList.extend(objListSolids)
for obj in objList:
face_ids = list(self.oeditor.GetFaceIDs(obj))
if str(lval) in face_ids:
return obj
return None
@pyaedt_function_handler()
def __getitem__(self, partId):
"""Return the object ``Object3D`` for a given object ID or object name.
Parameters
----------
partId : int or str
Object ID or object name from the 3D modeler.
Returns
-------
:class:`pyaedt.modeler.Object3d.Object3d`
Returns None if the part ID or the object name is not found.
"""
if isinstance(partId, int) and partId in self.objects:
return self.objects[partId]
elif partId in self.object_id_dict:
return self.objects[self.object_id_dict[partId]]
elif isinstance(partId, Object3d):
return partId
return None
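# Illustrative sketch appended for documentation only (not part of the original
# pyaedt module): typical read-only use of the lookup API above. ``prims`` stands
# for an already-initialised instance of this class (reachable from an app's
# modeler); the object name "MyBox" is a placeholder.
def _example_primitives_lookup(prims):
    obj = prims["MyBox"]              # __getitem__ by object name
    same_obj = prims[obj.id]          # __getitem__ by numeric object ID
    edge_id = prims.get_closest_edgeid_to_position([0, 0, 0])
    return obj is same_obj, edge_id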
| python | 124,715 |
"""
Copyright (C) 2018-2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging as log
import os
import numpy as np
from extensions.back.TopKNormalizer import TopKNormalizer
from extensions.ops.Cast import Cast
from extensions.ops.ReduceOps import ReduceOp
from extensions.ops.activation_ops import Activation
from extensions.ops.elementwise import Elementwise, LogicalElementwise, BiasAdd, Div, Mul, Pow, Sub
from extensions.ops.embedding_bag import EmbeddingBagBase
from extensions.ops.psroipooling import DeformablePSROIPoolingOp
from extensions.ops.scatter import Scatter
from extensions.ops.scatternd import ScatterNDBase
from extensions.ops.split import Split, VariadicSplit
from mo.front.common.partial_infer.utils import int64_array
from mo.graph.graph import Graph, Node
from mo.ops.clamp import AttributedClamp
from mo.ops.convolution import Convolution
from mo.ops.deconvolution import Deconvolution
from mo.ops.op import Op
from mo.ops.pooling import Pooling
from mo.ops.result import Result
from mo.utils.class_registration import update_registration
from mo.utils.import_extensions import import_by_path
from mo.utils.ir_reader.extender import Extender
# Operations not registered in collect_ops() function
custom_ops = {
'AvgPool': Pooling,
'BiasAdd': BiasAdd,
'Convert': Cast,
'ConvolutionBackpropData': Deconvolution,
'DeformablePSROIPooling': DeformablePSROIPoolingOp,
'Divide': Div,
'GroupConvolution': Convolution,
'GroupConvolutionBackpropData': Deconvolution,
'MaxPool': Pooling,
'Multiply': Mul,
'Power': Pow,
'Split': Split,
'Subtract': Sub,
'VariadicSplit': VariadicSplit,
'Clamp': AttributedClamp,
}
def collect_ops(path: str):
"""
    A function to register all MO ops
:param path: Path to Model Optimizer folder
:return:
"""
import_by_path(os.path.join(path, 'mo', 'ops'), ['mo', 'ops'])
import_by_path(os.path.join(path, 'extensions', 'ops'), ['extensions', 'ops'])
update_registration(classes=[Op, Activation, Elementwise, EmbeddingBagBase,
LogicalElementwise, ReduceOp, Scatter, ScatterNDBase],
enabled_transforms=[], disabled_transforms=[])
def collect_extenders(path: str):
"""
    A function to register all MO IR Reader extenders
:param path: Path to Model Optimizer folder
:return:
"""
import_by_path(os.path.join(path, 'mo', 'utils', 'ir_reader', 'extenders'),
['mo', 'utils', 'ir_reader', 'extenders'])
update_registration(classes=[Extender], enabled_transforms=[], disabled_transforms=[])
def collect_node_outputs(node: Node) -> dict:
"""
    Function to collect the output connections of a node.
    :param node: node to collect connections from
:return: dictionary of the form {out_port: [(input_port, destination_node_id)]}
"""
result = dict()
for out_port_idx, out_port in node.out_ports().items():
dest_info = []
for d in out_port.get_destinations():
dest_info.append((d.idx, d.node.id))
result[out_port_idx] = dest_info
return result
def restore_correct_ports(graph: Graph):
"""
    Function renumbers ports from IE to MO numbering and adds ports to all nodes in the graph.
:param graph:
:return:
"""
for node_id, attrs in graph.nodes(data=True):
if '_in_ports' not in attrs:
attrs['_in_ports'] = set()
if '_out_ports' not in attrs:
attrs['_out_ports'] = set()
for u, v, k, d in graph.edges(data=True, keys=True):
from_node_attrs = graph.node[u]
to_node_attrs = graph.node[v]
is_control_flow = 'control_flow_edge' in d and d['control_flow_edge'] is True
if 'in' in d:
in_port_id = d['in'] if not is_control_flow else 'control_flow_' + str(d['in'])
to_node_attrs['_in_ports'].update({in_port_id: {'control_flow': is_control_flow}})
if 'out' in d:
num_of_in_nodes = len(Node(graph, u).in_nodes())
decremented_number = d['out'] - num_of_in_nodes
out_port_id = decremented_number if not is_control_flow else 'control_flow_' + str(decremented_number)
from_node_attrs['_out_ports'].update({out_port_id: {'control_flow': is_control_flow}})
d['out'] = decremented_number
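# Illustrative helper added for documentation (not in the original Model Optimizer
# sources): a worked example of the renumbering above. IE IR numbers ports
# consecutively over inputs then outputs, so a node with two inputs stores its
# first output edge as ``out == 2``; MO expects output ports to start at 0.
def _example_out_port_renumbering(ie_out_index=2, num_of_in_nodes=2):
    return ie_out_index - num_of_in_nodes  # 2 - 2 == 0, i.e. MO output port 0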
def propagate_const_values(op: Node):
"""
    Function propagates the const value from the input data node and reshapes it to the correct shape.
:param op:
:return:
"""
assert op.soft_get('type') == 'Const', 'Wrong operation type, {} instead of Const!' \
''.format(op.soft_get('type'))
in_data_node = op.in_node()
out_data_node = op.out_node()
value = in_data_node.value
    assert len(op.out_node(0).out_nodes()) > 0, 'Const node {} has no consumers.'.format(op.soft_get('name'))
if op.out_node(0).out_node(0).type == 'BinaryConvolution':
# Unpack binary weights for binary convolution (revert PackBinaryWeights transformation)
weights_rounded = np.unpackbits(value)
weights_rounded.dtype = np.int8
for elem in range(len(weights_rounded)):
if weights_rounded[elem] == 0:
weights_rounded[elem] -= 1 # pylint: disable=unsupported-assignment-operation
assert len(weights_rounded) % 8 == 0
weights_rounded = weights_rounded.reshape([len(weights_rounded) // 8, 8]) # pylint: disable=no-member
weights_rounded = np.flip(weights_rounded, axis=1)
value = weights_rounded.flatten()
op['shape'] = out_data_node.shape
# Reshape data node value for correct shape
op['value'] = np.reshape(value, op.shape)
def groupconv_to_conv(op: Node):
"""
    Function converts a GroupConvolution op back to a Convolution op by reshaping its weights
:param op:
:return:
"""
assert op.soft_get('type') == 'GroupConvolution', \
'Wrong operation type, {} instead of GroupConvolution!'.format(op.soft_get('type'))
weights_shape = op.in_port(1).data.get_shape()
group = weights_shape[0]
new_shape = [weights_shape[1] * group, *weights_shape[2:]]
weights_node = op.in_port(1).get_source().node
if weights_node.type == 'Const':
weights_node.value = np.reshape(weights_node.value, new_shape)
elif weights_node.type == 'Reshape':
# we remove reshape node added in ConvolutionWithGroupsResolver pass
assert weights_node.in_port(0).get_source().data.get_shape() == new_shape, \
'Weight shape and calculated shape mismatch in GroupConv node {}.'.format(op.name)
op.in_port(1).disconnect()
weights_node.in_port(0).get_source().get_connection().set_destination(op.in_port(1))
else:
assert op.in_port(1).get_source().data.get_shape() == new_shape, \
'Weight shape and calculated shape mismatch in GroupConv node {}.'.format(op.name)
# we need to set this attrs for correct shape infer as convolution
op['group'] = group
op.type = 'Convolution'
def backprop_to_deconv(op: Node):
"""
    Function adjusts BackpropData operations so they are re-created with the correct type
:param op:
:return:
"""
assert op.soft_get('type') in ('ConvolutionBackpropData', 'GroupConvolutionBackpropData'), \
'Wrong operation type, {} instead of ConvolutionBackpropData/GroupConvolutionBackpropData!' \
''.format(op.soft_get('type'))
if op.has_valid('output_padding'):
# In this case we need to create Deconvolution as Convolution
op['type_to_create'] = 'Convolution'
op['old_input_shapes'] = list()
for n in op.in_nodes():
op.old_input_shapes.append(int64_array(op.in_node(n).shape))
def ti_add_edge_attrs(op: Node):
"""
Function adds necessary edge attrs in TensorIterator node
:param op:
:return:
"""
assert op.soft_get('type') == 'TensorIterator', 'Wrong operation type, {} instead of TensorIterator!' \
''.format(op.soft_get('type'))
i = 0
for num in range(len(op.in_ports())):
op.in_port(num).external_port_id = i
i += 1
for num in range(len(op.out_ports())):
op.out_port(num).external_port_id = i
i += 1
def assign_add_output_result(op: Node):
"""
Function adds necessary output result node for Assign node
:param op:
:return:
"""
assert op.soft_get('type') == 'Assign', 'Wrong operation type, {} instead of Assign!' \
''.format(op.soft_get('type'))
tmp_result = Result(op.graph, {'name': op.soft_get('name', op.id) + '/Result'}).create_node()
op.out_port(0).connect(tmp_result.in_port(0))
def copy_input_blobs(op: Node, copy_op: Node):
"""
    Function copies input blob data nodes from the restored graph to the copied one
:param op: Node from restored graph
:param copy_op: Node from copied graph
:return:
"""
for u, d in op.get_sorted_inputs():
if 'bin' in d:
Op.create_and_connect_input_data_node(copy_op.graph, copy_op,
{'value': op.in_node(d['in']).value,
'shape': op.in_node(d['in']).shape}, d)
# Map with preprocessing functions
preprocessing_op_nodes = {
'Const': propagate_const_values,
'GroupConvolution': groupconv_to_conv,
'ConvolutionBackpropData': backprop_to_deconv,
'GroupConvolutionBackpropData': backprop_to_deconv,
}
# Map with postprocessing functions for nodes
postprocessing_op_nodes = {
'Assign': assign_add_output_result,
'TensorIterator': ti_add_edge_attrs,
'TopK': TopKNormalizer.normalize_outputs,
}
def copy_graph_with_ops(graph: Graph) -> Graph:
"""
Function to copy graph and apply extenders to appropriate nodes
:param graph: Graph to copy
    :return: Copied graph with applied extenders
"""
new_graph = Graph()
new_graph.stage = 'back'
new_graph.graph = graph.graph
node_connections = dict()
mapping_of_old_idx_into_new = dict()
restore_correct_ports(graph)
# Nodes preprocessing stage in source graph
# Firstly propagate values only for Const nodes, because other preprocessings
# assumes Const nodes are already preprocessed.
for op in graph.get_op_nodes(type='Const'):
preprocessing_op_nodes[op.type](op)
for op in graph.get_op_nodes():
if op.soft_get('type') != 'Const' and op.soft_get('type') in preprocessing_op_nodes:
preprocessing_op_nodes[op.type](op)
# Create a new copy of graph with correct attributes (shape & type infer, backend attrs etc.)
for op in graph.get_op_nodes():
# Apply extenders to nodes in source graph
if op.type in Extender.registered_ops:
Extender.get_extender_class_by_name(op.type).extend(op)
else:
log.debug('Extender for node {} with type={} not found, please note.'.format(op.name, op.type))
# Add node with necessary type and extended attrs in new graph
op_type = op.soft_get('type_to_create', op.type)
if op_type in custom_ops:
node = custom_ops[op_type](new_graph, op.attrs()).create_node()
else:
assert op_type in Op.registered_ops, 'Operation {} not found in MO operations, ' \
'please check it!'.format(op_type)
node = Op.get_op_class_by_name(op_type)(new_graph, op.attrs()).create_node()
if op.has_and_set('need_copy_input_blobs'):
copy_input_blobs(op, node)
# Collect node connections
mapping_of_old_idx_into_new[op.id] = node.id
node_connections[op.id] = collect_node_outputs(op)
# Restore connections in new graph
for input_node_idx, its_outputs in list(node_connections.items()):
for out_port_idx, out_port_dest in its_outputs.items():
for dest_in_port_idx, dest_node_idx in out_port_dest:
src = Node(new_graph, mapping_of_old_idx_into_new[input_node_idx])
dst = Node(new_graph, mapping_of_old_idx_into_new[dest_node_idx])
src.out_port(out_port_idx).connect(dst.in_port(dest_in_port_idx))
# Nodes postprocessing stage in new graph
for op in new_graph.get_op_nodes():
if op.soft_get('type') in postprocessing_op_nodes:
postprocessing_op_nodes[op.type](op)
# clean up graph to shape inference
new_graph.clean_up()
return new_graph
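# Illustrative sketch added for documentation (hedged): the usual flow around
# copy_graph_with_ops(). ``mo_root`` is assumed to point at the Model Optimizer
# installation folder; only the three functions called below come from this module.
def _example_copy_restored_graph(graph: Graph, mo_root: str) -> Graph:
    collect_ops(mo_root)        # register MO operations (Op.registered_ops)
    collect_extenders(mo_root)  # register IR reader extenders (Extender.registered_ops)
    return copy_graph_with_ops(graph)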
| python | 13,084 |
# -*- coding: utf-8 -*-
# This file is part of the Ingram Micro Cloud Blue Connect SDK.
# Copyright (c) 2019-2020 Ingram Micro. All Rights Reserved.
from .activation import Activation
from .activation_template_response import ActivationTemplateResponse
from .activation_tile_response import ActivationTileResponse
from .agreement import Agreement
from .agreement_stats import AgreementStats
from .anniversary import Anniversary
from .asset_request import AssetRequest
from .asset import Asset
from .base import BaseModel
from .billing import Billing
from .commitment import Commitment
from .company import Company
from .configuration import Configuration
from .connection import Connection
from .constraints import Constraints
from .contact import Contact
from .contact_info import ContactInfo
from .contract import Contract
from .country import Country
from .conversation import Conversation
from .conversation_message import ConversationMessage
from .customer_ui_settings import CustomerUiSettings
from .document import Document
from .download_link import DownloadLink
from .event import Event
from .events import Events
from .ext_id_hub import ExtIdHub
from .fulfillment import Fulfillment
from .hub import Hub
from .hub_instance import HubInstance
from .hub_stats import HubStats
from .item import Item
from .last_request import LastRequest
from .marketplace import Marketplace
from .param import Param
from .phone_number import PhoneNumber
from .product import Product
from .product_category import ProductCategory
from .product_configuration import ProductConfiguration
from .product_configuration_parameter import ProductConfigurationParameter
from .product_family import ProductFamily
from .product_stats import ProductStats
from .product_stats_info import ProductStatsInfo
from .renewal import Renewal
from .server_error_response import ServerErrorResponse
from .stat import Stat
from .stats import Stats
from .template import Template
from .tier_account import TierAccount
from .tier_accounts import TierAccounts
from .tier_account_request import TierAccountRequest
from .tier_config import TierConfig
from .tier_config_request import TierConfigRequest
from .ui import UI
from .unit import Unit
from .usage_file import UsageFile
from .usage_listing import UsageListing
from .usage_record import UsageRecord
from .usage_records import UsageRecords
from .usage_stats import UsageStats
from .user import User
from .value_choice import ValueChoice
from .billing_request import BillingRequest
from .period import Period
from .attributes import Attributes
from .recurring_asset import RecurringAsset
__all__ = [
'Activation',
'ActivationTemplateResponse',
'ActivationTileResponse',
'Agreement',
'AgreementStats',
'Anniversary',
'AssetRequest',
'Asset',
'Attributes',
'BaseModel',
'Billing',
'BillingRequest',
'Company',
'Configuration',
'Commitment',
'Connection',
'Constraints',
'Contact',
'ContactInfo',
'Contract',
'Country',
'Conversation',
'ConversationMessage',
'CustomerUiSettings',
'Document',
'DownloadLink',
'Event',
'Events',
'ExtIdHub',
'Fulfillment',
'Hub',
'HubInstance',
'HubStats',
'Item',
'LastRequest',
'Marketplace',
'Param',
'Period',
'PhoneNumber',
'Product',
'ProductCategory',
'ProductConfiguration',
'ProductConfigurationParameter',
'ProductFamily',
'ProductStats',
'ProductStatsInfo',
'RecurringAsset',
'Renewal',
'ServerErrorResponse',
'Stat',
'Stats',
'Template',
'TierAccount',
'TierAccountRequest',
'TierAccounts',
'TierConfig',
'TierConfigRequest',
'UI',
'Unit',
'UsageFile',
'UsageListing',
'UsageRecord',
'UsageRecords',
'UsageStats',
'User',
'ValueChoice',
]
| python | 3,864 |
"""Platform Models."""
from marshmallow import fields, Schema
from marshmallow.validate import OneOf
from ..enums import *
from ..models.BaseSchema import BaseSchema
class SortMethod(BaseSchema):
# Feedback swagger.json
name = fields.Str(required=False)
selected = fields.Boolean(required=False)
type = fields.Str(required=False)
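# Illustrative sketch added for documentation (not part of the generated client):
# validating a payload against the schema above with plain marshmallow calls.
# The field values are made up.
def _example_sort_method_validation():
    schema = SortMethod()
    payload = {"name": "Latest", "selected": True, "type": "latest"}
    # validate() returns a dict of errors; an empty dict means the payload matches
    return schema.validate(payload)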
| python | 377 |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = [
'GetDatabaseAccountGremlinDatabaseResult',
'AwaitableGetDatabaseAccountGremlinDatabaseResult',
'get_database_account_gremlin_database',
]
@pulumi.output_type
class GetDatabaseAccountGremlinDatabaseResult:
"""
An Azure Cosmos DB Gremlin database.
"""
def __init__(__self__, etag=None, id=None, location=None, name=None, rid=None, tags=None, ts=None, type=None):
if etag and not isinstance(etag, str):
raise TypeError("Expected argument 'etag' to be a str")
pulumi.set(__self__, "etag", etag)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if rid and not isinstance(rid, str):
raise TypeError("Expected argument 'rid' to be a str")
pulumi.set(__self__, "rid", rid)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if ts and not isinstance(ts, dict):
raise TypeError("Expected argument 'ts' to be a dict")
pulumi.set(__self__, "ts", ts)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def etag(self) -> Optional[str]:
"""
A system generated property representing the resource etag required for optimistic concurrency control.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def id(self) -> str:
"""
The unique resource identifier of the database account.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def location(self) -> Optional[str]:
"""
The location of the resource group to which the resource belongs.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the database account.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def rid(self) -> Optional[str]:
"""
A system generated property. A unique identifier.
"""
return pulumi.get(self, "rid")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Tags are a list of key-value pairs that describe the resource. These tags can be used in viewing and grouping this resource (across resource groups). A maximum of 15 tags can be provided for a resource. Each tag must have a key no greater than 128 characters and value no greater than 256 characters. For example, the default experience for a template type is set with "defaultExperience": "Cassandra". Current "defaultExperience" values also include "Table", "Graph", "DocumentDB", and "MongoDB".
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def ts(self) -> Optional[Any]:
"""
A system generated property that denotes the last updated timestamp of the resource.
"""
return pulumi.get(self, "ts")
@property
@pulumi.getter
def type(self) -> str:
"""
The type of Azure resource.
"""
return pulumi.get(self, "type")
class AwaitableGetDatabaseAccountGremlinDatabaseResult(GetDatabaseAccountGremlinDatabaseResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetDatabaseAccountGremlinDatabaseResult(
etag=self.etag,
id=self.id,
location=self.location,
name=self.name,
rid=self.rid,
tags=self.tags,
ts=self.ts,
type=self.type)
def get_database_account_gremlin_database(account_name: Optional[str] = None,
database_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetDatabaseAccountGremlinDatabaseResult:
"""
An Azure Cosmos DB Gremlin database.
:param str account_name: Cosmos DB database account name.
:param str database_name: Cosmos DB database name.
:param str resource_group_name: Name of an Azure resource group.
"""
__args__ = dict()
__args__['accountName'] = account_name
__args__['databaseName'] = database_name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:documentdb/v20150401:getDatabaseAccountGremlinDatabase', __args__, opts=opts, typ=GetDatabaseAccountGremlinDatabaseResult).value
return AwaitableGetDatabaseAccountGremlinDatabaseResult(
etag=__ret__.etag,
id=__ret__.id,
location=__ret__.location,
name=__ret__.name,
rid=__ret__.rid,
tags=__ret__.tags,
ts=__ret__.ts,
type=__ret__.type)
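# Illustrative sketch added for documentation (hedged): calling the data source
# defined above from user code. The account, database, and resource group names
# are placeholders, not real Azure resources.
def _example_lookup_gremlin_database():
    result = get_database_account_gremlin_database(
        account_name="my-cosmos-account",
        database_name="my-gremlin-db",
        resource_group_name="my-resource-group")
    return result.name, result.location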
| python | 5,901 |
# loop inside loop - O(n^2)
def anagramDetection(string1, string2):
l2 = list(string2)
isAnagram = True
pos1 = 0
while pos1 < len(string1) and isAnagram:
character = string1[pos1]
characterFound = False
pos2 = 0
while pos2 < len(l2) and not characterFound:
if character == l2[pos2]:
l2[pos2] = None
characterFound = True
else:
pos2 += 1
if not characterFound:
isAnagram = False
else:
pos1 += 1
return isAnagram
print(anagramDetection("python", "typhon"))
print(anagramDetection("baba", "abba"))
print(anagramDetection("babb", "abba"))
| python | 704 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
A Pyro4 server batch submission system.
"""
from __future__ import print_function
import os as _os
import sys
import errno as _errno
# import atexit as _atexit
import signal as _signal
import socket as _socket # Used to get hostname
import getpass as _getpass # Used to get usernames for queue
import argparse as _argparse
import subprocess
import multiprocessing as mp
from time import sleep as _sleep
from datetime import datetime as _dt
from datetime import timedelta as _td
from collections import OrderedDict as _OD
try:
from Queue import Empty
except ImportError: # python3
from queue import Empty
from six import text_type as _txt
from six import string_types as _str
from six import integer_types as _int
import psutil as _psutil
import Pyro4
# Fyrd imports (not necessary for main functionality) no relative imports
# as we run as a script also
from fyrd import run as _run
from fyrd import conf as _conf
from fyrd import logme as _logme
from fyrd import options as _options
from fyrd import script_runners as _scrpts
from fyrd import submission_scripts as _sscrpt
_Script = _sscrpt.Script
# For SQLAlchemy DB (imports required by the Job/LocalQueue classes below)
from sqlalchemy import create_engine as _create_engine
from sqlalchemy import Column as _Column
from sqlalchemy import Integer as _Integer
from sqlalchemy import String as _String
from sqlalchemy import DateTime as _DateTime
from sqlalchemy.orm import sessionmaker as _sessionmaker
from sqlalchemy.orm import scoped_session as _scoped_session
from sqlalchemy.exc import InvalidRequestError
from sqlalchemy.ext.declarative import declarative_base as _base
Base = _base()
#############################
# User Editable Constants #
#############################
PID = None
RUN_DIR = _conf.CONFIG_PATH
PID_FILE = _os.path.join(RUN_DIR, 'local_queue.pid')
URI_FILE = _os.path.join(RUN_DIR, 'local_queue.uri')
DATABASE = _os.path.join(RUN_DIR, 'local_queue.db')
# Time to block between looping in job_runner
SLEEP_LEN = 0.1
# Time in seconds to wait for QueueManager to terminate
STOP_WAIT = 5
# Number of days to wait before cleaning out old jobs, obtained from the
# config file; this just sets the default
CLEAN_OLDER_THAN = 7
# Default max jobs, can be overriden by config file or QueueManager init
MAX_JOBS = mp.cpu_count()-1
MAX_JOBS = MAX_JOBS if MAX_JOBS >= 0 else 1
############################
# Do Not Edit Below Here #
############################
_WE_ARE_A_SERVER = False
# Fyrd file prefix, we don't use file commands so this is empty
PREFIX = ''
# This will be appended to job submission scripts
SUFFIX = 'job'
# Reset broken multithreading
# Some of the numpy C libraries can break multithreading; this command
# fixes the issue.
try:
subprocess.check_output(
"taskset -p 0xff %d >/dev/null 2>/dev/null" % _os.getpid(), shell=True
)
except subprocess.CalledProcessError:
pass # This doesn't work on Macs or Windows
###############################################################################
# Exception #
###############################################################################
class QueueError(Exception):
"""Generic Exception for handling errors."""
pass
###############################################################################
# Database #
###############################################################################
class Job(Base):
"""A Job record for every queued job.
Attributes
----------
jobno : int
Job ID
name : str
The name of the job
command : str
A full shell script that can be executed
state : {'queued', 'running', 'completed', 'failed'}
The current state of the job
threads : int
The requested number of cores
exitcode : int
The exit code of the job if state in {'completed', 'failed'}
pid : int
The pid of the process
runpath : str, optional
Path to the directory to run in
outfile, errfile : str, optional
Paths to the output files
"""
__tablename__ = 'jobs'
jobno = _Column(_Integer, primary_key=True, index=True)
name = _Column(_String, nullable=False)
command = _Column(_String, nullable=False)
submit_time = _Column(_DateTime, nullable=False)
threads = _Column(_Integer, nullable=False)
state = _Column(_String, nullable=False, index=True)
exitcode = _Column(_Integer)
pid = _Column(_Integer)
runpath = _Column(_String)
outfile = _Column(_String)
errfile = _Column(_String)
def __repr__(self):
"""Display summary."""
return 'LocalQueueJob<{0}:{1};{2};state:{3};exitcode:{4}>'.format(
self.jobno, self.pid, self.name, self.state, self.exitcode
)
class LocalQueue(object):
"""A database to hold job information.
Should only be accessed by a running QueueManager daemon.
"""
db_file = DATABASE
def __init__(self, db_file=None):
"""Attach to a database, create if does not exist."""
db_file = db_file if db_file else self.db_file
self.db_file = _os.path.abspath(db_file)
self.engine = _create_engine(
'sqlite:///{}?check_same_thread=False'.format(self.db_file)
)
if not _os.path.isfile(self.db_file):
self.create_database(confirm=False)
##########################################################################
# Basic Connectivity #
##########################################################################
def get_session(self):
"""Return session for this database."""
session_factory = _sessionmaker(bind=self.engine)
Session = _scoped_session(session_factory)
return Session()
@property
def session(self):
"""Simple wrapper for a new session."""
return self.get_session()
##########################################################################
# Querying #
##########################################################################
def query(self, *args):
"""Wrapper for the SQLAlchemy query method of session.
Parameters
----------
args
Any arguments allowed by session.query. If blank, Job is used,
which will return the whole database. To limit by columns, simply
pass columns: `query(Job.jobno)` would return only a list of job
numbers.
"""
if not args:
args = (Job,)
session = self.get_session()
return session.query(*args)
##########################################################################
# Job State Searches #
##########################################################################
def get_jobs(self, state=None):
"""Return list of Jobs for all jobs that match state."""
q = self.query()
if state:
q = q.filter(Job.state == state)
return q.all()
@property
def running(self):
"""All running jobs."""
return self.get_jobs(state='running')
@property
def queued(self):
"""All queued jobs."""
return self.get_jobs(state='pending')
@property
def completed(self):
"""All completed jobs."""
return self.get_jobs(state='completed')
@property
def failed(self):
"""All failed jobs."""
return self.get_jobs(state='failed')
##########################################################################
# Job Management #
##########################################################################
def set_running_jobs_failed(self):
"""Change all running jobs to failed."""
pass
##########################################################################
# Maintenance Methods #
##########################################################################
def create_database(self, confirm=True):
"""Create the db from scratch.
Note: Will DELETE the current database!!!
"""
if confirm:
ans = _run.get_yesno(
'Are you sure you want to erase and recreate the db?'
)
if not ans:
sys.stderr.write('Aborting\n')
return False
_logme.log('Recreating database', 'info', also_write='stderr')
if _os.path.exists(self.db_file):
_os.remove(self.db_file)
Base.metadata.create_all(self.engine)
_logme.log('Done', 'info', also_write='stderr')
##########################################################################
# Internals #
##########################################################################
def __getitem__(self, x):
"""Quick access to jobs by ID."""
if isinstance(x, (_str, _txt)):
return self.query().filter(Job.jobno == x).all()
def __len__(self):
"""Print the length."""
return self.query(Job).count()
def __repr__(self):
"""Basic information about self."""
return 'LocalQueue<{location}>'.format(location=self.db_file)
def __str__(self):
"""Basic information about self."""
return self.__repr__()
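# Illustrative sketch added for documentation (hedged): read-only inspection of
# the job database from outside the daemon, e.g. while debugging. The class
# docstring above recommends that only the QueueManager daemon touch the DB, so
# treat this purely as a diagnostic aid.
def _example_inspect_local_queue():
    db = LocalQueue(DATABASE)
    running = [job.jobno for job in db.running]
    failed = [(job.jobno, job.exitcode) for job in db.failed]
    return running, failed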
###############################################################################
# Management Functions #
###############################################################################
def initialize():
"""Initialize the database and config directory.
Returns
-------
success : bool
"""
if not _os.path.isdir(RUN_DIR):
_os.makedirs(RUN_DIR)
if not _os.path.isfile(DATABASE):
db = LocalQueue(DATABASE)
db.create_database(confirm=False)
return True
def check_conf():
"""Make sure the config directory exists, initialize if not.
Returns
-------
success : bool
"""
if _os.path.isdir(RUN_DIR) and _os.path.isfile(DATABASE):
return True
return initialize()
def server_running():
"""Return True if server currently running."""
if not _os.path.isfile(PID_FILE):
return False
with open(PID_FILE) as fin:
pid = int(fin.read().strip())
return _pid_exists(pid)
def start_server():
"""Start the server as a separate thread.
Returns
-------
pid : int
"""
_logme.log('Starting local queue server with 2 second sleep', 'info')
_sleep(2)
# subprocess.check_call([sys.executable, us, 'start'])
if not server_running():
daemon_manager('start')
_sleep(1)
if not server_running():
_logme.log('Cannot start server', 'critical')
raise QueueError('Cannot start server')
with open(PID_FILE) as fin:
pid = int(fin.read().strip())
return pid
def get_server_uri(start=True):
"""Check status and return a server URI."""
check_conf()
# Remote queue server (Pyro4)
if not _WE_ARE_A_SERVER and not _os.path.isfile(PID_FILE):
if _os.path.isfile(URI_FILE):
with open(URI_FILE) as fin:
return fin.read().strip()
else:
uri = get_uri()
if uri is None:
if start:
                    raise QueueError('Cannot reach remote queue server')
else:
return False
else:
return uri
if (_WE_ARE_A_SERVER or not start) and not _os.path.isfile(PID_FILE):
return False
if not _os.path.isfile(PID_FILE):
if _os.path.isfile(URI_FILE):
_os.remove(URI_FILE)
if not start_server():
return False
with open(PID_FILE) as fin:
pid = int(fin.read().strip())
if not _pid_exists(pid):
_os.remove(PID_FILE)
if _os.path.isfile(URI_FILE):
_os.remove(URI_FILE)
if _WE_ARE_A_SERVER or not start:
return False
pid = start_server()
if not pid or not _pid_exists(pid):
raise OSError('Server not starting')
# Server running now
with open(URI_FILE) as fin:
return fin.read().strip()
RESTART_TRY = False
def get_server(start=True, raise_on_error=False):
"""Return a client-side QueueManager instance."""
uri = get_server_uri(start=start)
if not uri:
if raise_on_error:
raise QueueError('Cannot get server')
return None
server = Pyro4.Proxy(uri)
# Test for bad connection
try:
server._pyroBind()
except Pyro4.errors.CommunicationError:
global RESTART_TRY
if RESTART_TRY:
_logme.log(
"Cannot bind to server still. Failing. Try to kill the "
"process in {}".format(PID_FILE),
'critical'
)
if raise_on_error:
raise QueueError('Cannot get server')
return None
RESTART_TRY = True
_logme.log("Cannot bind to server, killing and retrying.", 'error')
kill_queue()
server = get_server(start, raise_on_error)
RESTART_TRY = False
return server
def _pid_exists(pid):
"""Check whether pid exists in the current process table.
UNIX only.
From: https://stackoverflow.com/questions/568271/#6940314
"""
pid = int(pid)
if pid < 0:
return False
if pid == 0:
# According to "man 2 kill" PID 0 refers to every process
# in the process group of the calling process.
# On certain systems 0 is a valid PID but we have no way
# to know that in a portable fashion.
raise ValueError('invalid PID 0')
try:
_os.kill(pid, 0)
except OSError as err:
if err.errno == _errno.ESRCH:
# ESRCH == No such process
return False
elif err.errno == _errno.EPERM:
# EPERM clearly means there's a process to deny access to
return True
else:
# According to "man 2 kill" possible error values are
# (EINVAL, EPERM, ESRCH)
raise
else:
return True
###############################################################################
# Core Queue Manager and Runner #
###############################################################################
def get_queue_manager():
"""Return a client of QueueManager."""
return get_server()
@Pyro4.expose
class QueueManager(object):
"""Monitor a queue and run jobs.
Actual job execution done by the job_runner process, which forks.
.. note:: This class should not be accessed directly, it is intended to
run as a daemon. Use `get_queue_manager()` to get the client class.
Attributes
----------
jobs : dict
Dictionary of running jobs
max_jobs : int
The maximum number of jobs to run at one time. Defaults to current
CPUs - 1
"""
jobs = {}
all_jobs = []
max_jobs = None
inqueue = mp.Queue()
outqueue = mp.Queue()
_job_runner = None
def __init__(self, daemon, max_jobs=None):
"""Create the QueueManager
Paramenters
-----------
max_jobs : int, optional
The maximum number of jobs to run at one time. Defaults to current
CPUs - 1
"""
if not max_jobs:
cmax = _conf.get_option('local', 'max_jobs')
max_jobs = cmax if cmax else MAX_JOBS
self.max_jobs = int(max_jobs)
# Don't use more than the available cores
if self.max_jobs > mp.cpu_count():
self.max_jobs = mp.cpu_count()
# Unless there are fewer than 4 cores, we need at least 4 for split
# jobs, so we hard code 4 as the minimum
if self.max_jobs < 4:
self.max_jobs = 4
self.db = LocalQueue(DATABASE)
self.daemon = daemon
self.all_jobs = [i[0] for i in self.db.query(Job.jobno).all()]
self.check_runner()
# Set all existing to disappeared, as they must be dead if we are
# starting again
session = self.db.get_session()
q = session.query(Job).filter(
Job.state.in_(['running', 'pending', 'queued'])
)
bad = []
for job in q.all():
bad.append(str(job.jobno))
job.state = 'killed'
session.commit()
session.close()
if bad:
_logme.log('Jobs {0} were marked as killed as we are restarting'
.format(','.join(bad)), 'warn')
##########################################################################
# Job Submission and Management #
##########################################################################
@Pyro4.expose
def submit(self, command, name, threads=1, dependencies=None,
stdout=None, stderr=None, runpath=None):
"""Submit a job and add it to the database.
Parameters
----------
command : str
A full executable shell script/shell command.
name : str
A name to give the job
threads : int, optional
The number of cores required by the job
dependencies : list of int, optional
A list of job numbers that must be complete prior to execution
stdout, stderr : str, optional
A path to a file to write STDOUT and STDERR to respectively
runpath : str, optional
A path to execute the command in
Returns
-------
jobno : int
Raises
------
QueueError
If a dependency is not in the job db already or command is invalid
"""
_logme.log('Submitting: {}'.format(str(locals())), 'debug')
session = self.db.get_session()
threads = int(threads)
if not isinstance(command, (_str, _txt)):
            raise ValueError('command is {0}, type {1}, cannot continue'
.format(command, type(command)))
depends = []
if dependencies:
for dep in dependencies:
dep = int(dep)
if not self.check_jobno(dep):
raise QueueError('Invalid dependencies')
depends.append(dep)
job = Job(name=name, command=command, threads=threads, state='pending',
submit_time=_dt.now())
if stdout:
job.outfile = stdout
if stderr:
job.errfile = stderr
if runpath:
job.runpath = runpath
try:
session.add(job)
except InvalidRequestError:
# In case open in another thread
local_job = session.merge(job)
other_session = session.object_session(job)
session.add(local_job)
session.commit()
other_session.close()
session.flush()
jobno = int(job.jobno)
session.commit()
session.close()
self.check_runner()
self.inqueue.put(
('queue',
(jobno, command, threads, depends, stdout, stderr, runpath))
)
self.jobs[jobno] = job
self.all_jobs.append(jobno)
return jobno
@Pyro4.expose
def get(self, jobs=None, preclean=True):
"""Return a list of updated jobs.
Parameters
----------
jobs : list of int, optional
A list of job numbers, a single job number is also fine
preclean : bool
If True run `clean()` first to remove old jobs
Returns
-------
jobs : list of tuple
[(jobno, name, command, state, threads,
exitcode, runpath, outfile, errfile)]
"""
if preclean:
self.clean()
session = self.db.get_session()
# Pyro cannot serialize Job objects, so we get a tuple instead
q = session.query(
Job.jobno, Job.name, Job.command, Job.state, Job.threads,
Job.exitcode, Job.runpath, Job.outfile, Job.errfile
)
if jobs:
jobs = [jobs] if isinstance(jobs, (_int, _str, _txt)) else jobs
jobs = [int(j) for j in jobs]
q = q.filter(Job.jobno.in_(jobs))
res = q.all()
session.close()
return res
@Pyro4.expose
def clean(self, days=None):
"""Delete all jobs in the queue older than days days.
Very fast if no jobs are older than the cutoff as the query returns
an empty set.
Parameters
----------
days : int, optional
Set number of days to clean, default set in the config file.
"""
if days:
clean_days = int(days)
else:
clean_days = int(_conf.get_option(
                'local', 'local_clean_days', CLEAN_OLDER_THAN
))
current_time = _dt.now()
cutoff = current_time - _td(days=clean_days)
jobs = self.db.query().filter(Job.submit_time < cutoff).all()
if not jobs:
return
session = self.db.get_session()
for job in jobs:
try:
session.delete(job)
except InvalidRequestError:
# In case open in another thread
local_job = session.merge(job)
other_session = session.object_session(job)
session.delete(local_job)
session.commit()
other_session.close()
session.commit()
session.close()
@Pyro4.expose
def kill(self, jobs):
"""Kill running or pending jobs.
Parameters
----------
jobs : list of int
"""
if isinstance(jobs, (_str, _txt)):
jobs = [int(jobs)]
elif isinstance(jobs, int):
jobs = [jobs]
else:
try:
jobs = list(jobs)
except TypeError:
jobs = [jobs]
for job in jobs:
self.check_jobno(job)
            self.inqueue.put(('kill', int(job)))  # runner expects a ('kill', jobno) tuple
jobs = self.get(jobs)
ok_states = ['killed', 'completed', 'failed']
for job in jobs:
if job.state not in ok_states:
return False
return True
@Pyro4.expose
@property
def available_cores(self):
"""Return an integer count of free cores."""
self.inqueue.put('available_cores')
cores = self.outqueue.get()
return int(cores)
@Pyro4.expose
@property
def python(self):
"""Return the python interpreter on the server"""
python = _run.which(sys.executable)
return python
##########################################################################
# Database Management #
##########################################################################
@Pyro4.expose
def update_job(self, jobno, state=None, exitcode=None, pid=None):
"""Update either the state or the exitcode of a job in the DB."""
session = self.db.get_session()
job = session.query(Job).filter(Job.jobno == int(jobno)).first()
if state:
job.state = state
if isinstance(exitcode, int):
job.exitcode = exitcode
if isinstance(pid, int):
job.pid = pid
session.flush()
session.commit()
session.close()
# def _housekeeping(self):
# """Run by Pyro4, update all_jobs, db cache, and clean up."""
# self.clean()
# all_jobs = []
# cache = {}
# session = self.db.get_session()
# for job in session.query(Job).all():
# all_jobs.append(job.id)
# cache[job.id] = job
# self.all_jobs = all_jobs
# self._cache = cache
##########################################################################
# Shutdown #
##########################################################################
@Pyro4.expose
def shutdown_jobs(self):
"""Kill all jobs and terminate."""
result = None
if not self.inqueue._closed:
self.inqueue.put('stop')
print('waiting for jobs to terminate gracefully')
try:
result = self.outqueue.get(timeout=STOP_WAIT)
except Empty:
pass
print('killing runner')
_kill_proc_tree(self._job_runner.pid)
if _pid_exists(self._job_runner.pid):
_os.kill(self._job_runner.pid, _signal.SIGKILL)
print('job_runner killing done')
try:
self.inqueue.close()
self.outqueue.close()
except AttributeError:
pass
self.daemon.shutdown()
if result is None:
return None
elif result:
return True
return False
##########################################################################
# Error Checking #
##########################################################################
def check_jobno(self, jobno):
"""Check if jobno in self.all_jobs, raise QueueError if not."""
if jobno not in self.all_jobs:
_logme.log('Job number {0} does not exist.'.format(jobno), 'error')
return False
return True
def check_runner(self):
"""Make sure job_runner is active and start it if inactive."""
if self._job_runner and self._job_runner.is_alive():
return self._job_runner
runner = mp.Process(
target=job_runner,
args=(self.inqueue, self.outqueue, self.max_jobs)
)
runner.start()
self._job_runner = runner
@property
def job_runner(self):
"""A job runner process."""
self.check_runner()
return self._job_runner
##########################################################################
# Interaction Stuff #
##########################################################################
# @Pyro4.expose
# @property
# def running(self):
# """Return all running job ids from the cache, no new database query."""
# return [job.id for job in self._cache if job.state == 'running']
# @Pyro4.expose
# @property
# def pending(self):
# """Return all pending job ids from the cache, no new database query."""
# return [job.id for job in self._cache if job.state == 'pending']
# @Pyro4.expose
# @property
# def completed(self):
# """Return all completed job ids from the cache, no new database query."""
# return [job.id for job in self._cache if job.state == 'completed']
# @Pyro4.expose
# @property
# def failed(self):
# """Return all failed job ids from the cache, no new database query."""
# return [job.id for job in self._cache if job.state == 'failed']
# def __repr__(self):
# """Simple information."""
# return "LocalQueue<running:{0};pending:{1};completed:{2}".format(
# self.running, self.pending, self.completed
# )
# def __str__(self):
# """Simple information."""
# return self.__repr__()
@Pyro4.expose
def __len__(self):
"""Length from the cache, no new database query."""
return len(self.all_jobs)
# @Pyro4.expose
# def __getitem__(self, key):
# """Get a job by id."""
# job = self._cache[key]
# return (
# job.jobno, job.name, job.command, job.state, job.threads,
# job.exitcode, job.runpath, job.outfile, job.errfile
# )
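# Illustrative sketch added for documentation (hedged): client code talks to the
# daemon above through the Pyro4 proxy returned by get_queue_manager()/get_server(),
# never by instantiating QueueManager directly. The shell command is a placeholder.
def _example_client_roundtrip():
    server = get_queue_manager()
    jobno = server.submit('echo hello', name='hello_job', threads=1)
    # get() returns plain tuples because Pyro4 cannot serialize the Job ORM objects
    for job in server.get([jobno]):
        jobno, name, command, state, threads, exitcode, runpath, outfile, errfile = job
        return state, exitcode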
def job_runner(inqueue, outqueue, max_jobs):
"""Run jobs with dependency tracking.
Terminate by sending 'stop' to inqueue
Parameters
----------
inqueue : multiprocessing.Queue
inqueue puts must be in the form : (command, extra):
`('stop')` : immediately shutdown this process
`('queue', job_info)` : queue and run this job
`('kill', jobno)` : immediately kill this job
`('available_cores')` : put available core count in outqueue
job_info must be in the form:
`(int(jobno), str(command), int(threads), list(dependencies))`
outqueue : multiprocessing.Queue
        Used to return information, e.g. the available core count when 'available_cores' is requested
max_jobs : int
The maximum number of concurrently running jobs, will be adjusted to
be 4 <= max_jobs <= cpu_count. The minimum of 4 jobs is a hard limit
and is enforced, so a machine with only 2 cores will still end up with
4 jobs running. This is required to avoid hangs on some kinds of fyrd
jobs, where a split job is created from a child process.
Returns
-------
bool
If 'stop' is sent, will return `True` if there are no running or
pending jobs and `False` if there are still running or pending jobs.
Raises
------
QueueError
If invalid argument put into inqueue
"""
if not _WE_ARE_A_SERVER:
return
tries = 5
while tries:
qserver = get_server()
if qserver:
break
_sleep(1)
tries -= 1
continue
if not qserver:
qserver = get_server(raise_on_error=True)
max_jobs = int(max_jobs)
    # Cap at the available core count, mirroring QueueManager.__init__ and the
    # "4 <= max_jobs <= cpu_count" constraint documented above
    if max_jobs > mp.cpu_count():
        max_jobs = mp.cpu_count()
if max_jobs < 4:
max_jobs = 4
available_cores = max_jobs
running = {} # {jobno: Process}
queued = _OD() # {jobno: {'command': command, 'depends': depends, ...}
done = {} # {jobno: Process}
jobs = [] # [jobno, ...]
put_core_info = False
while True:
# Get everything from the input queue first, queue everything
while True:
if inqueue.empty():
break
info = inqueue.get() # Will block if input queue empty
if info == 'stop' or info[0] == 'stop':
good = True
pids = []
if running:
good = False
for jobno, job in running.items():
qserver.update_job(jobno, state='killed')
pids.append(job.pid)
job.terminate()
if queued:
good = False
for jobno, job in queued.items():
qserver.update_job(jobno, state='killed')
for pid in pids:
if _pid_exists(pid):
_os.kill(pid, _signal.SIGKILL)
outqueue.put(good)
return good
if info == 'available_cores' or info[0] == 'available_cores':
put_core_info = True
continue
if info[0] == 'kill':
jobno = int(info[1])
if jobno in running:
running[jobno].terminate()
qserver.update_job(jobno, state='killed')
running.pop(jobno)
if jobno in queued:
queued.pop(jobno)
qserver.update_job(jobno, state='killed')
continue
if info[0] != 'queue':
raise QueueError('Invalid argument: {0}'.format(info[0]))
jobno, command, threads, depends, stdout, stderr, runpath = info[1]
if not command:
                raise QueueError('Job command is of type {0}, cannot continue'
.format(type(command)))
jobno = int(jobno)
threads = int(threads)
            # Cap the requested threads so the job can still run
if threads >= max_jobs:
threads = max_jobs-1
# Add to queue
if jobno in jobs:
# This should never happen
raise QueueError('Job already submitted!')
jobs.append(jobno)
queued[jobno] = {'command': command, 'threads': threads,
'depends': depends, 'stdout': stdout,
'stderr': stderr, 'runpath': runpath}
qserver.update_job(jobno, state='pending')
# Update running and done queues
for jobno, process in running.items():
if process.is_alive():
continue
# Completed
process.join()
code = process.exitcode
state = 'completed' if code == 0 else 'failed'
qserver.update_job(jobno, state=state, exitcode=code)
done[jobno] = process
# Remove completed jobs from running
for jobno in done:
if jobno in running:
p = running.pop(jobno)
available_cores += p.cores
        # Start queued jobs when enough cores are free
if available_cores > max_jobs:
available_cores = max_jobs
if available_cores < 0: # Shouldn't happen
available_cores = 0
if put_core_info:
outqueue.put(available_cores)
put_core_info = False
for jobno, info in queued.items():
if info['depends']:
not_done = []
for dep_id in info['depends']:
if dep_id not in done:
not_done.append(dep_id)
if not_done:
continue
if info['threads'] <= available_cores:
if info['runpath']:
curpath = _os.path.abspath('.')
_os.chdir(info['runpath'])
p = mp.Process(
target=_run.cmd,
args=(info['command'],),
kwargs={
'stdout': info['stdout'],
'stderr': info['stderr'],
}
)
p.daemon = True
p.start()
running[jobno] = p
available_cores -= info['threads']
p.cores = info['threads']
if info['runpath']:
_os.chdir(curpath)
qserver.update_job(jobno, state='running', pid=p.pid)
# Clear running jobs from queue
for jobno in running:
if jobno in queued:
queued.pop(jobno)
# Block for a moment to avoid running at 100% cpu
_sleep(SLEEP_LEN)
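# Illustrative reference added for documentation (hedged): the message forms that
# job_runner() accepts on its input queue, written out as plain Python values.
# In normal operation only QueueManager puts messages here; values are placeholders.
def _example_runner_messages(jobno=1):
    return [
        ('queue', (jobno, 'echo hello', 1, [], None, None, None)),  # queue a job
        ('kill', jobno),        # kill a queued or running job
        'available_cores',      # ask for the free core count on the out queue
        'stop',                 # shut the runner down
    ]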
###############################################################################
# Daemon Creation and Management Functions #
###############################################################################
def get_uri():
"""Get the URI from the config or file.
Tests if URI is active before returning.
Returns
-------
uri : str or None
If file does not exist or URI is inactive, returns None and deletes
URI_FILE, else returns the URI as a string.
"""
curi = _conf.get_option('local', 'server_uri')
if curi:
t = _test_uri(curi)
if t == 'connected':
with open(URI_FILE, 'w') as fout:
fout.write(str(curi))
return curi
if t == 'invalid':
_conf.set_option('local', 'server_uri', None)
return None
if not _os.path.isfile(URI_FILE):
return None
with open(URI_FILE) as fin:
uri = fin.read().strip()
t = _test_uri(uri)
if t == 'connected':
return uri
_os.remove(URI_FILE)
return None
def _test_uri(uri):
"""Test if a URI refers to an accessible Pyro4 object."""
try:
p = Pyro4.Proxy(uri)
except Pyro4.errors.PyroError:
        _logme.log('URI {0} is an invalid URI'.format(uri), 'error')
return 'invalid'
try:
if p._pyroBind():
out = 'connected'
elif p.available_cores:
out = 'connected'
else:
out = 'disconnect'
if out == 'connected':
p._pyroRelease()
return out
except Pyro4.errors.CommunicationError:
_logme.log('URI {0} is not connected'.format(uri), 'warn')
return 'disconnect'
def daemonizer():
"""Create the server daemon."""
# Get pre-configured URI if available
curi = _conf.get_option('local', 'server_uri')
utest = _test_uri(curi) if curi else None
# Test if there is already another daemon running
crun = True if utest == 'connected' else False
if crun or server_running():
raise QueueError('Daemon already running, cannot start')
# Set port and host if present in URI
if utest == 'disconnect':
uri = Pyro4.URI(curi)
args = {'host': uri.host, 'port': uri.port}
objId = uri.object
else:
args = {}
objId = "QueueManager"
# Create the daemon
with Pyro4.Daemon(**args) as daemon:
queue_manager = QueueManager(daemon)
uri = daemon.register(queue_manager, objectId=objId)
# daemon.housekeeping = queue_manager._housekeeping
with open(PID_FILE, 'w') as fout:
fout.write(str(_os.getpid()))
with open(URI_FILE, 'w') as fout:
fout.write(str(uri))
print("Ready. Object uri =", uri)
daemon.requestLoop()
def shutdown_queue():
"""Kill the server and queue gracefully."""
good = True
server = get_server(start=False)
if server:
try:
res = server.shutdown_jobs()
except OSError:
res = None
except Pyro4.errors.CommunicationError:
res = None
_logme.log('Local queue runner terminated.', 'debug')
if res is None:
_logme.log('Could not determine process completion state',
'warn')
good = False
elif res:
_logme.log('All jobs completed', 'debug')
else:
_logme.log('Some jobs failed!', 'error', also_write='stderr')
good = False
else:
_logme.log('Server appears already stopped', 'info')
kill_queue()
_logme.log('Local queue terminated', 'info')
return 0 if good else 1
def kill_queue():
"""Kill the server and queue without trying to clean jobs."""
if _os.path.isfile(PID_FILE):
with open(PID_FILE) as fin:
pid = int(fin.read().strip())
_os.remove(PID_FILE)
_kill_proc_tree(pid, including_parent=True)
if _pid_exists(pid):
_os.kill(pid, _signal.SIGKILL)
if _os.path.isfile(URI_FILE):
_os.remove(URI_FILE)
def daemon_manager(mode):
"""Manage the daemon process
Parameters
----------
mode : {'start', 'stop', 'restart', 'status'}
Returns
-------
status : int
0 on success, 1 on failure
"""
global _WE_ARE_A_SERVER
_WE_ARE_A_SERVER = True
check_conf()
if mode == 'start':
return _start()
elif mode == 'stop':
return _stop()
elif mode == 'restart':
_stop()
return _start()
elif mode == 'status':
running = server_running()
if running:
_logme.log('Local queue server is running', 'info',
also_write='stderr')
else:
_logme.log('Local queue server is not running', 'info',
also_write='stderr')
return 0 if running else 1
_logme.log('Invalid mode {0}'.format(mode), 'error')
return 1
def _start():
"""Start the daemon process as a fork."""
if _os.path.isfile(PID_FILE):
with open(PID_FILE) as fin:
            pid = int(fin.read().strip())
if _pid_exists(pid):
_logme.log('Local queue already running with pid {0}'
.format(pid), 'info')
return 1
_os.remove(PID_FILE)
pid = _os.fork()
if pid == 0: # The first child.
daemonizer()
else:
_logme.log('Local queue starting', 'info')
_sleep(1)
if server_running():
return 0
_logme.log('Server failed to start', 'critical')
return 1
def _stop():
"""Stop the daemon process."""
if not _os.path.isfile(PID_FILE):
_logme.log('Queue does not appear to be running, cannot stop',
'info')
return 1
return shutdown_queue()
def _kill_proc_tree(pid, including_parent=True):
"""Kill an entire process tree."""
parent = _psutil.Process(int(pid))
if hasattr(parent, 'get_children'):
parent.children = parent.get_children
for child in parent.children(recursive=True):
child.kill()
if including_parent:
parent.kill()
###############################################################################
# Fyrd Functions #
###############################################################################
###############################################################################
# Functionality Test #
###############################################################################
def queue_test(warn=True):
"""Check that this batch system can be used.
Parameters
----------
warn : bool
log a warning on fail
Returns
-------
batch_system_functional : bool
"""
# Check for a remote server_uri is running
_logme.log('Checking for a remote queue server_uri (Pyro4)', 'debug')
uri = get_uri()
if uri is not None:
_logme.log('Remote queue server is running at {}'.format(uri), 'debug')
return True
log_level = 'error' if warn else 'debug'
try:
if not server_running():
start_server()
return server_running()
    except Exception:
        _logme.log('Cannot get local queue server address', log_level)
return False
###############################################################################
# Normalization Functions #
###############################################################################
def normalize_job_id(job_id):
"""Convert the job id into job_id, array_id."""
return str(int(job_id)), None
def normalize_state(state):
"""Convert state into standardized (slurm style) state."""
state = state.lower()
if state == 'queued':
state = 'pending'
return state
###############################################################################
# Job Submission #
###############################################################################
def python():
"""Return the python interpreter path on the server side.
Returns
-------
python : python interpreter path
"""
server = get_server()
return server.python
def gen_scripts(job_object, command, args, precmd, modstr):
"""Build the submission script objects.
Parameters
    ----------
job_object : fyrd.job.Job
command : str
Command to execute
args : list
List of additional arguments, not used in this script.
precmd : str
String from options_to_string() to add at the top of the file, should
contain batch system directives
modstr : str
String to add after precmd, should contain module directives.
Returns
-------
fyrd.script_runners.Script
The submission script
None
Would be the exec_script, not used here.
"""
scrpt = '{0}.{1}.{2}'.format(job_object.name, job_object.suffix, SUFFIX)
job_object._mode = 'remote'
sub_script = _scrpts.CMND_RUNNER_TRACK.format(
precmd=precmd, usedir=job_object.runpath, name=job_object.name,
command=command
)
job_object._mode = 'local'
return _Script(script=sub_script, file_name=scrpt, job=job_object), None
def submit(script, dependencies=None, job=None, args=None, kwds=None):
"""Submit any script with dependencies.
.. note:: this function can only use the following fyrd keywords:
cores, name, outfile, errfile, runpath
We get those in the following order:
1. Job object
2. args
3. kwds
None of these keywords are required. If they do not exist, cores is set to
1, name is set to `file_name`, no runpath is used, and STDOUT/STDERR are
not saved
Parameters
----------
script : fyrd.Script
Script to be submitted
dependencies : list
List of dependencies
job : fyrd.job.Job, optional
A job object for the calling job, used to get cores, outfile, errfile,
runpath, and name if available
args : list, optional
A list of additional arguments, only parsed if list of tuple in the
format `[(key, value)]`. Comes from output of `parse_strange_options()`
kwds : dict or str, optional
A dictionary of keyword arguments to parse with options_to_string, or
a string of option:value,option,option:value,....
Used to get any of cores, outfile, errfile, runpath, or name
Returns
-------
job_id : str
"""
    if job:
        job._mode = 'remote'
params = {}
needed_params = ['cores', 'outfile', 'errfile', 'runpath', 'name']
if job:
params['cores'] = job.cores
params['outfile'] = job.outfile
params['errfile'] = job.errfile
params['runpath'] = job.runpath
params['name'] = job.name
if args and isinstance(args[0], (list, tuple)):
for k, v in args:
if k in needed_params and k not in params:
params[k] = v
if kwds:
if not isinstance(kwds, dict):
kwds = {k: v for k, v in [i.split(':') for i in kwds.split(',')]}
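            # Illustrative (added note): "cores:4,runpath:/tmp/run" becomes
            # {"cores": "4", "runpath": "/tmp/run"}; a bare "option" with no
            # ':' would not survive the two-item unpacking above.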
_, extra_args = _options.options_to_string(kwds)
for k, v in extra_args:
if k in needed_params and k not in params:
params[k] = v
if 'cores' not in params:
params['cores'] = 1
if 'name' not in params:
params['name'] = _os.path.basename(script.file_name)
for param in needed_params:
if param not in params:
params[param] = None
# Submit the job
server = get_server()
    if job:
        job._mode = 'local'
if not _os.path.isfile(script.file_name):
raise QueueError('File {0} does not exist, cannot submit'
.format(script.file_name))
    if job:
        job._mode = 'remote'
command = 'bash {0}'.format(script.file_name)
_logme.log("Submitting job '{}' with params: {}".format(command,
str(params)),
'debug')
jobno = server.submit(
command, params['name'], threads=params['cores'],
dependencies=dependencies, stdout=params['outfile'],
stderr=params['errfile'], runpath=params['runpath']
)
    if job:
        job._mode = 'local'
return str(jobno)
###############################################################################
# Job Management #
###############################################################################
def kill(job_ids):
"""Terminate all jobs in job_ids.
Parameters
----------
job_ids : list or str
A list of valid job ids or a single valid job id
Returns
-------
success : bool
"""
server = get_server()
return server.kill(job_ids)
###############################################################################
# Queue Parsing #
###############################################################################
def queue_parser(user=None, partition=None):
"""Iterator for queue parsing.
Simply ignores user and partition requests.
Parameters
----------
user : str, NOT IMPLEMENTED
User name to pass to qstat to filter queue with
partition : str, NOT IMPLEMENTED
Partition to filter the queue with
Yields
------
job_id : str
array_id : str or None
name : str
userid : str
partition : str
    state : str
nodelist : list
numnodes : int
cntpernode : int or None
    exit_code : int or None
"""
server = get_server(start=True)
user = _getpass.getuser()
host = _socket.gethostname()
for job in server.get(): # Get all jobs in the database
job_id = str(job[0])
array_id = None
name = job[1]
userid = user
partition = None
state = normalize_state(job[3])
nodelist = [host]
numnodes = 1
cntpernode = job[4]
exit_code = job[5]
yield (job_id, array_id, name, userid, partition, state, nodelist,
numnodes, cntpernode, exit_code)
def parse_strange_options(option_dict):
"""Parse all options that cannot be handled by the regular function.
Because we do not parse the submission file, all keywords will be placed
into the final return list.
Parameters
----------
option_dict : dict
All keyword arguments passed by the user that are not already defined
in the Job object
Returns
-------
list
An empty list
dict
An empty dictionary
list
A list of options that can be used by `submit()`
Ends up in the `args` parameter of the submit function
"""
outlist = []
good_items = ['outfile', 'cores', 'errfile', 'runpath']
for opt, var in option_dict.items():
if opt in good_items:
outlist.append((opt, var))
return [], {}, outlist
###############################################################################
# User Interaction #
###############################################################################
def command_line_parser():
"""Parse command line options.
Returns
-------
parser
"""
parser = _argparse.ArgumentParser(
description=__doc__,
formatter_class=_argparse.RawDescriptionHelpFormatter
)
parser.add_argument(
'mode', choices={'start', 'stop', 'status', 'restart'},
metavar='{start,stop,status,restart}', help='Server command'
)
return parser
def main(argv=None):
"""Parse command line options to run as a script."""
if not argv:
argv = sys.argv[1:]
parser = command_line_parser()
args = parser.parse_args(argv)
# Call the subparser function
return daemon_manager(args.mode)
if __name__ == '__main__' and '__file__' in globals():
sys.exit(main())
| python | 50,987 |
# Best Time to Buy and Sell Stock: https://leetcode.com/problems/best-time-to-buy-and-sell-stock/
# You are given an array prices where prices[i] is the price of a given stock on the ith day.
# You want to maximize your profit by choosing a single day to buy one stock and choosing a different day in the future to sell that stock.
# Return the maximum profit you can achieve from this transaction. If you cannot achieve any profit, return 0.
# Initial thought: track the lowest price seen so far; when the current price is not a new low, check whether its difference from the lowest beats the current max profit.
# This was correct, but I skipped writing the brute force first: a double for loop that checks every buy/sell combination (see the sketch below the class).
from typing import List
class Solution:
def maxProfit(self, prices: List[int]) -> int:
if len(prices) <= 1:
return 0
profit, lowest = 0, prices[0]
for i in range(len(prices)):
if prices[i] <= lowest:
lowest = prices[i]
else:
profit = max(profit, prices[i] - lowest)
return profit
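# Brute-force sketch referenced above (my addition for comparison, not part of
# the original submission): try every buy/sell pair, O(n^2) time, O(1) space.
class BruteForceSolution:
    def maxProfit(self, prices: List[int]) -> int:
        best = 0
        for i in range(len(prices)):
            for j in range(i + 1, len(prices)):
                best = max(best, prices[j] - prices[i])
        return best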
# Score Card
# Did I need hints? N
# Did you finish within 30 min? Y
# Was the solution optimal? Yes, this is O(n) time and O(1) space, which beats the brute force O(n^2)
# Were there any bugs? None :D
# 5 5 5 5 = 5
| python | 1,295 |
from panel.orbital.orbit_plot import OrbitPlot
from panel.orbital.orbital_points_plot import OrbitalPointsPlot
from panel.orbital.trajectory_ellipse import TrajectoryEllipse
from panel.telemetry.ellipse import EllipseData
class OrbitalPointsEllipse(OrbitalPointsPlot):
def update(self, telemetry):
self.periapsis_plot.update_plot(telemetry)
self.apoapsis_plot.update_plot(telemetry)
self.ascending_plot.update_plot(telemetry)
self.descending_plot.update_plot(telemetry)
self.ascending_descending.update_plot(telemetry)
self.vessel_plot.update_plot(telemetry)
class OrbitEllipse(OrbitPlot):
def __init__(self, axes):
OrbitPlot.__init__(self, compute_class=EllipseData)
self.trajectory = TrajectoryEllipse(axes)
self.points = OrbitalPointsEllipse(axes)
| python | 838 |
""" implement the TimedeltaIndex """
from datetime import datetime
import numpy as np
from pandas._libs import NaT, Timedelta, index as libindex
from pandas.util._decorators import Appender, Substitution
from pandas.core.dtypes.common import (
_TD_DTYPE,
is_float,
is_integer,
is_list_like,
is_scalar,
is_timedelta64_dtype,
is_timedelta64_ns_dtype,
pandas_dtype,
)
from pandas.core.dtypes.missing import isna
from pandas.core.accessor import delegate_names
from pandas.core.arrays import datetimelike as dtl
from pandas.core.arrays.timedeltas import TimedeltaArray, _is_convertible_to_td
from pandas.core.base import _shared_docs
import pandas.core.common as com
from pandas.core.indexes.base import Index, _index_shared_docs, maybe_extract_name
from pandas.core.indexes.datetimelike import (
DatetimeIndexOpsMixin,
DatetimelikeDelegateMixin,
DatetimeTimedeltaMixin,
)
from pandas.core.indexes.extension import inherit_names
from pandas.tseries.frequencies import to_offset
class TimedeltaDelegateMixin(DatetimelikeDelegateMixin):
# Most attrs are dispatched via datetimelike_{ops,methods}
# Some are "raw" methods, the result is not re-boxed in an Index
# We also have a few "extra" attrs, which may or may not be raw,
# which we don't want to expose in the .dt accessor.
_raw_properties = {"components", "_box_func"}
_raw_methods = {"to_pytimedelta", "sum", "std", "median", "_format_native_types"}
_delegated_properties = TimedeltaArray._datetimelike_ops + list(_raw_properties)
_delegated_methods = (
TimedeltaArray._datetimelike_methods
+ list(_raw_methods)
+ ["_box_values", "__neg__", "__pos__", "__abs__"]
)
@inherit_names(
[
"_bool_ops",
"_object_ops",
"_field_ops",
"_datetimelike_ops",
"_datetimelike_methods",
"_other_ops",
],
TimedeltaArray,
)
@delegate_names(
TimedeltaArray, TimedeltaDelegateMixin._delegated_properties, typ="property"
)
@delegate_names(
TimedeltaArray,
TimedeltaDelegateMixin._delegated_methods,
typ="method",
overwrite=True,
)
class TimedeltaIndex(
DatetimeTimedeltaMixin, dtl.TimelikeOps, TimedeltaDelegateMixin,
):
"""
Immutable ndarray of timedelta64 data, represented internally as int64, and
which can be boxed to timedelta objects.
Parameters
----------
data : array-like (1-dimensional), optional
Optional timedelta-like data to construct index with.
    unit : str, optional
        Denotes the unit of the arg (D, h, m, s, ms, us, ns) when `data` is
        given as integers or floats.
freq : str or pandas offset object, optional
One of pandas date offset strings or corresponding objects. The string
'infer' can be passed in order to set the frequency of the index as the
inferred frequency upon creation.
copy : bool
Make a copy of input ndarray.
name : object
Name to be stored in the index.
Attributes
----------
days
seconds
microseconds
nanoseconds
components
inferred_freq
Methods
-------
to_pytimedelta
to_series
round
floor
ceil
to_frame
mean
See Also
--------
Index : The base pandas Index type.
Timedelta : Represents a duration between two dates or times.
DatetimeIndex : Index of datetime64 data.
PeriodIndex : Index of Period data.
timedelta_range : Create a fixed-frequency TimedeltaIndex.
Notes
-----
To learn more about the frequency strings, please see `this link
<https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__.
"""
_typ = "timedeltaindex"
_engine_type = libindex.TimedeltaEngine
_comparables = ["name", "freq"]
_attributes = ["name", "freq"]
_is_numeric_dtype = True
_infer_as_myclass = True
# -------------------------------------------------------------------
# Constructors
def __new__(
cls,
data=None,
unit=None,
freq=None,
closed=None,
dtype=_TD_DTYPE,
copy=False,
name=None,
):
name = maybe_extract_name(name, data, cls)
if is_scalar(data):
raise TypeError(
f"{cls.__name__}() must be called with a "
f"collection of some kind, {repr(data)} was passed"
)
if unit in {"Y", "y", "M"}:
raise ValueError(
"Units 'M' and 'Y' are no longer supported, as they do not "
"represent unambiguous timedelta values durations."
)
if isinstance(data, TimedeltaArray):
if copy:
data = data.copy()
return cls._simple_new(data, name=name, freq=freq)
if isinstance(data, TimedeltaIndex) and freq is None and name is None:
if copy:
return data.copy()
else:
return data._shallow_copy()
# - Cases checked above all return/raise before reaching here - #
tdarr = TimedeltaArray._from_sequence(
data, freq=freq, unit=unit, dtype=dtype, copy=copy
)
return cls._simple_new(tdarr._data, freq=tdarr.freq, name=name)
@classmethod
def _simple_new(cls, values, name=None, freq=None, dtype=_TD_DTYPE):
# `dtype` is passed by _shallow_copy in corner cases, should always
# be timedelta64[ns] if present
if not isinstance(values, TimedeltaArray):
values = TimedeltaArray._simple_new(values, dtype=dtype, freq=freq)
else:
if freq is None:
freq = values.freq
assert isinstance(values, TimedeltaArray), type(values)
assert dtype == _TD_DTYPE, dtype
assert values.dtype == "m8[ns]", values.dtype
tdarr = TimedeltaArray._simple_new(values._data, freq=freq)
result = object.__new__(cls)
result._data = tdarr
result._name = name
# For groupby perf. See note in indexes/base about _index_data
result._index_data = tdarr._data
result._reset_identity()
return result
# -------------------------------------------------------------------
def __setstate__(self, state):
"""Necessary for making this object picklable"""
if isinstance(state, dict):
super().__setstate__(state)
else:
raise Exception("invalid pickle state")
_unpickle_compat = __setstate__
# -------------------------------------------------------------------
# Rendering Methods
@property
def _formatter_func(self):
from pandas.io.formats.format import _get_format_timedelta64
return _get_format_timedelta64(self, box=True)
# -------------------------------------------------------------------
# Wrapping TimedeltaArray
def __getitem__(self, key):
result = self._data.__getitem__(key)
if is_scalar(result):
return result
return type(self)(result, name=self.name)
# -------------------------------------------------------------------
@Appender(_index_shared_docs["astype"])
def astype(self, dtype, copy=True):
dtype = pandas_dtype(dtype)
if is_timedelta64_dtype(dtype) and not is_timedelta64_ns_dtype(dtype):
# Have to repeat the check for 'timedelta64' (not ns) dtype
# so that we can return a numeric index, since pandas will return
# a TimedeltaIndex when dtype='timedelta'
result = self._data.astype(dtype, copy=copy)
if self.hasnans:
return Index(result, name=self.name)
return Index(result.astype("i8"), name=self.name)
return DatetimeIndexOpsMixin.astype(self, dtype, copy=copy)
def _maybe_promote(self, other):
if other.inferred_type == "timedelta":
other = TimedeltaIndex(other)
return self, other
def get_value(self, series, key):
"""
Fast lookup of value from 1-dimensional ndarray. Only use this if you
know what you're doing
"""
if _is_convertible_to_td(key):
key = Timedelta(key)
return self.get_value_maybe_box(series, key)
try:
value = Index.get_value(self, series, key)
except KeyError:
try:
loc = self._get_string_slice(key)
return series[loc]
except (TypeError, ValueError, KeyError):
pass
try:
return self.get_value_maybe_box(series, key)
except (TypeError, ValueError, KeyError):
raise KeyError(key)
else:
return com.maybe_box(self, value, series, key)
def get_value_maybe_box(self, series, key: Timedelta):
values = self._engine.get_value(com.values_from_object(series), key)
return com.maybe_box(self, values, series, key)
def get_loc(self, key, method=None, tolerance=None):
"""
Get integer location for requested label
Returns
-------
loc : int
"""
if is_list_like(key) or (isinstance(key, datetime) and key is not NaT):
# GH#20464 datetime check here is to ensure we don't allow
# datetime objects to be incorrectly treated as timedelta
# objects; NaT is a special case because it plays a double role
# as Not-A-Timedelta
raise TypeError
if isna(key):
key = NaT
if tolerance is not None:
# try converting tolerance now, so errors don't get swallowed by
# the try/except clauses below
tolerance = self._convert_tolerance(tolerance, np.asarray(key))
if _is_convertible_to_td(key) or key is NaT:
key = Timedelta(key)
return Index.get_loc(self, key, method, tolerance)
try:
return Index.get_loc(self, key, method, tolerance)
except (KeyError, ValueError, TypeError):
try:
return self._get_string_slice(key)
except (TypeError, KeyError, ValueError):
pass
try:
stamp = Timedelta(key)
return Index.get_loc(self, stamp, method, tolerance)
except (KeyError, ValueError):
raise KeyError(key)
def _maybe_cast_slice_bound(self, label, side, kind):
"""
If label is a string, cast it to timedelta according to resolution.
Parameters
----------
label : object
side : {'left', 'right'}
kind : {'ix', 'loc', 'getitem'}
Returns
-------
label : object
"""
assert kind in ["ix", "loc", "getitem", None]
if isinstance(label, str):
parsed = Timedelta(label)
lbound = parsed.round(parsed.resolution_string)
if side == "left":
return lbound
else:
return lbound + to_offset(parsed.resolution_string) - Timedelta(1, "ns")
elif is_integer(label) or is_float(label):
self._invalid_indexer("slice", label)
return label
def _get_string_slice(self, key):
if is_integer(key) or is_float(key) or key is NaT:
self._invalid_indexer("slice", key)
loc = self._partial_td_slice(key)
return loc
def _partial_td_slice(self, key):
# given a key, try to figure out a location for a partial slice
if not isinstance(key, str):
return key
raise NotImplementedError
@Substitution(klass="TimedeltaIndex")
@Appender(_shared_docs["searchsorted"])
def searchsorted(self, value, side="left", sorter=None):
if isinstance(value, (np.ndarray, Index)):
value = np.array(value, dtype=_TD_DTYPE, copy=False)
else:
value = Timedelta(value).asm8.view(_TD_DTYPE)
return self.values.searchsorted(value, side=side, sorter=sorter)
def is_type_compatible(self, typ) -> bool:
return typ == self.inferred_type or typ == "timedelta"
@property
def inferred_type(self) -> str:
return "timedelta64"
def insert(self, loc, item):
"""
Make new Index inserting new item at location
Parameters
----------
loc : int
item : object
If not either a Python datetime or a numpy integer-like, returned
Index dtype will be object rather than datetime.
Returns
-------
new_index : Index
"""
# try to convert if possible
if _is_convertible_to_td(item):
try:
item = Timedelta(item)
except ValueError:
# e.g. str that can't be parsed to timedelta
pass
elif is_scalar(item) and isna(item):
# GH 18295
item = self._na_value
freq = None
if isinstance(item, Timedelta) or (is_scalar(item) and isna(item)):
# check freq can be preserved on edge cases
if self.freq is not None:
if (loc == 0 or loc == -len(self)) and item + self.freq == self[0]:
freq = self.freq
elif (loc == len(self)) and item - self.freq == self[-1]:
freq = self.freq
item = Timedelta(item).asm8.view(_TD_DTYPE)
try:
new_tds = np.concatenate(
(self[:loc].asi8, [item.view(np.int64)], self[loc:].asi8)
)
return self._shallow_copy(new_tds, freq=freq)
except (AttributeError, TypeError):
# fall back to object index
if isinstance(item, str):
return self.astype(object).insert(loc, item)
raise TypeError("cannot insert TimedeltaIndex with incompatible label")
TimedeltaIndex._add_comparison_ops()
TimedeltaIndex._add_logical_methods_disabled()
def timedelta_range(
start=None, end=None, periods=None, freq=None, name=None, closed=None
) -> TimedeltaIndex:
"""
Return a fixed frequency TimedeltaIndex, with day as the default
frequency.
Parameters
----------
start : str or timedelta-like, default None
Left bound for generating timedeltas.
end : str or timedelta-like, default None
Right bound for generating timedeltas.
periods : int, default None
Number of periods to generate.
freq : str or DateOffset, default 'D'
Frequency strings can have multiples, e.g. '5H'.
name : str, default None
Name of the resulting TimedeltaIndex.
closed : str, default None
Make the interval closed with respect to the given frequency to
the 'left', 'right', or both sides (None).
Returns
-------
rng : TimedeltaIndex
Notes
-----
Of the four parameters ``start``, ``end``, ``periods``, and ``freq``,
exactly three must be specified. If ``freq`` is omitted, the resulting
``TimedeltaIndex`` will have ``periods`` linearly spaced elements between
``start`` and ``end`` (closed on both sides).
To learn more about the frequency strings, please see `this link
<https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__.
Examples
--------
>>> pd.timedelta_range(start='1 day', periods=4)
TimedeltaIndex(['1 days', '2 days', '3 days', '4 days'],
dtype='timedelta64[ns]', freq='D')
The ``closed`` parameter specifies which endpoint is included. The default
behavior is to include both endpoints.
>>> pd.timedelta_range(start='1 day', periods=4, closed='right')
TimedeltaIndex(['2 days', '3 days', '4 days'],
dtype='timedelta64[ns]', freq='D')
The ``freq`` parameter specifies the frequency of the TimedeltaIndex.
Only fixed frequencies can be passed, non-fixed frequencies such as
'M' (month end) will raise.
>>> pd.timedelta_range(start='1 day', end='2 days', freq='6H')
TimedeltaIndex(['1 days 00:00:00', '1 days 06:00:00', '1 days 12:00:00',
'1 days 18:00:00', '2 days 00:00:00'],
dtype='timedelta64[ns]', freq='6H')
Specify ``start``, ``end``, and ``periods``; the frequency is generated
automatically (linearly spaced).
>>> pd.timedelta_range(start='1 day', end='5 days', periods=4)
TimedeltaIndex(['1 days 00:00:00', '2 days 08:00:00', '3 days 16:00:00',
'5 days 00:00:00'],
dtype='timedelta64[ns]', freq=None)
"""
if freq is None and com.any_none(periods, start, end):
freq = "D"
freq, freq_infer = dtl.maybe_infer_freq(freq)
tdarr = TimedeltaArray._generate_range(start, end, periods, freq, closed=closed)
return TimedeltaIndex._simple_new(tdarr._data, freq=tdarr.freq, name=name)
| python | 16,998 |
# Copyright (C) 2019 Quaternion Risk Management Ltd
# All rights reserved.
from OREAnalytics import *
fileLogger = FileLogger("log.txt")
Log.instance().registerLogger(fileLogger)
mask = 7
Log.instance().setMask(mask)
assert mask == Log.instance().mask()
Log.instance().switchOn()
ALOG("Alert Message")
CLOG("Critical Message")
ELOG("Error Message")
WLOG("Warning Message")
LOG("Notice Message")
DLOG("Debug Message")
TLOG("Data Message")
bufferLogger = BufferLogger(ORE_NOTICE)
Log.instance().registerLogger(bufferLogger)
mask = 255
Log.instance().setMask(mask)
msg_d = "This is a debug message."
msg_w = "This is a warning message."
msg_e = "This is a error message."
DLOG(msg_d)
WLOG(msg_w)
ELOG(msg_e)
#msg_d_buf = bufferLogger.next()  # filtered out: debug is below the buffer's NOTICE level
msg_w_buf = bufferLogger.next()
msg_e_buf = bufferLogger.next()
print("Printing log message ...")
print(msg_w_buf)
print(msg_e_buf)
print("End of printing log message.")
# FIFO
assert msg_w in msg_w_buf
assert msg_e in msg_e_buf
| python | 983 |
#!/usr/bin/env python
import nxt.locator
from nxt.sensor import *
from nxt.motor import *
from time import time, sleep
from threading import Thread
from pickle import dump, load
from os import listdir, remove, system
from os.path import isfile, join
class HaventGottenToThatYetError(Exception):
pass
class Brick:
def __init__(self, name=None):
self.draw = Display()
self.calibrate = Calibrator()
self.dev = nxt.locator.find_one_brick(name=name)
self.a = mjMotor(self.dev, 0)
self.b = mjMotor(self.dev, 1)
self.c = mjMotor(self.dev, 2)
#self.sync = mjSync(self.b.dev, self.c.dev)
def move(self, motors, power, degrees=None, rotations=None, seconds=None, steer=None, brake=True):
"""Move the robot. Use only one (or none at all) out of degrees, rotations, and seconds.
You can set steer when using multiple motors. -100 is hard left, 0 is center, 100 is hard right."""
power /= 100.0
power *= 127
power = int(round(power))
motorObjs = []
for motor in motors:
if motor.upper() == "A" or motor == 0:
motorObj = self.a
elif motor.upper() == "B" or motor == 1:
motorObj = self.b
else: # C
motorObj = self.c
motorObjs.append(motorObj)
# Is power 0? If it is, we're stopping, and that's all.
if power == 0:
for motorObj in motorObjs:
if brake:
motorObj.brake()
else:
motorObj.coast()
return
# How many motors?
if len(motors) == 2:
# We're syncing. Is steer provided?
if steer != None:
if steer < 0:
motorObjs.reverse() # Can't have negative steer
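                    # (Added note) SynchronizedMotors below presumably expects a
                    # non-negative turn ratio, so a left turn is expressed by
                    # swapping the two motors and passing abs(steer) instead.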
else:
# I guess we aren't steering.
steer = 0
motors = SynchronizedMotors(motorObjs[0].dev, motorObjs[1].dev, abs(steer))
# Now, how much do we turn?
if degrees != None:
#print("Turning motor at power "+str(power)+" and degrees "+str(degrees))
motors.turn(power, degrees, brake)
elif rotations != None:
motors.turn(power, rotations*360, brake)
elif seconds != None:
ts = time()
motors.run(power)
while time() < ts + seconds:
pass
if brake:
motors.brake()
else:
motors.idle()
else: # unlimited
motors.run(power)
elif len(motors) == 1: # Just steal code from the motor block
self.motor(motors[0], power, degrees, rotations, seconds, brake)
elif len(motors) == 3:
# def motor(self, port, power, degrees=None, rotations=None, seconds=None, brake=True):'
self.thread(self.motor, "A", power, degrees, rotations, seconds, brake)
self.thread(self.motor, "B", power, degrees, rotations, seconds, brake)
self.motor("C", power, degrees, rotations, seconds, brake)
def record(self, name, seconds=None):
"""Record movements.
Physically move the robot with your hands to record."""
raise HaventGottenToThatYetError, "I ain't done writing this code yet."
def play(self, name):
"""Play pre-recorded movements.
First, record with the record() function."""
raise HaventGottenToThatYetError, "I ain't done writing this code yet."
def playSound(self, name):
"""Play a sound file."""
name = "/home/pi/mindjackerSounds/"+name+".mp3"
system("mpg123 "+name)
def playNote(self, note, time, nxt=True, wait=True):
"""Play a note, e.g., 'a5', 'f#3', 'g#', 'c', etc.
If wait is false, don't wait for completion."""
# Separate the note from the octave
if len(note) == 1:
noteStr = note.lower()
octave = 5
elif len(note) == 3:
noteStr = note[0:2].lower()
octave = int(note[2])
elif len(note) == 2:
if note[1] == "#":
noteStr = note.lower()
octave = 5
else:
noteStr = note[0].lower()
octave = int(note[1])
# I got this algorithm from www.musique.com/schem/freq.html
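        # (Added note) 27.5 Hz is A0 and 1.059463 ~= 2**(1/12), the equal-
        # temperament semitone ratio, so z below is roughly
        # 27.5 * 2**(octave-1) * 2**(noteNum/12); e.g. 'a5' gives 440 Hz.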
notes = {"a":0, "a#":1, "b":2, "c":3, "c#":4, "d":5, "d#":6, "e":7, "f":8, "f#":9, "g":10, "g#":11}
noteNum = notes[noteStr]
octave -= 1
a = 2**octave
b = 1.059463**noteNum
z = round(275*a*b)/10
note = z
self.dev.play_tone(note, time*1000)
if wait:
sleep(time)
def touch(self, port):
"""Returns the state of the touch sensor."""
return Touch(self.dev, port-1).get_sample()
def sound(self, port):
"""Returns the level of sound from 0 to 100."""
level = Sound(self.dev, port-1).get_sample()
level /= 1023.0
level *= 100
level = int(round(level))
return level
def light(self, port):
"""Returns the level of light from 0 to 100.
UNTESTED!!!"""
level = Light(self.dev, port-1).get_sample()
level /= 1023.0
level *= 100
level = int(round(level))
return level
def lamp(self, port, active):
"""Turn on or off the lamp on the light sensor
Of selected port."""
Light(self.dev, port-1).set_illuminated(active)
# This doesn't work and returns weird values (1 or 5) regardless of color
# def color(self, port):
# """Returns the color read by the color sensor."""
# colorType = Color20(self.dev, port-1).get_sample()
# # if colorType == common.Type.COLORFULL:
# # return "White"
# # elif colorType == common.Type.COLORNONE:
# # return "Black"
# # elif colorType == common.Type.COLORRED:
# # return "Red"
# # elif colorType == common.Type.COLORGREEN:
# # return "Green"
# # elif colorType == common.Type.COLORBLUE:
# # return "Blue"
# return colorType
def colorLamp(self, port, color):
colorType = common.Type.COLORFULL # White
if color.lower() == "red":
colorType = common.Type.COLORRED
elif color.lower() == "green":
colorType = common.Type.COLORGREEN
elif color.lower() == "blue":
colorType = common.Type.COLORBLUE
elif color.lower() == "black" or color.lower() == "off":
colorType = common.Type.COLORNONE
Color20(self.dev, port-1).set_light_color(colorType)
def ultrasonic(self, port, convertToIn=True):
"""Returns the distance to an object in cm.
If convertToIn is true, the value is converted to in."""
dist = Ultrasonic(self.dev, port-1).get_sample()
if convertToIn:
dist /= 2.54
dist = int(round(dist))
return dist
ultra = ultrasonic
def buttons(self):
"""Returns the current state of the NXT buttons
In the form of a dict.
Use buttons()["enter"] to get the current state of the enter button, etc."""
raise HaventGottenToThatYetError, "I ain't done writing this code yet."
def encoder(self, port):
"""Returns the value of the encoder (rotation sensor/tachometer)
On the motor of the specified port."""
# Which motor?
if port.upper() == "A" or port == 0:
myMotor = self.a
elif port.upper() == "B" or port == 1:
myMotor = self.b
else: # C
myMotor = self.c
return myMotor.dev.get_tacho().rotation_count
def thread(self, func, *args, **kwargs):
"""Create a new thread with the specified function and arguments."""
newThread = Thread(target=func, args=args, kwargs=kwargs)
newThread.daemon = True
newThread.start()
def read(self, filename):
"""Read the file and return the contents. Uses pickle when necessary."""
pickled = False
onlyfiles = [f for f in listdir("/home/pi/mindjackerFiles/") if isfile(join("/home/pi/mindjackerFiles/", f))]
for f in onlyfiles:
if filename + ".txt" == f:
filename = "/home/pi/mindjackerFiles/"+filename+".txt"
break
if filename + ".pkl" == f:
filename = "/home/pi/mindjackerFiles/"+filename+".pkl"
pickled = True
break
else:
raise IOError, "File does not exist"
if pickled:
with open(filename, "rb") as myFile:
return load(myFile)
with open(filename, "r") as myFile:
return myFile.read()
def write(self, filename, data):
"""Write to the file. Uses pickle when necessary."""
if data == "": # If there's no data, we're asked to remove.
try:
remove("/home/pi/mindjackerFiles/"+filename+".txt")
except OSError:
pass
try:
remove("/home/pi/mindjackerFiles/"+filename+".pkl")
except OSError:
pass
return
# What follows: Making sure we don't have myfile.txt and myfile.pkl at the same time.
onlyfiles = [f for f in listdir("/home/pi/mindjackerFiles/") if isfile(join("/home/pi/mindjackerFiles/", f))]
for f in onlyfiles:
if filename + ".txt" == f:
remove("/home/pi/mindjackerFiles/"+filename+".txt")
if filename + ".pkl" == f:
remove("/home/pi/mindjackerFiles/"+filename+".pkl")
# Now should we use pickle?
if type(data) in [int, str, float, bool, long, unicode]:
self._writeText(filename, data)
else:
self._pickle(filename, data)
def _writeText(self, filename, data):
with open("/home/pi/mindjackerFiles/"+filename+".txt", "w") as myFile:
myFile.write(str(data))
def _pickle(self, filename, data):
with open("/home/pi/mindjackerFiles/"+filename+".pkl", "wb") as myFile:
dump(data, myFile)
def keepAlive(self):
"""Stop the NXT from falling asleep."""
raise HaventGottenToThatYetError, "I ain't done writing this code yet."
def motor(self, port, power, degrees=None, rotations=None, seconds=None, brake=True):
"""Move one motor. Set only one (or none at all) of degrees, rotations, seconds."""
power /= 100.0
power *= 127
power = int(round(power))
# Which motor?
if port.upper() == "A" or port == 0:
myMotor = self.a
elif port.upper() == "B" or port == 1:
myMotor = self.b
else: # C
myMotor = self.c
# If power is zero, we're being asked to stop the motor.
if power == 0:
if brake:
myMotor.brake()
else:
myMotor.coast()
return
# What are we doing? For a duration, or unlimited?
if degrees != None:
myMotor.turn(power, degrees, brake)
elif rotations != None:
myMotor.turn(power, rotations*360, brake)
elif seconds != None:
ts = time()
myMotor.run(power)
while time() < ts + seconds:
pass
if brake:
myMotor.brake()
else:
myMotor.coast()
else: # No params provided, run unlimited.
myMotor.run(power)
def resetMotor(self, port=None):
"""Reset the motor(s) internal encoders."""
# Which motor?
if port.upper() == "A" or port == 0:
myMotor = self.a
elif port.upper() == "B" or port == 1:
myMotor = self.b
else: # C
myMotor = self.c
myMotor.dev.reset_position(False)
class Display:
def __init__(self):
pass
def text(self, text, x, y):
"""Draw text on the screen."""
raise HaventGottenToThatYetError, "I ain't done writing this code yet."
def line(self, x1, y1, x2, y2):
"""Draw a line on the screen."""
raise HaventGottenToThatYetError, "I ain't done writing this code yet."
def circle(self, x, y, radius):
"""Draw a circle on the screen."""
raise HaventGottenToThatYetError, "I ain't done writing this code yet."
def image(self, filename):
"""Draw an image on the screen."""
raise HaventGottenToThatYetError, "I ain't done writing this code yet."
class Calibrator:
def __init__(self):
pass
def sound(self, end):
"""Calibrate the sound sensor.
end: False for minimum, True for maximum."""
raise HaventGottenToThatYetError, "I ain't done writing this code yet."
def light(self):
"""Calibrate the light sensor.
end: False for minimum, True for maximum."""
raise HaventGottenToThatYetError, "I ain't done writing this code yet."
class mjMotor:
def __init__(self, brick, port):
self.dev = Motor(brick, port)
def degrees(self):
'''Get the current rotation'''
return self.dev.get_tacho()
def run(self, power):
'''Run the motor indefinitely'''
power /= 128.0
power *= 100
self.dev.run(int(round(power)))
def brake(self):
'''Stop the motor quickly'''
self.dev.brake()
def coast(self):
'''Motor coasts to a stop'''
self.dev.idle()
def turn(self, power, degrees, brake=True):
'''Turn a certain number of degrees'''
power /= 128.0
power *= 100
self.dev.turn(int(round(power)), degrees, brake)
| python | 11,641 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Facilities for creating multiple test combinations.
Here is an example of testing various optimizers in Eager and Graph mode:
class AdditionExample(test.TestCase, parameterized.TestCase):
@combinations.generate(
combinations.combine(mode=["graph", "eager"],
optimizer=[AdamOptimizer(),
GradientDescentOptimizer()]))
def testOptimizer(self, optimizer):
... f(optimizer)...
This will run `testOptimizer` 4 times with the specified optimizers: 2 in
Eager and 2 in Graph mode.
The test will be provided with arguments that match the arguments of combine
by name. It is necessary to request all arguments, except for `mode`, which is
optional.
`combine()` function is available for creating a cross product of various
options. `times()` function exists for creating a product of N `combine()`-ed
results. See below.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import OrderedDict
import sys
import types
import unittest
from absl.testing import parameterized
import six
from tensorflow.contrib.distribute.python import mirrored_strategy as mirrored_lib
from tensorflow.contrib.distribute.python import one_device_strategy as one_device_lib
from tensorflow.contrib.distribute.python import tpu_strategy as tpu_lib
from tensorflow.contrib.optimizer_v2 import adam as adam_v2
from tensorflow.contrib.optimizer_v2 import gradient_descent as gradient_descent_v2
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from tensorflow.python.training import adam
from tensorflow.python.training import distribute as distribute_lib
from tensorflow.python.training import gradient_descent
from tensorflow.python.util import tf_inspect
GPU_TEST = "test_gpu" in sys.argv[0]
TPU_TEST = "test_tpu" in sys.argv[0]
def generate(combinations):
"""A decorator for generating test cases of a test method or a test class.
Args:
combinations: a list of dictionaries created using combine() and times().
Restrictions:
-- the "mode" argument can be either "eager" or "graph". It's "graph" by
default.
-- arguments of the test method must match by name to get the corresponding
value of the combination. Tests must accept all arguments except the
"mode", "required_tpu" and "required_gpus".
-- "distribution" argument is special and optional. It is meant for passing
instances of DistributionStrategy. Each instance is to be passed as via
`NamedDistribution`. If using "distribution", "required_gpus" and
"required_tpu" should be specified via the NamedDistribution instance,
rather than as separate arguments.
-- "required_tpu" argument is special and optional. If not `None`, then the
test will be skipped if TPUs aren't available.
-- "required_gpus" argument is special and optional. If not `None`, then the
test will be skipped if the specified number of GPUs aren't available.
Returns:
a decorator that will cause the test method or the test class to be run
under the specified conditions.
Raises:
ValueError - if "mode" argument wasn't either "eager" or "graph" or if other
arguments were not accepted by the test method.
"""
def decorator(test_method_or_class):
"""The decorator to be returned."""
# Generate good test names that can be used with --test_filter.
named_combinations = []
for combination in combinations:
# We use OrderedDicts in `combine()` and `times()` to ensure stable
# order of keys in each dictionary.
assert isinstance(combination, OrderedDict)
name = "".join([
"_{}_{}".format(
"".join(filter(str.isalnum, key)),
"".join(filter(str.isalnum, str(value))))
for key, value in combination.items()
])
named_combinations.append(
OrderedDict(
list(combination.items()) + [("testcase_name",
"_test{}".format(name))]))
if isinstance(test_method_or_class, type):
class_object = test_method_or_class
class_object._test_method_ids = test_method_ids = {}
for name, test_method in six.iteritems(class_object.__dict__.copy()):
if (name.startswith(unittest.TestLoader.testMethodPrefix) and
isinstance(test_method, types.FunctionType)):
delattr(class_object, name)
methods = {}
parameterized._update_class_dict_for_param_test_case(
class_object.__name__, methods, test_method_ids, name,
parameterized._ParameterizedTestIter(
_augment_with_special_arguments(test_method),
named_combinations, parameterized._NAMED, name))
for method_name, method in six.iteritems(methods):
setattr(class_object, method_name, method)
return class_object
else:
test_method = _augment_with_special_arguments(test_method_or_class)
return parameterized.named_parameters(*named_combinations)(test_method)
return decorator
def _augment_with_special_arguments(test_method):
def decorated(self, **kwargs):
"""A wrapped test method that treats some arguments in a special way."""
mode = kwargs.pop("mode", "graph")
distribution = kwargs.pop("distribution", None)
required_tpu = kwargs.pop("required_tpu", False)
required_gpus = kwargs.pop("required_gpus", None)
if distribution:
assert required_gpus is None, (
"Do not use `required_gpus` and `distribution` together.")
assert required_tpu is False, (
"Do not use `required_tpu` and `distribution` together.")
kwargs["distribution"] = distribution.strategy
required_gpus = distribution.required_gpus
required_tpu = distribution.required_tpu
if required_tpu and not TPU_TEST:
self.skipTest("Test requires a TPU, but it's not available.")
if not required_tpu and TPU_TEST:
self.skipTest("Test that doesn't require a TPU.")
if not required_gpus:
if GPU_TEST:
self.skipTest("Test that doesn't require GPUs.")
elif context.num_gpus() < required_gpus:
self.skipTest(
"{} GPUs are not available for this test. {} GPUs are available".
format(required_gpus, context.num_gpus()))
# At this point, `kwargs` doesn't have `required_gpus` or `required_tpu`
# that the user might have specified. `kwargs` still has `mode`, which
# the test is allowed to accept or ignore.
requested_arguments = tf_inspect.getfullargspec(test_method).args
missing_arguments = set(list(kwargs.keys()) + ["self"]).difference(
set(requested_arguments + ["mode"]))
if missing_arguments:
raise ValueError("The test is missing arguments {} .".format(
missing_arguments))
kwargs_to_pass = {}
for arg in requested_arguments:
if arg == "self":
kwargs_to_pass[arg] = self
else:
kwargs_to_pass[arg] = kwargs[arg]
if mode == "eager":
with ops.Graph().as_default(), context.eager_mode():
test_method(**kwargs_to_pass)
elif mode == "graph":
with ops.Graph().as_default(), context.graph_mode():
test_method(**kwargs_to_pass)
else:
raise ValueError(
"'mode' has to be either 'eager' or 'graph' and not {}".format(
mode))
return decorated
def combine(**kwargs):
"""Generate combinations based on its keyword arguments.
Two sets of returned combinations can be concatenated using +. Their product
can be computed using `times()`.
Args:
**kwargs: keyword arguments of form `option=[possibilities, ...]`
or `option=the_only_possibility`.
Returns:
a list of dictionaries for each combination. Keys in the dictionaries are
the keyword argument names. Each key has one value - one of the
corresponding keyword argument values.
"""
if not kwargs:
return [OrderedDict()]
sort_by_key = lambda k: k[0][0]
kwargs = OrderedDict(sorted(kwargs.items(), key=sort_by_key))
first = list(kwargs.items())[0]
rest = dict(list(kwargs.items())[1:])
rest_combined = combine(**rest)
key = first[0]
values = first[1]
if not isinstance(values, list):
values = [values]
return [
OrderedDict(sorted(list(combined.items()) + [(key, v)], key=sort_by_key))
for v in values
for combined in rest_combined
]
def times(*combined):
"""Generate a product of N sets of combinations.
times(combine(a=[1,2]), combine(b=[3,4])) == combine(a=[1,2], b=[3,4])
Args:
*combined: N lists of dictionaries that specify combinations.
Returns:
a list of dictionaries for each combination.
Raises:
ValueError: if some of the inputs have overlapping keys.
"""
assert combined
if len(combined) == 1:
return combined[0]
first = combined[0]
rest_combined = times(*combined[1:])
combined_results = []
for a in first:
for b in rest_combined:
if set(a.keys()).intersection(set(b.keys())):
raise ValueError("Keys need to not overlap: {} vs {}".format(
a.keys(), b.keys()))
combined_results.append(OrderedDict(list(a.items()) + list(b.items())))
return combined_results
class NamedObject(object):
"""A class that translates an object into a good test name."""
def __init__(self, name, obj):
self._name = name
self._obj = obj
def __getattr__(self, name):
return getattr(self._obj, name)
def __call__(self, *args, **kwargs):
return self._obj(*args, **kwargs)
def __repr__(self):
return self._name
class NamedDistribution(object):
"""Translates DistributionStrategy and its data into a good name."""
def __init__(self, name, distribution_fn, required_gpus=None,
required_tpu=False):
self._distribution_fn = distribution_fn
self._name = name
self._required_gpus = required_gpus
self._required_tpu = required_tpu
def __repr__(self):
return self._name
@property
def strategy(self):
return self._distribution_fn()
@property
def required_gpus(self):
return self._required_gpus
@property
def required_tpu(self):
return self._required_tpu
# pylint: disable=g-long-lambda
default_strategy = NamedDistribution(
"Default",
lambda: distribute_lib._default_distribution_strategy, # pylint: disable=protected-access
required_gpus=None)
one_device_strategy = NamedDistribution(
"OneDeviceCPU", lambda: one_device_lib.OneDeviceStrategy("/cpu:0"),
required_gpus=None)
tpu_strategy_single_iteration = NamedDistribution(
"TPUSingleIteration",
lambda: tpu_lib.TPUStrategy(iterations_per_step=1),
required_tpu=True)
tpu_strategy = NamedDistribution("TPU", tpu_lib.TPUStrategy, required_tpu=True)
# Note that we disable prefetching for testing since prefetching makes
# the input non-deterministic.
mirrored_strategy_with_gpu_and_cpu = NamedDistribution(
"MirroredCPUAndGPU",
lambda: mirrored_lib.MirroredStrategy(
["/gpu:0", "/cpu:0"], prefetch_on_device=False),
required_gpus=1)
mirrored_strategy_with_two_gpus = NamedDistribution(
"Mirrored2GPUs",
lambda: mirrored_lib.MirroredStrategy(
["/gpu:0", "/gpu:1"], prefetch_on_device=False),
required_gpus=2)
adam_optimizer_v1_fn = NamedObject(
"AdamV1", lambda: adam.AdamOptimizer(0.2, epsilon=1))
gradient_descent_optimizer_v1_fn = NamedObject(
"GradientDescentV1", lambda: gradient_descent.GradientDescentOptimizer(0.2))
adam_optimizer_v2_fn = NamedObject(
"AdamV2", lambda: adam_v2.AdamOptimizer(0.2, epsilon=1))
gradient_descent_optimizer_v2_fn = NamedObject(
"GradientDescentV2",
lambda: gradient_descent_v2.GradientDescentOptimizer(0.2))
graph_and_eager_modes = ["graph", "eager"]
def distributions_and_v1_optimizers():
"""A common set of combination with DistributionStrategies and Optimizers."""
return combine(
distribution=[
one_device_strategy, mirrored_strategy_with_gpu_and_cpu,
mirrored_strategy_with_two_gpus
],
optimizer_fn=[adam_optimizer_v1_fn, gradient_descent_optimizer_v1_fn])
def distributions_and_v2_optimizers():
"""DistributionStrategies and V2 Optimizers."""
return combine(
distribution=[
one_device_strategy, mirrored_strategy_with_gpu_and_cpu,
mirrored_strategy_with_two_gpus
],
optimizer_fn=[adam_optimizer_v2_fn, gradient_descent_optimizer_v2_fn])
| python | 13,257 |
#!/usr/bin/env python3.7
import sys
import socket
from ast import literal_eval
from gmpy2 import gcdext
def read_data(file):
return file.readline().decode().split(':')[-1].strip()
def main():
IP = sys.argv[1] if len(sys.argv) > 1 else '0.0.0.0'
PORT = 7702
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
sock.settimeout(30)
sock.connect((IP, PORT))
file = sock.makefile('rwb')
print(file.readline().strip().decode())
print(file.readline().strip().decode())
sender_n, sender_e = literal_eval(read_data(file))
receiver_n, receiver_e = literal_eval(read_data(file))
assert sender_n == receiver_n
n = sender_n
message1 = int(read_data(file))
message2 = int(read_data(file))
message3 = int(read_data(file))
g, x, y = gcdext(sender_e, receiver_e)
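        # (Added note) Common-modulus attack: gcdext returns g, x, y with
        # x*sender_e + y*receiver_e == g.  With g == 1 and message1/message3
        # presumably being the flag encrypted under sender_e and receiver_e
        # against the shared modulus n, message1**x * message3**y == flag mod n.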
if g > 1:
return main()
flag = int((pow(message1, x, n) * pow(message3, y, n)) % n)
print(flag.to_bytes((n.bit_length() + 7) // 8, 'big').strip(b'\x00'))
return
if __name__ == '__main__':
main()
| python | 1,120 |
#!/usr/bin/python
""" lecture and example code for decision tree unit """
import sys
sys.path.insert(0,'../../lib')
from class_vis import prettyPicture, output_image
from prep_terrain_data import makeTerrainData
import matplotlib.pyplot as plt
import numpy as np
import pylab as pl
from sklearn.metrics import accuracy_score
features_train, labels_train, features_test, labels_test = makeTerrainData()
def classify(features_train, labels_train):
from sklearn.tree import DecisionTreeClassifier
clf = DecisionTreeClassifier(
criterion="gini",
splitter="best",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features=None,
# random_state=None,
random_state=0,
max_leaf_nodes=None,
min_impurity_decrease=0.,
min_impurity_split=None,
class_weight=None,
presort='deprecated',
ccp_alpha=0.0)
clf.fit(features_train, labels_train, sample_weight=None, check_input=True, X_idx_sorted=None)
return clf
clf = classify(features_train, labels_train)
# store your predictions in a list named pred
# pred = clf.predict(features_test)
# acc = accuracy_score(pred, labels_test)
acc = clf.score(features_test, labels_test)
def submitAccuracy():
return acc
print(acc)
#### grader code, do not modify below this line
prettyPicture(clf, features_test, labels_test)
output_image('test.png')
| python | 1,603 |
# Gemma IO demo - captouch
import time
import board
import touchio
touch0 = touchio.TouchIn(board.A0)
touch1 = touchio.TouchIn(board.A1)
touch2 = touchio.TouchIn(board.A2)
while True:
if touch0.value:
print("A0 touched!")
if touch1.value:
print("A1 touched!")
if touch2.value:
print("A2 touched!")
time.sleep(0.01)
| python | 359 |
#!/usr/bin/env python
#
# Example of one object under gravity with one contactor and a ground
# using the Siconos proposed mechanics API
#
from siconos.mechanics.collision.tools import Contactor
from siconos.io.mechanics_run import MechanicsHdf5Runner
from siconos.mechanics.collision.bullet import SiconosBulletOptions
import siconos.numerics as sn
import siconos.kernel as sk
# Creation of the hdf5 file for input/output
with MechanicsHdf5Runner() as io:
# Definition of a sphere
io.add_primitive_shape('Disk', 'Disk', (2,),
insideMargin=0.0, outsideMargin=0.0)
# Definition of the ground shape
io.add_primitive_shape('Ground', 'Box2d', (10, 1),
insideMargin=0.0, outsideMargin=0.0)
# Definition of a non smooth law. As no group ids are specified it
# is between contactors of group id 0.
io.add_Newton_impact_friction_nsl('contact', mu=0.1, e=0.5)
# The sphere object made with an unique Contactor : the sphere shape.
# As a mass is given, it is a dynamic system involved in contact
# detection and in the simulation. With no group id specified the
# Contactor belongs to group 0
io.add_object('disk', [Contactor('Disk')],
translation=[0, 5.],
velocity=[0, 0, 0.5],
mass=1., inertia =2.0)
# the ground object made with the ground shape. As the mass is
# not given, it is a static object only involved in contact
# detection.
io.add_object('ground', [Contactor('Ground')],
translation=[0, -.5])
# Run the simulation from the inputs previously defined and add
# results to the hdf5 file. The visualisation of the output may be done
# with the vview command.
bullet_options = SiconosBulletOptions()
bullet_options.worldScale = 1.0
bullet_options.contactBreakingThreshold = 0.04
bullet_options.dimension = 1
options = sk.solver_options_create(sn.SICONOS_FRICTION_2D_NSGS)
options.iparam[sn.SICONOS_IPARAM_MAX_ITER] = 100000
options.dparam[sn.SICONOS_DPARAM_TOL] = 1e-8
with MechanicsHdf5Runner(mode='r+') as io:
# By default earth gravity is applied and the units are those
# of the International System of Units.
io.run(verbose=True,
with_timer=False,
bullet_options=bullet_options,
face_class=None,
edge_class=None,
t0=0,
T=8,
h=0.001,
multipoints_iterations=True,
theta=0.50001,
Newton_max_iter=1,
set_external_forces=None,
solver_options=options,
numerics_verbose=True,
output_frequency=None)
| python | 2,672 |
#!/usr/bin/env python
# PySCUBA/src/PySCUBA/Preprocessing.py
# Author: Gregory Giecold for the GC Yuan Lab
# Affiliation: Harvard University
# Contact: [email protected]; [email protected]
from __future__ import division
from operator import and_
from os import getcwd, makedirs, path
import re
from struct import calcsize, unpack
from sys import exit
from warnings import warn
import numpy as np
from rpy2.rinterface import NULL, TRUE
from rpy2.robjects import numpy2ri
from rpy2.robjects.packages import importr
from sklearn.manifold import TSNE
from . import Tree_classes
__all__ = ['Annotation', 'cytometry_preprocess', 'Cyto_data',
'FCS_handler', 'get_FCS_data', 'infer_pseudotime',
'PCR_preprocess', 'RNASeq_preprocess']
def infer_pseudotime(data, output_directory, tag = '', pcv_method = 'Rprincurve',
anchor_gene = None, markers = None):
assert pcv_method in {'Rprincurve'} # taking into account the possibility of adding
# in future versions other methods
# for principal curve analysis
N_dim = 3
model = TSNE(n_components = N_dim)
TSNE_data = model.fit_transform(data)
if pcv_method == 'Rprincurve':
with open(path.join(output_directory, "{0}_TSNE_d{1}.tsv".format(tag, N_dim)),
'w') as f:
            f.write('\t'.join(['T{0}'.format(k) for k in range(1, N_dim + 1)]))
f.write('\n')
np.savetxt(f, TSNE_data, fmt = '%.6f', delimiter = '\t')
numpy2ri.activate()
princurve = importr('princurve')
procedure = princurve.principal_curve
fitpc = procedure(TSNE_data, NULL, 0.001, TRUE, 200, 2, 'lowess')
curve_projections_matrix = np.array(fitpc.rx('s')[0])
pseudotime_series = np.array(fitpc.rx('lambda')[0])
with open(path.join(output_directory, "{0}_TSNE_d{1}_pcv.tsv".format(tag,
N_dim)), 'w') as f:
np.savetxt(f, curve_projections_matrix, fmt = '%.6f', delimiter = '\t')
with open(path.join(output_directory, "{0}_TSNE_d{1}_lambda.tsv".format(tag,
N_dim)), 'w') as f:
np.savetxt(f, pseudotime_series, fmt = '%.6f', delimiter = '\t')
else:
print("ERROR: PySCUBA: Preprocessing: infer_pseudotime:\n"
"your choice of method for principal curve analysis is not supported "
"by the present version of PySCUBA.")
exit(1)
if anchor_gene:
assert isinstance(anchor_gene, str)
assert markers is not None
N_cells_anchor = 1000
gene_idx = np.where(markers == anchor_gene)[0]
pseudotime_idx = np.argsort(pseudotime_series)
anchor_gene_avg_beg = np.mean(data[pseudotime_idx[:N_cells_anchor], gene_idx])
anchor_gene_avg_end = np.mean(data[pseudotime_idx[N_cells_anchor:], gene_idx])
if anchor_gene_avg_end > anchor_gene_avg_beg:
pseudotime_series = np.max(pseudotime_series) - pseudotime_series
t_min = np.min(pseudotime_series)
t_max = np.max(pseudotime_series)
t_bins = 8
cell_stages = t_bins * (pseudotime_series - t_min + 0.0001) / (t_max - t_min + 0.0002)
cell_stages = np.ceil(cell_stages).astype(int).astype('str')
return cell_stages
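# Worked example of the stage binning above (illustrative, not part of the
# original PySCUBA module). With t_min = 0, t_max = 10 and t_bins = 8:
#
#   >>> import numpy as np
#   >>> t = np.array([0.0, 3.0, 10.0])
#   >>> np.ceil(8 * (t - 0 + 0.0001) / (10 - 0 + 0.0002)).astype(int)
#   array([1, 3, 8])
#
# so the returned string labels always span '1' .. '8'.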
def parse_pairs(text):
"""Return (key, value) pairs from a string featuring particular delimiters.
Modified from a corresponding function in the outdated 'fcm'
Python package by Jacob Frelinger.
"""
delim = text[0]
if delim == r'|':
delim = '\|'
elif delim == r'\a'[0]:
delim = '\\\\'
if delim != text[-1]:
warn("WARNING: the text does not start and end with the same delimiter!")
regex = re.compile('(?<=[^%s])%s(?!%s)' % (delim, delim, delim))
tmp = text[1:-1].replace('$', '')
tmp = regex.split(tmp)
return dict(zip([x.lower() for x in tmp[::2]], tmp[1::2]))
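# Illustrative sketch (not part of the original module): the keyword segment of
# an FCS file is one delimited string; parse_pairs turns it into a dict with
# lower-cased keys and the '$' prefixes stripped. The sample segment below is
# hypothetical.
#
#   >>> parse_pairs('/$PAR/3/$P1N/FSC-A/$P2N/SSC-A/')
#   {'par': '3', 'p1n': 'FSC-A', 'p2n': 'SSC-A'}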
class Annotation(object):
"""An annotation class instance stores meta-data from the recordings of a
cytometry experiment.
Modified from a corresponding class in the outdated 'fcm'
Python package by Jacob Frelinger.
"""
def __init__(self, annotations = None):
        if annotations is None:
annotations = {}
self.__dict__['_mydict'] = annotations
def __getattr__(self, name):
if name in self._mydict.keys():
self.__dict__[name] = self._mydict[name]
return self._mydict[name]
else:
try:
return self._mydict.__getattribute__(name)
except:
                raise AttributeError("'{0}' has no attribute '{1}'".format(str(self.__class__), name))
def __getstate__(self):
return self._mydict
def __setstate__(self, dict):
self.__dict__['_mydict'] = dict
for i in dict.keys():
self.__dict__[i] = dict[i]
def __setattr__(self, name, value):
Annotation.__getattribute__(self, '_mydict')[name] = value
self.__dict__[name] = value
def __setitem__(self, name, value):
self._mydict[name] = value
self.__dict__[name] = self._mydict[name]
def __getitem__(self, name):
return self._mydict[name]
def __repr__(self):
return 'Annotation(' + self._mydict.__repr__() + ')'
def __getinitargs__(self):
return (self._mydict,)
def copy(self):
return Annotation(self._mydict.copy())
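# Illustrative sketch (not part of the original module): Annotation wraps a
# plain dict and exposes its entries both as items and as attributes.
#
#   >>> ann = Annotation({'text': {'par': '3'}})
#   >>> ann['text'] is ann.text
#   True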
class Cyto_data(object):
"""
A Cyto_data object stores the data from a cytometry experiment.
Modified from a corresponding class in the outdated 'fcm' Python
package by Jacob Frelinger.
Members:
--------
Cyto_data.data_points : numpy.array
The data points.
Cyto_data.channels : list
Records which markers or scatters are in which columns.
Cyto_data.scatters : list
Keeps track of which indexes in Cyto_data.channels are scatters.
"""
def __init__(self, name, data_points, channels, scatters = None, notes = None):
"""
Parameters
----------
name: name of the *.fcs file, barring any extension
data_points: an array of data points
channels: list
Records which markers/scatters are in which columns.
scatters: list
Which channels indexes denote scatters
"""
self.name = name
self.data_points = data_points
self.tree = Tree_classes.Tree(data_points, channels)
self.scatters = scatters
self.markers = []
if self.scatters is not None:
for channel in range(len(channels)):
if channel in self.scatters:
pass
elif self.tree.root.channels[channel] in self.scatters:
pass
else:
self.markers.append(channel)
        if notes is None:
notes = Annotation()
self.notes = notes
def __unicode__(self):
return self.name
def __repr__(self):
return self.name
def __getitem__(self, item):
"""Return the Cyto_data points.
"""
if isinstance(item, tuple):
item = list(item)
if isinstance(item[1], str):
item[1] = self.name_to_index(item[1])
elif isinstance(item[1], tuple) or isinstance(item[1], list):
item[1] = list(item[1])
for i, j in enumerate(item[1]):
if isinstance(j, str):
print('{0} is string {1}'.format(i, j))
item[1][i] = self.name_to_index(j)
item = tuple(item)
return self.tree.view()[item]
@property
def channels(self):
return self.current_node.channels
def __getattr__(self, name):
if name in dir(self.current_node.view()):
return self.current_node.view().__getattribute__(name)
else:
            raise AttributeError("'{0}' has no attribute '{1}'".format(str(self.__class__), name))
def __getstate__(self):
return self.__dict__
def __setstate__(self, dict):
for i in dict.keys():
self.__dict__[i] = dict[i]
def name_to_index(self, channels):
"""Return the channel indexes for the channels provided as arguments.
"""
if isinstance(channels, str):
return self.channels.index(channels)
idx = []
for i in channels:
try:
idx.append(self.channels.index(i))
except ValueError:
try:
for j in range(1, int(self.notes.text['par']) + 1):
if i == self.notes.text['p%dn' % j]:
idx.append(self.channels.index(self.notes.text['p%ds' % j]))
except ValueError:
raise ValueError('{0} is not in list'.format(i))
if idx:
return idx
else:
raise ValueError('The field {0} was not found'.format(str(channels)))
def get_channel_by_name(self, channels):
"""Return the data associated with specific channel names.
"""
return self.tree.view()[:, self.name_to_index(channels)]
def get_markers(self):
"""Return the data associated with all the markers.
"""
return self.view()[:, self.markers]
def view(self):
"""Return the current view of the data.
"""
return self.tree.view()
def visit(self, name):
"""Switch the current view of the data.
"""
self.tree.visit(name)
@property
def current_node(self):
"""Return the current node.
"""
return self.tree.current
def copy(self):
"""Return a copy of the Cyto_data object.
"""
tname = self.name
tree_data_points = self.tree.root.data
tnotes = self.notes.copy()
tchannels = self.channels[:]
tscchannels = self.scatters[:]
tmp = Cyto_data(tname, tree_data_points, tchannels, tscchannels, tnotes)
from copy import deepcopy
tmp.tree = deepcopy(self.tree)
return tmp
def gate(self, g, chan=None):
"""Return a gated region of the cytometry data.
"""
return g.gate(self, chan)
def subsample(self, s):
"""Return subsampled cytometry data.
"""
return s.subsample(self)
def get_current_node(self):
"""Return the current node.
"""
return self.current_node
def add_view(self, node):
"""Add a new node to the visualization tree.
"""
self.tree.add_child(node.name, node)
return self
def summary(self):
"""Provide a summary of current view.
"""
data_points = self.view()
means = data_points.mean(0)
stds = data_points.std(0)
mins = data_points.min(0)
maxs = data_points.max(0)
medians = np.median(data_points, 0)
dim = data_points.shape[1]
summary = ''
for i in range(dim):
summary = summary + self.channels[i] + ":\n"
summary = summary + " max: " + str(maxs[i]) + "\n"
summary = summary + " mean: " + str(means[i]) + "\n"
summary = summary + " median: " + str(medians[i]) + "\n"
summary = summary + " min: " + str(mins[i]) + "\n"
summary = summary + " std: " + str(stds[i]) + "\n"
return summary
def boundary_events(self):
"""Return a dictionary of the percentage of all events that are in the first
and in the last channel for each channel.
"""
boundary_dict = {}
for k, channel in enumerate(self.channels):
col = self.view()[:, k]
boundary_dict[channel] = \
sum((col == min(col)) | (col == max(col))) / len(col)
return boundary_dict
class NotSupportedFCSDataMode(Exception):
"""Exception raised for data modes in a *.fcs file that are not currently supported.
Modified from a corresponding exception in the outdated 'fcm'
Python package by Jacob Frelinger.
"""
def __init__(self, mode):
self.mode = mode
self.message = "Unfortunately, the FCS data stored as type {0} is not currently supported.".format(mode)
self.args = (mode,)
def integer_format(b):
"""Return the binary format of an integer.
"""
if b == 8:
return 'B'
elif b == 16:
return 'H'
elif b == 32:
return 'I'
else:
print("Cannot handle integers of bit size {0}.".format(b))
return None
def integer_bit_mask(b, ub):
    """Return the bit mask for an integer of bit size b of which ub bits are used.
"""
if b == 8:
return (0xFF >> (b - ub))
elif b == 16:
return (0xFFFF >> (b - ub))
elif b == 32:
return (0xFFFFFFFF >> (b - ub))
else:
print("Cannot handle integers of bit size {0}.".format(b))
return None
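# Illustrative examples (not part of the original module) of the two helpers
# above, for a 16-bit channel of which only 10 bits are actually used:
#
#   >>> integer_format(16)
#   'H'
#   >>> hex(integer_bit_mask(16, 10))
#   '0x3ff'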
def fluorescent_channel(name):
"""Check if a channel is a fluorescent channel.
"""
name = name.lower()
if name.startswith('cs'):
return False
elif name.startswith('fs'):
return False
elif name.startswith('ss'):
return False
elif name.startswith('ae'):
return False
elif name.startswith('cv'):
return False
elif name.startswith('time'):
return False
else:
return True
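# Illustrative examples (not part of the original module): scatter and time
# channels are filtered out, anything else is treated as a fluorescence marker.
#
#   >>> fluorescent_channel('FSC-A'), fluorescent_channel('SSC-H')
#   (False, False)
#   >>> fluorescent_channel('Time'), fluorescent_channel('CD45 PE-A')
#   (False, True)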
class FCS_handler(object):
"""Hold object to read and parse *.fcs files.
Modified from a corresponding class in the outdated 'fcm'
Python package by Jacob Frelinger.
"""
def __init__(self, file_path):
self.file_path = file_path
self.current_offset = 0
def get_next_dataset(self, **kwargs):
"""Return the next cytometry dataset stored in a *.fcs file.
"""
with open(self.file_path, 'rb') as self._f:
header = self.parse_header(self.current_offset)
text = self.parse_text(self.current_offset, header['text_start'],
header['text_stop'])
try:
analysis_beg = text['begin_analysis']
except KeyError:
analysis_beg = header['analysis_start']
try:
analysis_end = text['end_analysis']
except KeyError:
analysis_end = header['analysis_end']
analysis = self.parse_analysis(self.current_offset, analysis_beg,
analysis_end)
try:
data_beg = int(text['begin_data'])
except KeyError:
data_beg = header['data_start']
try:
data_end = int(text['end_data'])
except KeyError:
data_end = header['data_end']
LMD = self.fix_LMD(self.current_offset, header['text_start'],
header['text_stop'])
data_end = data_end + LMD
data = self.parse_data(self.current_offset, data_beg, data_end, text)
channels = []
scchannels = []
scchannel_indexes = []
base_chan_name = []
for i in range(1, int(text['par']) + 1):
base_chan_name.append(text['p%dn' % i])
try:
if text['p%ds' % i] not in ['',' ']:
name = text['p%ds' % i]
else:
name = text['p%dn' % i]
except KeyError:
name = text['p%dn' % i]
channels.append(name)
if not fluorescent_channel(name):
scchannels.append(name)
if name != 'Time':
scchannel_indexes.append(i - 1)
_, name = path.split(self.file_path)
name, _ = path.splitext(name)
cyto_object = Cyto_data(name, data, channels, scchannels,
Annotation({'text': text,
'header': header,
'analysis': analysis,}))
return cyto_object
def read_bytes(self, offset, start, stop):
"""Read bytes from start to stop, included.
"""
self._f.seek(offset + start)
return self._f.read(stop - start + 1)
def parse_header(self, offset):
"""
Parse the cytometry data in a *.fcs file at the specified offset
(accounting for the possibility of several data parts in the said file).
"""
header = {}
header['version'] = float(self.read_bytes(offset, 3, 5))
header['text_start'] = int(self.read_bytes(offset, 10, 17))
header['text_stop'] = int(self.read_bytes(offset, 18, 25))
header['data_start'] = int(self.read_bytes(offset, 26, 33))
header['data_end'] = int(self.read_bytes(offset, 34, 41))
try:
header['analysis_start'] = int(self.read_bytes(offset, 42, 49))
except ValueError:
header['analysis_start'] = -1
try:
header['analysis_end'] = int(self.read_bytes(offset, 50, 57))
except ValueError:
header['analysis_end'] = -1
return header
def parse_text(self, offset, start, stop):
"""Return the parsed text segment of a *.fcs file.
"""
text = self.read_bytes(offset, start, stop)
return parse_pairs(text)
def parse_analysis(self, offset, start, stop):
"""Return the parsed analysis part of the *.fcs file under consideration.
"""
if start == stop:
return {}
else:
text = self.read_bytes(offset, start, stop)
return parse_pairs(text)
def fix_LMD(self, offset, start, stop):
"""Handle the LMD format (embedded FCS format) and the way it counts,
which differs from other FCS formats.
"""
text = self.read_bytes(offset, start, stop)
if text[0] == text[-1]:
return 0
else:
return -1
def parse_data(self, offset, start, stop, text):
"""Return an array holding the data part of *.fcs file at hand.
"""
dtype = text['datatype']
mode = text['mode']
tot = int(text['tot'])
if mode == 'c' or mode == 'u':
raise NotSupportedFCSDataMode(mode)
if text['byteord'] == '1,2,3,4' or text['byteord'] == '1,2':
order = '<'
elif text['byteord'] == '4,3,2,1' or text['byteord'] == '2,1':
order = '>'
else:
warn("WARNING: unsupported byte order {0}; using default @".format(text['byteord']))
order = '@'
bit_width = []
data_range = []
for i in range(1, int(text['par']) + 1):
bit_width.append(int(text['p%db' % i]))
data_range.append(int(text['p%dr' % i]))
if dtype.lower() == 'i':
data = self.parse_int_data(offset, start, stop, bit_width, data_range,
tot, order)
elif dtype.lower() == 'f' or dtype.lower() == 'd':
data = self.parse_float_data(offset, start, stop, dtype.lower(), tot, order)
else:
data = self.parse_ASCII_data(offset, start, stop, bit_width, dtype,
tot, order)
return data
def parse_int_data(self, offset, start, stop, bit_width, data_range, tot, order):
"""Parse *.fcs file and return data as an integer list.
"""
if reduce(and_, [item in [8, 16, 32] for item in bit_width]):
if len(set(bit_width)) == 1:
                num_items = (stop - start + 1) // calcsize(integer_format(bit_width[0]))
tmp = unpack('%s%d%s' % (order, num_items, integer_format(bit_width[0])),
self.read_bytes(offset, start, stop))
else:
                unused_bit_widths = [int(np.log2(r)) for r in data_range]
tmp = []
current = start
while current < stop:
for i, current_width in enumerate(bit_width):
bit_mask = integer_bit_mask(current_width, unused_bit_widths[i])
                        N_bytes = current_width // 8
bin_string = self.read_bytes(offset, current, current + N_bytes - 1)
current += N_bytes
val = bit_mask & unpack('%s%s' % (order, integer_format(current_width)), bin_string)[0]
tmp.append(val)
else:
warn('WARNING: non-standard bit widths for the data part.')
return None
return np.array(tmp).reshape((tot, len(bit_width)))
def parse_float_data(self, offset, start, stop, dtype, tot, order):
"""Parse a *.fcs file and return list of float data entries.
"""
        N_items = (stop - start + 1) // calcsize(dtype)
        tmp = unpack('%s%d%s' % (order, N_items, dtype),
                     self.read_bytes(offset, start, stop))
        return np.array(tmp).reshape((tot, len(tmp) // tot))
def parse_ASCII_data(self, offset, start, stop, bit_width, dtype, tot, order):
"""Parse ASCII encoded data from a *.fcs file.
"""
        N_items = (stop - start + 1) // calcsize(dtype)
        tmp = unpack('%s%d%s' % (order, N_items, dtype),
                     self.read_bytes(offset, start, stop))
        return np.array(tmp).reshape((tot, len(tmp) // tot))
def cytometry_preprocess(file_path, log_mode = False, pseudotime_mode = True,
pcv_method = 'Rprincurve', anchor_gene = None,
exclude_marker_names = None):
data_tag, output_directory = create_output_directory(file_path)
cyto_object = get_FCS_data(file_path)
    marker_idx = np.array(cyto_object.markers, dtype = int)
    marker_names = np.array(cyto_object.channels, dtype = str)[marker_idx]
data = cyto_object.data_points
data = data[:, marker_idx]
    cell_IDs = np.array(['cell_{0}'.format(i) for i in range(1, data.shape[0] + 1)],
dtype = str)
if exclude_marker_names:
indices = np.zeros(0, dtype = int)
for name in exclude_marker_names:
indices = np.append(indices, np.where(marker_names == name)[0])
data = np.delete(data, indices, axis = 1)
marker_names = np.delete(marker_names, indices)
cell_stages = infer_pseudotime(data, output_directory, data_tag, pcv_method,
anchor_gene, marker_names)
    write_preprocessed_data(output_directory, cell_IDs, cell_stages, data, marker_names)
return cell_IDs, data, marker_names, cell_stages.astype(float), data_tag, output_directory
def PCR_preprocess(file_path, log_mode = False, pseudotime_mode = False,
pcv_method = 'Rprincurve', anchor_gene = None,
exclude_marker_names = None):
low_gene_fraction_max = 0.8
data_tag, output_directory = create_output_directory(file_path)
cell_IDs, cell_stages, data = get_PCR_or_RNASeq_data(file_path, pseudotime_mode)
with open(file_path, 'r') as f:
markers = np.loadtxt(f, dtype = str, delimiter = '\t',
skiprows = 1 if pseudotime_mode else 2, usecols = [0])
markers.reshape(markers.size)
if exclude_marker_names:
indices = np.zeros(0, dtype = int)
for name in exclude_marker_names:
indices = np.append(indices, np.where(markers == name)[0])
data = np.delete(data, indices, axis = 1)
markers = np.delete(markers, indices)
if pseudotime_mode:
cell_stages = infer_pseudotime(data, output_directory, data_tag, pcv_method,
anchor_gene, markers)
condition = np.mean(data == 0, axis = 0) < low_gene_fraction_max
data = np.compress(condition, data, 1)
markers = np.compress(condition, markers)
write_preprocessed_data(output_directory, cell_IDs, cell_stages, data, markers)
return cell_IDs, data, markers, cell_stages.astype(float), data_tag, output_directory
def RNASeq_preprocess(file_path, log_mode = True, pseudotime_mode = False,
pcv_method = 'Rprincurve', anchor_gene = None,
exclude_marker_names = None):
assert isinstance(log_mode, bool)
assert isinstance(pseudotime_mode, bool)
# Threshold value for genes of low expression levels
low_gene_threshold = 1
# Maximum fraction of lowly-expressed cells allowed for each gene
low_gene_fraction_max = 0.7
# Number of highly variable genes selected
N_selected_genes = 1000
data_tag, output_directory = create_output_directory(file_path)
cell_IDs, cell_stages, data = get_PCR_or_RNASeq_data(file_path, pseudotime_mode)
with open(file_path, 'r') as f:
markers = np.loadtxt(f, dtype = str, delimiter = '\t',
skiprows = 1 if pseudotime_mode else 2, usecols = [0])
markers.reshape(markers.size)
if exclude_marker_names:
indices = np.zeros(0, dtype = int)
for name in exclude_marker_names:
indices = np.append(indices, np.where(markers == name)[0])
data = np.delete(data, indices, axis = 1)
markers = np.delete(markers, indices)
if pseudotime_mode:
cell_stages = infer_pseudotime(data, output_directory, data_tag, pcv_method,
anchor_gene, markers)
condition = np.mean(data < low_gene_threshold, axis = 0) < low_gene_fraction_max
data = np.compress(condition, data, 1)
markers = np.compress(condition, markers)
Fano_factors = np.var(data, axis = 0) / np.mean(data, axis = 0).astype(float)
idx = np.argsort(Fano_factors)[::-1][:N_selected_genes]
data = data[:, idx]
markers = markers[idx]
if log_mode:
np.log2(data + 1, data)
write_preprocessed_data(output_directory, cell_IDs, cell_stages, data, markers)
return cell_IDs, data, markers, cell_stages.astype(float), data_tag, output_directory
def create_output_directory(file_path):
data_tag = path.basename(path.abspath(file_path)).split('.')[0]
output_directory = path.join(getcwd(), 'SCUBA_analysis_of_{0}'.format(data_tag))
try:
makedirs(output_directory)
except OSError:
if not path.isdir(output_directory):
raise
return data_tag, output_directory
def get_FCS_data(file_path, **kwargs):
"""Return a data object from an *.fcs file"""
cyto_object = FCS_handler(file_path)
data = cyto_object.get_next_dataset(**kwargs)
cyto_object._f.close()
del cyto_object
return data
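# Illustrative usage sketch (not part of the original module); 'sample.fcs' is
# a hypothetical file name:
#
#   cyto = get_FCS_data('sample.fcs')
#   events = cyto.data_points          # events x channels array
#   names = cyto.channels              # channel names parsed from the TEXT segment
#   markers_only = cyto.get_markers()  # drops scatter and time channels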
def get_PCR_or_RNASeq_data(file_path, pseudotime_mode = False):
with open(file_path, 'r') as f:
cell_IDs = f.readline().rstrip('\n').split('\t')
cell_IDs = np.array(cell_IDs[1:], dtype = str)
if pseudotime_mode:
cell_stages = np.empty(0, dtype = float)
else:
cell_stages = f.readline().rstrip('\n').split('\t')
cell_stages = np.array(cell_stages[1:], dtype = str)
data = np.loadtxt(f, dtype = float, delimiter = '\t',
                          usecols = range(1, len(cell_IDs) + 1))
data = data.T
return cell_IDs, cell_stages, data
def write_preprocessed_data(output_directory, cell_IDs, cell_stages, data, markers):
processed_data_path = path.join(output_directory, 'processed_data.tsv')
with open(processed_data_path, 'w') as f:
f.write('\t'.join(cell_IDs))
f.write('\n')
f.write('\t'.join(cell_stages))
f.write('\n')
np.savetxt(f, data.T, fmt = '%.6f', delimiter = '\t')
dataset = np.genfromtxt(processed_data_path, delimiter = '\t', dtype = str)
dataset = np.insert(dataset, 0, np.append(['Cell ID', 'Stage'],
markers), axis = 1)
with open(processed_data_path, 'w') as f:
np.savetxt(f, dataset, fmt = '%s', delimiter = '\t')
| python | 29,858 |
# -*- coding: utf-8 -*-
from collections import namedtuple
from owlready2 import get_ontology
from harrastuspassi.models import HobbyCategory
from django.contrib.contenttypes.models import ContentType
from django.core.management.base import BaseCommand
from django.conf import settings
# there must be a better way
app_label = settings.INSTALLED_APPS[0]
Root = namedtuple('Root', ['label', 'model_name'])
ROOT_CLASSES = {
'categories': Root(label='Harrastusmuoto', model_name='hobbycategory'),
'audiences': Root(label='Yleisö', model_name='hobbyaudience')}
class Command(BaseCommand):
help = 'Import category and audience keywords from .owl file.'
def add_arguments(self, parser):
parser.add_argument('--path', action='store', dest='path_to_owl_file', default='./ontology.owl',
help='Provide a path to the .owl file.')
parser.add_argument('--categories', action='store_const', dest='import_categories', default=False,
const='categories', help='Import the hobby categories (harrastusmuodot)')
parser.add_argument('--audiences', action='store_const', dest='import_audiences', default=False,
const='audiences', help='Import the keywords that define the audience (koululaiset, lapset jne.).')
def handle(self, *args, **options):
ontology = get_ontology(options['path_to_owl_file']).load()
self.process_option(options['import_categories'], ontology)
self.process_option(options['import_audiences'], ontology)
if not any([options['import_categories'], options['import_audiences']]):
            self.stderr.write('Specify --categories and/or --audiences to import ontologies.')
def process_option(self, option: str, ontology):
if not option:
return
root_class = ontology.search_one(label=ROOT_CLASSES[option].label)
self.depth = 0
model = ContentType.objects.get(app_label=app_label,
model=ROOT_CLASSES[option].model_name
).model_class()
self.add_subclasses_as_categories(root_class, model)
def add_subclasses_as_categories(self, parent_class, model, parent_hobby_category=None):
for subclass in parent_class.subclasses():
[origin_id] = subclass.identifier or ['']
if not origin_id:
data_source = ''
else:
data_source = 'yso'
labels = subclass.label
name_fi, name_sv, name_en = '', '', ''
for label in labels:
label_lang = getattr(label, 'lang', 'fi')
if label_lang == 'fi':
name_fi = label
elif label_lang == 'sv':
name_sv = label
elif label_lang == 'en':
name_en = label
hobby_category, created = model.objects.update_or_create(
name_fi=name_fi,
parent=parent_hobby_category,
defaults={
'name_sv': name_sv,
'name_en': name_en,
'data_source': data_source,
'origin_id': origin_id
}
)
indent = '--' * self.depth
self.stdout.write(f'{indent}fi_{name_fi}, sv_{name_sv}, en_{name_en}')
self.depth += 1
self.add_subclasses_as_categories(subclass, model, hobby_category)
self.depth -= 1
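# Illustrative usage sketch (not part of the original command). Assuming this
# module is installed as a management command named 'import_ontology' (the
# name is hypothetical), both keyword trees would be imported with:
#
#   python manage.py import_ontology --path ./ontology.owl --categories --audiences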
| python | 3,240 |
from __future__ import print_function, absolute_import, division
from astropy import units as u
from numpy.testing import assert_allclose as assert_allclose_numpy, assert_array_equal
def assert_allclose(q1, q2, **kwargs):
"""
Quantity-safe version of Numpy's assert_allclose
Copyright (c) 2014, spectral-cube developers
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
if isinstance(q1, u.Quantity) and isinstance(q2, u.Quantity):
assert_allclose_numpy(q1.to(q2.unit).value, q2.value, **kwargs)
elif isinstance(q1, u.Quantity):
assert_allclose_numpy(q1.value, q2, **kwargs)
elif isinstance(q2, u.Quantity):
assert_allclose_numpy(q1, q2.value, **kwargs)
else:
assert_allclose_numpy(q1, q2, **kwargs) | python | 2,265 |
"""
API that are reported to numba.cuda
"""
import contextlib
import numpy as np
from .cudadrv import devicearray, devices, driver
from numba.core import config
# NDarray device helper
require_context = devices.require_context
current_context = devices.get_context
gpus = devices.gpus
@require_context
def from_cuda_array_interface(desc, owner=None, sync=True):
"""Create a DeviceNDArray from a cuda-array-interface description.
The ``owner`` is the owner of the underlying memory.
The resulting DeviceNDArray will acquire a reference from it.
If ``sync`` is ``True``, then the imported stream (if present) will be
synchronized.
"""
version = desc.get('version')
# Mask introduced in version 1
if 1 <= version:
mask = desc.get('mask')
# Would ideally be better to detect if the mask is all valid
if mask is not None:
raise NotImplementedError('Masked arrays are not supported')
shape = desc['shape']
strides = desc.get('strides')
dtype = np.dtype(desc['typestr'])
shape, strides, dtype = _prepare_shape_strides_dtype(
shape, strides, dtype, order='C')
size = driver.memory_size_from_info(shape, strides, dtype.itemsize)
devptr = driver.get_devptr_for_active_ctx(desc['data'][0])
data = driver.MemoryPointer(
current_context(), devptr, size=size, owner=owner)
stream_ptr = desc.get('stream', None)
if stream_ptr is not None:
stream = external_stream(stream_ptr)
if sync and config.CUDA_ARRAY_INTERFACE_SYNC:
stream.synchronize()
else:
stream = 0 # No "Numba default stream", not the CUDA default stream
da = devicearray.DeviceNDArray(shape=shape, strides=strides,
dtype=dtype, gpu_data=data,
stream=stream)
return da
def as_cuda_array(obj, sync=True):
"""Create a DeviceNDArray from any object that implements
the :ref:`cuda array interface <cuda-array-interface>`.
A view of the underlying GPU buffer is created. No copying of the data
is done. The resulting DeviceNDArray will acquire a reference from `obj`.
If ``sync`` is ``True``, then the imported stream (if present) will be
synchronized.
"""
if not is_cuda_array(obj):
raise TypeError("*obj* doesn't implement the cuda array interface.")
else:
return from_cuda_array_interface(obj.__cuda_array_interface__,
owner=obj, sync=sync)
def is_cuda_array(obj):
"""Test if the object has defined the `__cuda_array_interface__` attribute.
Does not verify the validity of the interface.
"""
return hasattr(obj, '__cuda_array_interface__')
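# Illustrative sketch (not part of the original module): any object exposing
# __cuda_array_interface__, e.g. a CuPy array or a PyTorch CUDA tensor, can be
# viewed without copying. CuPy is an assumed third-party package here, not a
# dependency of this module.
#
#   import cupy as cp
#   d_arr = as_cuda_array(cp.arange(10))   # zero-copy DeviceNDArray view
#   host = d_arr.copy_to_host()            # numpy array([0, 1, ..., 9])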
@require_context
def to_device(obj, stream=0, copy=True, to=None):
"""to_device(obj, stream=0, copy=True, to=None)
Allocate and transfer a numpy ndarray or structured scalar to the device.
To copy host->device a numpy array::
ary = np.arange(10)
d_ary = cuda.to_device(ary)
To enqueue the transfer to a stream::
stream = cuda.stream()
d_ary = cuda.to_device(ary, stream=stream)
The resulting ``d_ary`` is a ``DeviceNDArray``.
To copy device->host::
hary = d_ary.copy_to_host()
To copy device->host to an existing array::
ary = np.empty(shape=d_ary.shape, dtype=d_ary.dtype)
d_ary.copy_to_host(ary)
To enqueue the transfer to a stream::
hary = d_ary.copy_to_host(stream=stream)
"""
if to is None:
to, new = devicearray.auto_device(obj, stream=stream, copy=copy)
return to
if copy:
to.copy_to_device(obj, stream=stream)
return to
@require_context
def device_array(shape, dtype=np.float_, strides=None, order='C', stream=0):
"""device_array(shape, dtype=np.float_, strides=None, order='C', stream=0)
Allocate an empty device ndarray. Similar to :meth:`numpy.empty`.
"""
shape, strides, dtype = _prepare_shape_strides_dtype(shape, strides, dtype,
order)
return devicearray.DeviceNDArray(shape=shape, strides=strides, dtype=dtype,
stream=stream)
@require_context
def managed_array(shape, dtype=np.float_, strides=None, order='C', stream=0,
attach_global=True):
"""managed_array(shape, dtype=np.float_, strides=None, order='C', stream=0,
attach_global=True)
Allocate a np.ndarray with a buffer that is managed.
Similar to np.empty().
Managed memory is supported on Linux, and is considered experimental on
Windows.
:param attach_global: A flag indicating whether to attach globally. Global
attachment implies that the memory is accessible from
any stream on any device. If ``False``, attachment is
*host*, and memory is only accessible by devices
with Compute Capability 6.0 and later.
"""
shape, strides, dtype = _prepare_shape_strides_dtype(shape, strides, dtype,
order)
bytesize = driver.memory_size_from_info(shape, strides, dtype.itemsize)
buffer = current_context().memallocmanaged(bytesize,
attach_global=attach_global)
npary = np.ndarray(shape=shape, strides=strides, dtype=dtype, order=order,
buffer=buffer)
managedview = np.ndarray.view(npary, type=devicearray.ManagedNDArray)
managedview.device_setup(buffer, stream=stream)
return managedview
@require_context
def pinned_array(shape, dtype=np.float_, strides=None, order='C'):
"""pinned_array(shape, dtype=np.float_, strides=None, order='C')
Allocate an :class:`ndarray <numpy.ndarray>` with a buffer that is pinned
(pagelocked). Similar to :func:`np.empty() <numpy.empty>`.
"""
shape, strides, dtype = _prepare_shape_strides_dtype(shape, strides, dtype,
order)
bytesize = driver.memory_size_from_info(shape, strides,
dtype.itemsize)
buffer = current_context().memhostalloc(bytesize)
return np.ndarray(shape=shape, strides=strides, dtype=dtype, order=order,
buffer=buffer)
@require_context
def mapped_array(shape, dtype=np.float_, strides=None, order='C', stream=0,
portable=False, wc=False):
"""mapped_array(shape, dtype=np.float_, strides=None, order='C', stream=0,
portable=False, wc=False)
Allocate a mapped ndarray with a buffer that is pinned and mapped on
to the device. Similar to np.empty()
:param portable: a boolean flag to allow the allocated device memory to be
usable in multiple devices.
    :param wc: a boolean flag to enable writecombined allocation which is faster
               for the host to write and for the device to read, but slower for
               the host to read and for the device to write.
"""
shape, strides, dtype = _prepare_shape_strides_dtype(shape, strides, dtype,
order)
bytesize = driver.memory_size_from_info(shape, strides, dtype.itemsize)
buffer = current_context().memhostalloc(bytesize, mapped=True)
npary = np.ndarray(shape=shape, strides=strides, dtype=dtype, order=order,
buffer=buffer)
mappedview = np.ndarray.view(npary, type=devicearray.MappedNDArray)
mappedview.device_setup(buffer, stream=stream)
return mappedview
@contextlib.contextmanager
@require_context
def open_ipc_array(handle, shape, dtype, strides=None, offset=0):
"""
A context manager that opens a IPC *handle* (*CUipcMemHandle*) that is
represented as a sequence of bytes (e.g. *bytes*, tuple of int)
and represent it as an array of the given *shape*, *strides* and *dtype*.
The *strides* can be omitted. In that case, it is assumed to be a 1D
C contiguous array.
Yields a device array.
The IPC handle is closed automatically when context manager exits.
"""
dtype = np.dtype(dtype)
# compute size
size = np.prod(shape) * dtype.itemsize
# manually recreate the IPC mem handle
handle = driver.drvapi.cu_ipc_mem_handle(*handle)
# use *IpcHandle* to open the IPC memory
ipchandle = driver.IpcHandle(None, handle, size, offset=offset)
yield ipchandle.open_array(current_context(), shape=shape,
strides=strides, dtype=dtype)
ipchandle.close()
def synchronize():
"Synchronize the current context."
return current_context().synchronize()
def _prepare_shape_strides_dtype(shape, strides, dtype, order):
dtype = np.dtype(dtype)
if isinstance(shape, int):
shape = (shape,)
if isinstance(strides, int):
strides = (strides,)
else:
if shape == ():
shape = (1,)
strides = strides or _fill_stride_by_order(shape, dtype, order)
return shape, strides, dtype
def _fill_stride_by_order(shape, dtype, order):
nd = len(shape)
strides = [0] * nd
if order == 'C':
strides[-1] = dtype.itemsize
for d in reversed(range(nd - 1)):
strides[d] = strides[d + 1] * shape[d + 1]
elif order == 'F':
strides[0] = dtype.itemsize
for d in range(1, nd):
strides[d] = strides[d - 1] * shape[d - 1]
else:
raise ValueError('must be either C/F order')
return tuple(strides)
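# Worked example for the helper above (illustrative, not part of the original
# module): for shape (2, 3) and float32 (itemsize 4), row-major order gives
# strides (12, 4) while column-major order gives strides (4, 8):
#
#   >>> _fill_stride_by_order((2, 3), np.dtype(np.float32), 'C')
#   (12, 4)
#   >>> _fill_stride_by_order((2, 3), np.dtype(np.float32), 'F')
#   (4, 8)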
def _contiguous_strides_like_array(ary):
"""
Given an array, compute strides for a new contiguous array of the same
shape.
"""
# Don't recompute strides if the default strides will be sufficient to
# create a contiguous array.
if ary.flags['C_CONTIGUOUS'] or ary.flags['F_CONTIGUOUS'] or ary.ndim <= 1:
return None
# Otherwise, we need to compute new strides using an algorithm adapted from
# NumPy v1.17.4's PyArray_NewLikeArrayWithShape in
# core/src/multiarray/ctors.c. We permute the strides in ascending order
# then compute the stride for the dimensions with the same permutation.
# Stride permutation. E.g. a stride array (4, -2, 12) becomes
# [(1, -2), (0, 4), (2, 12)]
strideperm = [ x for x in enumerate(ary.strides) ]
strideperm.sort(key=lambda x: x[1])
# Compute new strides using permutation
strides = [0] * len(ary.strides)
stride = ary.dtype.itemsize
for i_perm, _ in strideperm:
strides[i_perm] = stride
stride *= ary.shape[i_perm]
return tuple(strides)
def _order_like_array(ary):
if ary.flags['F_CONTIGUOUS'] and not ary.flags['C_CONTIGUOUS']:
return 'F'
else:
return 'C'
def device_array_like(ary, stream=0):
"""
Call :func:`device_array() <numba.cuda.device_array>` with information from
the array.
"""
strides = _contiguous_strides_like_array(ary)
order = _order_like_array(ary)
return device_array(shape=ary.shape, dtype=ary.dtype, strides=strides,
order=order, stream=stream)
def mapped_array_like(ary, stream=0, portable=False, wc=False):
"""
Call :func:`mapped_array() <numba.cuda.mapped_array>` with the information
from the array.
"""
strides = _contiguous_strides_like_array(ary)
order = _order_like_array(ary)
return mapped_array(shape=ary.shape, dtype=ary.dtype, strides=strides,
order=order, stream=stream, portable=portable, wc=wc)
def pinned_array_like(ary):
"""
Call :func:`pinned_array() <numba.cuda.pinned_array>` with the information
from the array.
"""
strides = _contiguous_strides_like_array(ary)
order = _order_like_array(ary)
return pinned_array(shape=ary.shape, dtype=ary.dtype, strides=strides,
order=order)
# Stream helper
@require_context
def stream():
"""
Create a CUDA stream that represents a command queue for the device.
"""
return current_context().create_stream()
@require_context
def default_stream():
"""
Get the default CUDA stream. CUDA semantics in general are that the default
stream is either the legacy default stream or the per-thread default stream
depending on which CUDA APIs are in use. In Numba, the APIs for the legacy
default stream are always the ones in use, but an option to use APIs for
the per-thread default stream may be provided in future.
"""
return current_context().get_default_stream()
@require_context
def legacy_default_stream():
"""
Get the legacy default CUDA stream.
"""
return current_context().get_legacy_default_stream()
@require_context
def per_thread_default_stream():
"""
Get the per-thread default CUDA stream.
"""
return current_context().get_per_thread_default_stream()
@require_context
def external_stream(ptr):
"""Create a Numba stream object for a stream allocated outside Numba.
:param ptr: Pointer to the external stream to wrap in a Numba Stream
:type ptr: int
"""
return current_context().create_external_stream(ptr)
# Page lock
@require_context
@contextlib.contextmanager
def pinned(*arylist):
"""A context manager for temporary pinning a sequence of host ndarrays.
"""
pmlist = []
for ary in arylist:
pm = current_context().mempin(ary, driver.host_pointer(ary),
driver.host_memory_size(ary),
mapped=False)
pmlist.append(pm)
yield
@require_context
@contextlib.contextmanager
def mapped(*arylist, **kws):
"""A context manager for temporarily mapping a sequence of host ndarrays.
"""
assert not kws or 'stream' in kws, "Only accept 'stream' as keyword."
stream = kws.get('stream', 0)
pmlist = []
devarylist = []
for ary in arylist:
pm = current_context().mempin(ary, driver.host_pointer(ary),
driver.host_memory_size(ary),
mapped=True)
pmlist.append(pm)
devary = devicearray.from_array_like(ary, gpu_data=pm, stream=stream)
devarylist.append(devary)
try:
if len(devarylist) == 1:
yield devarylist[0]
else:
yield devarylist
finally:
# When exiting from `with cuda.mapped(*arrs) as mapped_arrs:`, the name
# `mapped_arrs` stays in scope, blocking automatic unmapping based on
# reference count. We therefore invoke the finalizer manually.
for pm in pmlist:
pm.free()
def event(timing=True):
"""
Create a CUDA event. Timing data is only recorded by the event if it is
created with ``timing=True``.
"""
evt = current_context().create_event(timing=timing)
return evt
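# Illustrative timing sketch (not part of the original module); the kernel
# launch is a placeholder for any queued GPU work:
#
#   start, stop = event(), event()
#   start.record()
#   # ... launch kernels / enqueue copies here ...
#   stop.record()
#   stop.synchronize()
#   elapsed_ms = event_elapsed_time(start, stop)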
event_elapsed_time = driver.event_elapsed_time
# Device selection
def select_device(device_id):
"""
Make the context associated with device *device_id* the current context.
Returns a Device instance.
Raises exception on error.
"""
context = devices.get_context(device_id)
return context.device
def get_current_device():
"Get current device associated with the current thread"
return current_context().device
def list_devices():
"Return a list of all detected devices"
return devices.gpus
def close():
"""
Explicitly clears all contexts in the current thread, and destroys all
contexts if the current thread is the main thread.
"""
devices.reset()
def _auto_device(ary, stream=0, copy=True):
return devicearray.auto_device(ary, stream=stream, copy=copy)
def detect():
"""
Detect supported CUDA hardware and print a summary of the detected hardware.
Returns a boolean indicating whether any supported devices were detected.
"""
devlist = list_devices()
print('Found %d CUDA devices' % len(devlist))
supported_count = 0
for dev in devlist:
attrs = []
cc = dev.compute_capability
attrs += [('compute capability', '%d.%d' % cc)]
attrs += [('pci device id', dev.PCI_DEVICE_ID)]
attrs += [('pci bus id', dev.PCI_BUS_ID)]
if cc < (2, 0):
support = '[NOT SUPPORTED: CC < 2.0]'
else:
support = '[SUPPORTED]'
supported_count += 1
print('id %d %20s %40s' % (dev.id, dev.name, support))
for key, val in attrs:
print('%40s: %s' % (key, val))
print('Summary:')
print('\t%d/%d devices are supported' % (supported_count, len(devlist)))
return supported_count > 0
@contextlib.contextmanager
def defer_cleanup():
"""
Temporarily disable memory deallocation.
Use this to prevent resource deallocation breaking asynchronous execution.
For example::
with defer_cleanup():
# all cleanup is deferred in here
do_speed_critical_code()
# cleanup can occur here
Note: this context manager can be nested.
"""
with current_context().defer_cleanup():
yield
profiling = require_context(driver.profiling)
profile_start = require_context(driver.profile_start)
profile_stop = require_context(driver.profile_stop)
| python | 17,424 |
from data_collection.management.commands import BaseXpressDemocracyClubCsvImporter
class Command(BaseXpressDemocracyClubCsvImporter):
council_id = "E09000028"
addresses_name = (
"2020-02-17T14:30:02.399947/EC & Democracy Club Polling Place Look Up 2020.csv"
)
stations_name = (
"2020-02-17T14:30:02.399947/EC & Democracy Club Polling Place Look Up 2020.csv"
)
elections = ["2020-05-07"]
csv_delimiter = ","
def address_record_to_dict(self, record):
rec = super().address_record_to_dict(record)
uprn = record.property_urn.strip().lstrip("0")
if record.addressline2 == "179 Forest Hill Road":
return None
if record.addressline1 == "Excluding Third Floor and Fourth Floor":
return None
if record.addressline6 in [
"SE16 2EZ",
"SE21 7BG",
"SE1 0AA",
"SE1 0NS",
"SE15 4TP",
]:
return None
if uprn in [
"10094086807",
"200003480357",
"10093341595",
"10093341594",
"10093341119",
"10090283768",
"10094086939",
"10090747304",
"10093340214",
"10009806727",
"200003492155",
"10090748785",
]:
return None
if uprn == "200003487670":
rec["postcode"] = "SE1 2BB"
if uprn == "10091665680":
rec["uprn"] = ""
rec["postcode"] = "SE5 0EZ"
if uprn == "10093340235":
rec["postcode"] = "SE22 9EE"
return rec
| python | 1,642 |
# -*- coding: utf-8 -*-
# MIT License
#
# Copyright(c) 2019 Aalborg University
# Joakim Bruslund Haurum, May 2019
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files(the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and / or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions :
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import numpy as np
import pandas as pd
import csv
import json
import datetime
import cv2
import argparse
import os
import RainGauges as RG
try:
to_unicode = unicode
except NameError:
to_unicode = str
def saveJSON(filename, data):
'''
Takes a dict of dicts and saves to a JSON file
Input:
filename: path to the output file
data: Dict of dict containing the label data
'''
with open(filename, 'w', encoding='utf8') as outfile:
str_ = json.dumps(data,
indent=4, sort_keys=True,
separators=(',', ': '), ensure_ascii=False)
outfile.write(to_unicode(str_))
def getGaugeValues(gaugeFilePath, startDateTime, lat, long, FPS, numFrames, hourOffset):
    '''
    Retrieves the precipitation data from the provided gauge file, based on the provided location data
    Input:
        gaugeFilepath: Path to the gauge file
        startDateTime: string of the format "DD-MM-YYYY HH:MM:SS"
        lat: latitude of the location of the video
        long: longitude of the location of the video
        FPS: Frame rate of the video
        numFrames: Number of frames in the video
        hourOffset: Offset between the startDateTime and the rain gauge (subtracted from startDateTime)
    Output:
        gaugeTimeStamps: List of timestamps of the precipitation values
        gaugeValues: Numpy array of precipitation values
        frameOffset: Number of frames left of the first minute
    '''
# Load gauge
gauges = RG.RainGauges(gaugeFilePath)
# Find the nearest rain gauge and get the datetime stamps and rain measurements
video_name = "Some Road"
# Get start time
startDateTime = datetime.datetime.strptime(startDateTime, "%d-%m-%Y %H:%M:%S")
startSeconds = startDateTime.second
startDateTime -= datetime.timedelta(seconds = startSeconds)
startDateTime -= datetime.timedelta(hours = hourOffset)
# Calculate how many frames left of the starting minute e.g. 16:00:45, has 15 seconds left
# This corresponds to 450 frames (30 FPS), and we assume we are halfway through the second, so 435 frame offset
# These initial 435 frames are assigned to the label of 16:00:00, while the 436th label is assigned to 16:00:01
FPM = FPS * 60
if startSeconds > 0:
frameOffset = (60 - startSeconds) * FPS - int(FPS/2)
else:
frameOffset = 0
# Determine how many minutes the video spans
if numFrames > frameOffset:
minutesVideo = int(np.ceil((numFrames-frameOffset)/FPM))
else:
minutesVideo = 0
# Get the end time of the video
endDateTime = startDateTime + datetime.timedelta(minutes=minutesVideo)
# Get the rain measurements from the closest rain gauge
location = RG.Location(lat, long, video_name, 0)
measurement = gauges.getNearestRainData(location, startDateTime, endDateTime)
if measurement.perSecond:
gaugeTimeStamps = list(measurement.perSecond.keys())
gaugeValues = np.asarray(list(measurement.perSecond.values()))
return gaugeTimeStamps, gaugeValues, frameOffset
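# Worked example of the offset arithmetic above (illustrative, not part of the
# original script): with FPS = 30 and a start time of 16:00:45, frameOffset =
# (60 - 45) * 30 - 15 = 435 frames, and a clip of 54000 frames then spans
# ceil((54000 - 435) / 1800) = 30 whole minutes of gauge data.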
def generateLabels(args):
    '''
    Takes a csv file with information about the relevant videos.
    Based on this information the precipitation labels are found from the nearest rain gauge in the provided rain gauge file.
    The precipitation labels are saved as a JSON file in a folder 'labels' in the work dir.
    Input:
        args:
            - csvFilePath: Path to the video csv file
            - outputFile: Name of the output file
            - binary: Whether to binarize the rain gauge data or not
            - vid_label: Whether to use the label provided in the input csv file for the entire video
            - precipation: Whether to use precipitation data for labels
            - hour_offset: How many hours to offset the rain gauge data by
            - verbose: Whether to print information
    '''
csv_file = args["csvFilePath"]
df = pd.read_csv(csv_file, sep = ",")
folder = "./labels"
if not os.path.exists(folder):
os.makedirs(folder)
binary_label = args["binary"]
vid_label = args["vid_label"]
precipation_label = args["precipation"]
hour_offset = args["hour_offset"]
label_dict = {}
verbose = args["verbose"]
# Go through each video in the supplied csv file
for index, row in df.iterrows():
FPS = row["FPS"]
gaugeFilePath = row["GaugeFilePath"]
videoFilePath = row["VideoFilePath"]
latitude = row["Latitude"]
longitude = row["Longitude"]
startDateTime = row["StartTime"]
supplied_label = row["FileLabel"]
if np.isfinite(supplied_label):
            supplied_label = int(supplied_label)
if verbose:
print("Row {}\n\tGauge File Path: {}\n\tVideoFilePath: {}\n\tStart Datetime: {}\
\n\tLatitude: {}\n\tLongitude: {}\n\tFPS: {}\n\tFrame Label: {}\n".format(index,
gaugeFilePath,
videoFilePath,
startDateTime,
latitude,
longitude,
FPS,
supplied_label))
filename = os.path.basename(videoFilePath)
if not vid_label:
## Uses the supplied label for the ENTIRE video, and stored as a single number
labels = supplied_label
frameOffset = None
numFrames = None
timestamps = []
else:
# Load video information, in order to retrieve corresponding precipation data
# Labels are stored per minute in a list
cap = cv2.VideoCapture(videoFilePath)
if not cap.isOpened():
print ("Could not open {}".format(videoFilePath))
continue
numFrames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
vidFPS = int(cap.get(cv2.CAP_PROP_FPS))
if verbose:
print("Video {}:\n\tFrames: {}\n\tFPS: {}\n\n".format(videoFilePath, numFrames, vidFPS))
assert FPS == vidFPS, "The supplied FPS, {}, and video FPS, {}, differ".format(FPS,vidFPS)
# Get the gauge values for the video
timestamps, values, frameOffset = getGaugeValues(gaugeFilePath, startDateTime, latitude, longitude, FPS, numFrames, hour_offset)
if binary_label:
if precipation_label:
## Binarizes the rain gauge data and saves a value per minute
                    labels = (values.astype(bool)).astype(int)
else:
## Creates a list of the same length returned from the rain gauge data, but fills it with the supplied label
                    labels = np.ones(len(values), dtype=int)*supplied_label
else:
## Uses the direct rain gauge data as labels per minute
labels = values
# convert numpy array to a list
labels = labels.tolist()
# Save video label dict into dict
label_dict[filename] = {"labels": labels,
"frameOffset": frameOffset,
"timestamps": [x.strftime("%Y-%m-%d %H:%M:%S") for x in timestamps],
"frameCount": numFrames,
"FPM": FPS * 60}
if verbose:
print()
print(filename, numFrames, vidFPS)
print(label_dict[filename])
# save dict to JSON
saveJSON(os.path.join(folder,args["outputFile"]), label_dict)
if __name__ == "__main__":
ap = argparse.ArgumentParser(
        description = "Generates precipitation labels for each video in the provided csv file, based on data from the nearest rain gauge")
ap.add_argument("-csvFilePath", "--csvFilePath", type=str, default="labelsCSV.csv",
help="Path to the csv file containing information per video")
ap.add_argument("-outputFile", "--outputFile", type=str, default = "labels.json",
help="Filename for the output JSON file. Saved in the active dir in a subdir called 'labels'")
ap.add_argument('--binary', action="store_true",
                    help='Use binary labels? If not set, continuous labels are generated')
ap.add_argument('--precipation', action="store_true",
                    help='Use precipitation data from the rain gauge for labels')
ap.add_argument('--vid_label', action="store_false",
help='Use specific label per frame/minute?')
ap.add_argument('--hour_offset', type=int, default=2,
                    help='How many hours to offset the rain gauge data')
ap.add_argument('--verbose', action="store_true",
help='Whether information should be printed')
args = vars(ap.parse_args())
generateLabels(args) | python | 10,676 |
# Author: Christian Brodbeck <[email protected]>
from .._exceptions import DefinitionError
from .._text import enumeration, plural
from .._utils.parse import find_variables
class Definition:
DICT_ATTRS = None
def as_dict(self):
return {k: getattr(self, k) for k in self.DICT_ATTRS}
def __eq__(self, other):
if isinstance(other, dict):
return self.as_dict() == other
elif isinstance(other, Definition):
return self.as_dict() == other.as_dict()
else:
return False
def name_ok(key: str, allow_empty: bool) -> bool:
if not key and not allow_empty:
return False
try:
return all(c not in key for c in ' ')
except TypeError:
return False
def check_names(keys, attribute, allow_empty: bool):
invalid = [key for key in keys if not name_ok(key, allow_empty)]
if invalid:
raise DefinitionError(f"Invalid {plural('name', len(invalid))} for {attribute}: {enumeration(invalid)}")
def compound(items):
out = ''
for item in items:
if item == '*':
if not out.endswith('*'):
out += '*'
elif item:
if out and not out.endswith('*'):
out += ' '
out += item
return out
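# Illustrative examples (not part of the original module): empty items are
# dropped and items are joined with single spaces, except around '*', which is
# attached directly to its neighbours.
#
#   >>> compound(['a', '', 'b'])
#   'a b'
#   >>> compound(['a', '*', 'b'])
#   'a*b'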
def dict_change(old, new):
"Readable representation of dict change"
lines = []
keys = set(new)
keys.update(old)
for key in sorted(keys):
if key not in new:
lines.append("%r: %r -> key removed" % (key, old[key]))
elif key not in old:
lines.append("%r: new key -> %r" % (key, new[key]))
elif new[key] != old[key]:
lines.append("%r: %r -> %r" % (key, old[key], new[key]))
return lines
def log_dict_change(log, kind, name, old, new):
log.warning(" %s %s changed:", kind, name)
for line in dict_change(old, new):
log.warning(" %s", line)
def log_list_change(log, kind, name, old, new):
log.warning(" %s %s changed:", kind, name)
removed = tuple(v for v in old if v not in new)
if removed:
log.warning(" Members removed: %s", ', '.join(map(str, removed)))
added = tuple(v for v in new if v not in old)
if added:
log.warning(" Members added: %s", ', '.join(map(str, added)))
def find_epoch_vars(params):
"Find variables used in a primary epoch definition"
out = set()
if params.get('sel'):
out.update(find_variables(params['sel']))
if 'trigger_shift' in params and isinstance(params['trigger_shift'], str):
out.add(params['trigger_shift'])
if 'post_baseline_trigger_shift' in params:
out.add(params['post_baseline_trigger_shift'])
return out
def find_epochs_vars(epochs):
"Find variables used in all epochs"
todo = list(epochs)
out = {}
while todo:
for e in tuple(todo):
p = epochs[e]
if 'sel_epoch' in p:
if p['sel_epoch'] in out:
out[e] = find_epoch_vars(p)
out[e].update(out[p['sel_epoch']])
todo.remove(e)
elif 'sub_epochs' in p:
if all(se in out for se in p['sub_epochs']):
out[e] = find_epoch_vars(p)
for se in p['sub_epochs']:
out[e].update(out[se])
todo.remove(e)
elif 'collect' in p:
if all(se in out for se in p['collect']):
out[e] = find_epoch_vars(p)
for se in p['collect']:
out[e].update(out[se])
todo.remove(e)
else:
out[e] = find_epoch_vars(p)
todo.remove(e)
return out
def find_dependent_epochs(epoch, epochs):
"Find all epochs whose definition depends on epoch"
todo = set(epochs).difference(epoch)
out = [epoch]
while todo:
last_len = len(todo)
for e in tuple(todo):
p = epochs[e]
if 'sel_epoch' in p:
if p['sel_epoch'] in out:
out.append(e)
todo.remove(e)
elif 'sub_epochs' in p:
if any(se in out for se in p['sub_epochs']):
out.append(e)
todo.remove(e)
elif 'collect' in p:
if any(se in out for se in p['collect']):
out.append(e)
todo.remove(e)
else:
todo.remove(e)
if len(todo) == last_len:
break
return out[1:]
def typed_arg(arg, type_):
return None if arg is None else type_(arg)
| python | 4,698 |
from .categorical_categorical import binary_contingency
from .continuous_categorical import (
binary_metrics,
multi_dists,
pr_curve,
roc_auc_curve,
two_dists,
)
from .continuous_continuous import (
dense_plot,
dense_regression,
regression,
scatter_grid,
)
from .utils import as_si, colorize
| python | 327 |
"""Sample agent implementation"""
import logging
from rich import logging as rich_logging
from ostorlab.agent import agent
from ostorlab.agent import message as m
logging.basicConfig(
format='%(message)s',
datefmt='[%X]',
level='INFO',
force=True,
handlers=[rich_logging.RichHandler(rich_tracebacks=True)]
)
logger = logging.getLogger(__name__)
logger.setLevel('DEBUG')
class HellWorldAgent(agent.Agent):
"""Hello world agent."""
def start(self) -> None:
"""TODO (author): add your description here."""
logger.info('running start')
def process(self, message: m.Message) -> None:
"""TODO (author): add your description here.
Args:
message:
Returns:
"""
# TODO (author): implement agent logic here.
del message
logger.info('processing message')
self.emit('v3.healthcheck.ping', {'body': 'Hello World!'})
if __name__ == '__main__':
logger.info('starting agent ...')
HellWorldAgent.main()
| python | 1,028 |
# -*- coding: utf-8 -*-
# File generated according to Generator/ClassesRef/Import/ImportVectorField.csv
# WARNING! All changes made in this file will be lost!
"""Method code available at https://github.com/Eomys/pyleecan/tree/master/pyleecan/Methods/Import/ImportVectorField
"""
from os import linesep
from sys import getsizeof
from logging import getLogger
from ._check import check_var, raise_
from ..Functions.get_logger import get_logger
from ..Functions.save import save
from ..Functions.copy import copy
from ..Functions.load import load_init_dict
from ..Functions.Load.import_class import import_class
from ._frozen import FrozenClass
# Import all class method
# Try/catch to remove unnecessary dependencies in unused method
try:
from ..Methods.Import.ImportVectorField.get_data import get_data
except ImportError as error:
get_data = error
from ._check import InitUnKnowClassError
from .ImportData import ImportData
class ImportVectorField(FrozenClass):
"""Abstract class for Data Import/Generation"""
VERSION = 1
# cf Methods.Import.ImportVectorField.get_data
if isinstance(get_data, ImportError):
get_data = property(
fget=lambda x: raise_(
ImportError(
"Can't use ImportVectorField method get_data: " + str(get_data)
)
)
)
else:
get_data = get_data
# save and copy methods are available in all object
save = save
copy = copy
# get_logger method is available in all object
get_logger = get_logger
def __init__(
self, components=-1, name="", symbol="", init_dict=None, init_str=None
):
"""Constructor of the class. Can be use in three ways :
- __init__ (arg1 = 1, arg3 = 5) every parameters have name and default values
for pyleecan type, -1 will call the default constructor
- __init__ (init_dict = d) d must be a dictionnary with property names as keys
- __init__ (init_str = s) s must be a string
s is the file path to load
ndarray or list can be given for Vector and Matrix
object or dict can be given for pyleecan Object"""
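        # Illustrative examples (not part of the generated code) of the three
        # construction styles described above; names and paths are hypothetical:
        #     ImportVectorField(components={"radial": ImportData()}, name="B", symbol="B")
        #     ImportVectorField(init_dict=previous_object.as_dict())
        #     ImportVectorField(init_str="path/to/saved_object.json")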
if init_str is not None: # Load from a file
init_dict = load_init_dict(init_str)[1]
if init_dict is not None: # Initialisation by dict
assert type(init_dict) is dict
# Overwrite default value with init_dict content
if "components" in list(init_dict.keys()):
components = init_dict["components"]
if "name" in list(init_dict.keys()):
name = init_dict["name"]
if "symbol" in list(init_dict.keys()):
symbol = init_dict["symbol"]
        # Set the properties (value check and conversion are done in setter)
self.parent = None
self.components = components
self.name = name
self.symbol = symbol
# The class is frozen, for now it's impossible to add new properties
self._freeze()
def __str__(self):
"""Convert this object in a readeable string (for print)"""
ImportVectorField_str = ""
if self.parent is None:
ImportVectorField_str += "parent = None " + linesep
else:
ImportVectorField_str += (
"parent = " + str(type(self.parent)) + " object" + linesep
)
if len(self.components) == 0:
ImportVectorField_str += "components = dict()" + linesep
for key, obj in self.components.items():
tmp = (
self.components[key].__str__().replace(linesep, linesep + "\t")
+ linesep
)
ImportVectorField_str += (
"components[" + key + "] =" + tmp + linesep + linesep
)
ImportVectorField_str += 'name = "' + str(self.name) + '"' + linesep
ImportVectorField_str += 'symbol = "' + str(self.symbol) + '"' + linesep
return ImportVectorField_str
def __eq__(self, other):
"""Compare two objects (skip parent)"""
if type(other) != type(self):
return False
if other.components != self.components:
return False
if other.name != self.name:
return False
if other.symbol != self.symbol:
return False
return True
def compare(self, other, name="self"):
"""Compare two objects and return list of differences"""
if type(other) != type(self):
return ["type(" + name + ")"]
diff_list = list()
if (other.components is None and self.components is not None) or (
other.components is not None and self.components is None
):
diff_list.append(name + ".components None mismatch")
elif len(other.components) != len(self.components):
diff_list.append("len(" + name + "components)")
else:
for key in self.components:
diff_list.extend(
self.components[key].compare(
other.components[key], name=name + ".components"
)
)
if other._name != self._name:
diff_list.append(name + ".name")
if other._symbol != self._symbol:
diff_list.append(name + ".symbol")
return diff_list
def __sizeof__(self):
"""Return the size in memory of the object (including all subobject)"""
S = 0 # Full size of the object
if self.components is not None:
for key, value in self.components.items():
S += getsizeof(value) + getsizeof(key)
S += getsizeof(self.name)
S += getsizeof(self.symbol)
return S
def as_dict(self):
"""Convert this object in a json seriable dict (can be use in __init__)"""
ImportVectorField_dict = dict()
if self.components is None:
ImportVectorField_dict["components"] = None
else:
ImportVectorField_dict["components"] = dict()
for key, obj in self.components.items():
if obj is not None:
ImportVectorField_dict["components"][key] = obj.as_dict()
else:
ImportVectorField_dict["components"][key] = None
ImportVectorField_dict["name"] = self.name
ImportVectorField_dict["symbol"] = self.symbol
# The class name is added to the dict for deserialisation purpose
ImportVectorField_dict["__class__"] = "ImportVectorField"
return ImportVectorField_dict
def _set_None(self):
"""Set all the properties to None (except pyleecan object)"""
self.components = None
self.name = None
self.symbol = None
def _get_components(self):
"""getter of components"""
if self._components is not None:
for key, obj in self._components.items():
if obj is not None:
obj.parent = self
return self._components
def _set_components(self, value):
"""setter of components"""
if type(value) is dict:
for key, obj in value.items():
if type(obj) is dict:
class_obj = import_class(
"pyleecan.Classes", obj.get("__class__"), "components"
)
value[key] = class_obj(init_dict=obj)
if type(value) is int and value == -1:
value = dict()
check_var("components", value, "{ImportData}")
self._components = value
components = property(
fget=_get_components,
fset=_set_components,
doc=u"""Dict of components (e.g. {"radial": ImportData})
:Type: {ImportData}
""",
)
def _get_name(self):
"""getter of name"""
return self._name
def _set_name(self, value):
"""setter of name"""
check_var("name", value, "str")
self._name = value
name = property(
fget=_get_name,
fset=_set_name,
doc=u"""Name of the vector field
:Type: str
""",
)
def _get_symbol(self):
"""getter of symbol"""
return self._symbol
def _set_symbol(self, value):
"""setter of symbol"""
check_var("symbol", value, "str")
self._symbol = value
symbol = property(
fget=_get_symbol,
fset=_set_symbol,
doc=u"""Symbol of the vector field
:Type: str
""",
)
| python | 8,571 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Minimal runtime type checking library.
This module should not be considered public API.
"""
# TODO(ericmc,shoyer): Delete this in favor of using pytype or mypy
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
import inspect
import re
# used for register_type_abbreviation and _type_repr below.
_TYPE_ABBREVIATIONS = {}
class Type(object):
"""Base class for type checker types.
The custom types defined in this module are based on types in the standard
library's typing module (in Python 3.5):
https://docs.python.org/3/library/typing.html
The only difference should be that we use actual instances of Type classes to
represent custom types rather than the metaclass magic typing uses to create
new class objects. In practice, all this should mean is that we use
`List(int)` rather than `List[int]`.
Custom types should implement __instancecheck__ and inherit from Type. Every
argument in the constructor must be a type or Type instance, and these
arguments must be stored as a tuple on the `_types` attribute.
"""
def __init__(self, *types):
self._types = types
def __repr__(self):
args_repr = ", ".join(repr(t) for t in self._types)
return "typecheck.%s(%s)" % (type(self).__name__, args_repr)
class _SingleArgumentType(Type):
"""Use this subclass for parametric types that accept only one argument."""
def __init__(self, tpe):
super(_SingleArgumentType, self).__init__(tpe)
@property
def _type(self):
tpe, = self._types # pylint: disable=unbalanced-tuple-unpacking
return tpe
class _TwoArgumentType(Type):
"""Use this subclass for parametric types that accept two arguments."""
def __init__(self, first_type, second_type):
super(_TwoArgumentType, self).__init__(first_type, second_type)
class Union(Type):
"""A sum type.
A correct type is any of the types provided.
"""
def __instancecheck__(self, instance):
return isinstance(instance, self._types)
class Optional(_SingleArgumentType):
"""An optional type.
A correct type is either the provided type or NoneType.
"""
def __instancecheck__(self, instance):
# types.NoneType does not exist in Python 3
return isinstance(instance, (self._type, type(None)))
class List(_SingleArgumentType):
"""A typed list.
A correct type is a list where each element has the single provided type.
"""
def __instancecheck__(self, instance):
return (isinstance(instance, list)
and all(isinstance(x, self._type) for x in instance))
class Sequence(_SingleArgumentType):
"""A typed sequence.
A correct type is a sequence where each element has the single provided type.
"""
def __instancecheck__(self, instance):
return (isinstance(instance, collections.Sequence)
and all(isinstance(x, self._type) for x in instance))
class Collection(_SingleArgumentType):
"""A sized, iterable container.
A correct type is an iterable and container with known size where each element
has the single provided type.
We use this in preference to Iterable because we check each instance of the
iterable at runtime, and hence need to avoid iterables that could be
exhausted.
"""
def __instancecheck__(self, instance):
return (isinstance(instance, collections.Iterable)
and isinstance(instance, collections.Sized)
and isinstance(instance, collections.Container)
and all(isinstance(x, self._type) for x in instance))
class Tuple(Type):
"""A typed tuple.
A correct type is a tuple with the correct length where each element has
the correct type.
"""
def __instancecheck__(self, instance):
return (isinstance(instance, tuple)
and len(instance) == len(self._types)
and all(isinstance(x, t) for x, t in zip(instance, self._types)))
class Mapping(_TwoArgumentType):
"""A typed mapping.
A correct type has the correct parametric types for keys and values.
"""
def __instancecheck__(self, instance):
key_type, value_type = self._types # pylint: disable=unbalanced-tuple-unpacking
return (isinstance(instance, collections.Mapping)
and all(isinstance(k, key_type) for k in instance.keys())
            and all(isinstance(v, value_type) for v in instance.values()))
class Dict(Mapping):
"""A typed dict.
A correct type has the correct parametric types for keys and values.
"""
def __instancecheck__(self, instance):
return (isinstance(instance, dict)
and super(Dict, self).__instancecheck__(instance))
def _replace_forward_references(t, context):
"""Replace forward references in the given type."""
if isinstance(t, str):
return context[t]
elif isinstance(t, Type):
return type(t)(*[_replace_forward_references(t, context) for t in t._types]) # pylint: disable=protected-access
else:
return t
def register_type_abbreviation(name, alias):
"""Register an abbreviation for a type in typecheck tracebacks.
This makes otherwise very long typecheck errors much more readable.
Example:
typecheck.register_type_abbreviation(tf.Dimension, 'tf.Dimension')
Args:
name: type or class to abbreviate.
alias: string alias to substitute.
"""
_TYPE_ABBREVIATIONS[name] = alias
def _type_repr(t):
"""A more succinct repr for typecheck tracebacks."""
string = repr(t)
for type_, alias in _TYPE_ABBREVIATIONS.items():
string = string.replace(repr(type_), alias)
string = re.sub(r"<(class|type) '([\w.]+)'>", r"\2", string)
string = re.sub(r"typecheck\.(\w+)", r"\1", string)
return string
class Error(TypeError):
"""Exception for typecheck failures."""
def accepts(*types):
"""A decorator which checks the input types of a function.
Based on:
http://stackoverflow.com/questions/15299878/how-to-use-python-decorators-to-check-function-arguments
The above draws from:
https://www.python.org/dev/peps/pep-0318/
Args:
*types: A list of Python types.
Returns:
A function to use as a decorator.
"""
def check_accepts(f):
"""Check the types."""
spec = inspect.getargspec(f)
num_function_arguments = len(spec.args)
if len(types) != num_function_arguments:
raise Error(
"Function %r has %d arguments but only %d types were provided in the "
"annotation." % (f, num_function_arguments, len(types)))
if spec.defaults:
num_defaults = len(spec.defaults)
for (name, a, t) in zip(spec.args[-num_defaults:],
spec.defaults,
types[-num_defaults:]):
allowed_type = _replace_forward_references(t, f.__globals__)
if not isinstance(a, allowed_type):
raise Error("default argument value %r of type %r is not an instance "
"of the allowed type %s for the %s argument to %r"
% (a, type(a), _type_repr(allowed_type), name, f))
@functools.wraps(f)
def new_f(*args, **kwds):
"""A helper function."""
for (a, t) in zip(args, types):
allowed_type = _replace_forward_references(t, f.__globals__)
if not isinstance(a, allowed_type):
raise Error("%r of type %r is not an instance of the allowed type %s "
"for %r" % (a, type(a), _type_repr(allowed_type), f))
return f(*args, **kwds)
return new_f
return check_accepts
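# Usage sketch (illustrative, not part of the original module): `accepts`
# pairs each declared type with the corresponding positional argument, e.g.
#     @accepts(int, str)
#     def label(count, name):
#       return "%d-%s" % (count, name)
# so label(3, "x") passes while label("3", "x") raises the Error defined above.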
def returns(*types):
"""A decorator which checks the return types of a function.
Based on:
http://stackoverflow.com/questions/15299878/how-to-use-python-decorators-to-check-function-arguments
The above draws from:
https://www.python.org/dev/peps/pep-0318/
Args:
*types: A list of Python types.
A list of one element corresponds to a single return value.
A list of several elements corresponds to several return values.
Note that a function with no explicit return value has an implicit
NoneType return and should be annotated correspondingly.
Returns:
A function to use as a decorator.
"""
def check_returns(f):
"""Check the types."""
if not types:
raise TypeError("A return type annotation must contain at least one type")
@functools.wraps(f)
def new_f(*args, **kwds):
"""A helper function."""
return_value = f(*args, **kwds)
if len(types) == 1:
# The function has a single return value.
allowed_type = _replace_forward_references(types[0], f.__globals__)
if not isinstance(return_value, allowed_type):
raise Error("%r of type %r is not an instance of the allowed type %s "
"for %r"
% (return_value, type(return_value),
_type_repr(allowed_type), f))
else:
if len(return_value) != len(types):
raise Error(
"Function %r has %d return values but only %d types were "
"provided in the annotation." %
(f, len(return_value), len(types)))
for (r, t) in zip(return_value, types):
allowed_type = _replace_forward_references(t, f.__globals__)
if not isinstance(r, allowed_type):
raise Error("%r of type %r is not an instance of allowed type %s "
"for %r" % (r, type(r), _type_repr(allowed_type), f))
return return_value
return new_f
return check_returns
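# Usage sketch (illustrative, not part of the original module): `returns(int)`
# raises Error if the decorated function returns a non-int, and a multi-type
# annotation such as `returns(int, str)` checks each element of a returned
# tuple against the corresponding type.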
| python | 10,165 |
'''
Problem 1:
Write a program that accepts a sequence of lines, one by one, as command-line
input and prints each line after converting all of its characters to uppercase.
Suppose the following input is supplied to the program:
Hello world
Practice makes perfect
Then, the output should be:
HELLO WORLD
PRACTICE MAKES PERFECT
'''
import re
print("We are going to convert words entered by user to UpperCase")
in_no_of_lines = input("Enter Number of Lines to Convert:")
if not in_no_of_lines.isdigit():
    print("Enter a numeric value for the number of lines to begin!")
else:
    no_of_lines = int(in_no_of_lines)
text = ""
line_num_initiate = 0
while line_num_initiate < no_of_lines:
line = input()
if not re.match(r'[A-Za-z ]+$', line):
print("Only Alpha letters & Space are allowed.Rerun the Program to proceed Again!")
break
line_num_initiate += 1
text = text + "\n" + line.upper()
if not text.strip() == "":
print("Converted String Result:")
print(text)
| python | 1,055 |
"""Contains show related classes."""
import tempfile
import hashlib
import os
import re
from collections import namedtuple
from mpf.core.assets import Asset, AssetPool
from mpf.core.config_validator import RuntimeToken
from mpf.core.utility_functions import Util
__api__ = ['Show', 'RunningShow', 'ShowPool']
ShowConfig = namedtuple("ShowConfig", ["name", "priority", "speed", "loops", "sync_ms", "manual_advance", "show_tokens",
"events_when_played", "events_when_stopped", "events_when_looped",
"events_when_paused", "events_when_resumed", "events_when_advanced",
"events_when_stepped_back", "events_when_updated", "events_when_completed"])
class ShowPool(AssetPool):
"""A pool of shows."""
__slots__ = []
def __repr__(self):
"""Return str representation."""
return '<ShowPool: {}>'.format(self.name)
@property
def show(self):
"""Return the next show."""
# TODO: getters should not modify state #348
return self.asset
# pylint: disable-msg=too-many-instance-attributes
class Show(Asset):
"""A show which can be instantiated."""
attribute = 'shows'
path_string = 'shows'
config_section = 'shows'
disk_asset_section = 'file_shows'
    extensions = ('yaml',)  # one-element tuple; tuple('yaml') would yield single characters
class_priority = 100
pool_config_section = 'show_pools'
asset_group_class = ShowPool
__slots__ = ["_autoplay_settings", "tokens", "token_values", "token_keys", "name", "total_steps", "show_steps",
"loaded", "mode"]
# pylint: disable-msg=too-many-arguments
def __init__(self, machine, name, file=None, config=None, data=None):
"""Initialise show."""
super().__init__(machine, name, file, config)
self._autoplay_settings = dict()
self._initialize_asset()
self.tokens = set()
self.token_values = dict()
self.token_keys = dict()
self.name = name
self.total_steps = None
self.show_steps = None
if data:
self._do_load_show(data=data)
self.loaded = True
def __lt__(self, other):
"""Compare two instances."""
return id(self) < id(other)
def _initialize_asset(self):
self.loaded = False
self.show_steps = list()
self.mode = None
def do_load(self):
"""Load a show from disk."""
self._do_load_show(None)
def _get_duration(self, data, step_num, total_step_time):
total_steps_num = len(data)
step = data[step_num]
if 'duration' not in step:
if step_num == total_steps_num - 1:
# special case with an empty last step (but longer than 1 step)
if 'time' in step and len(step) == 1 and step_num != 0:
return False
return 1
elif 'time' in data[step_num + 1]:
next_step_time = data[step_num + 1]['time']
if str(next_step_time)[0] == "+":
return Util.string_to_secs(next_step_time)
else:
if total_step_time < 0: # pragma: no cover
self._show_validation_error("Absolute timing in step {} not possible because "
"there was a duration of -1 before".format(step_num))
return Util.string_to_secs(next_step_time) - total_step_time
else:
return 1
else:
if step_num < total_steps_num - 1 and 'time' in data[step_num + 1]: # pragma: no cover
self._show_validation_error("Found invalid 'time' entry in step after {} which contains a duration. "
"Remove either of them!".format(step_num))
return Util.string_to_secs(step['duration'])
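    # Worked example (added for illustration, not from the original file) of
    # the timing rules in _get_duration: for steps
    #     - {'time': 0, ...}
    #     - {'time': 2, ...}
    #     - {'time': 4}
    # the first two steps each get a duration of 2 seconds (the difference
    # between consecutive absolute 'time' values), and the trailing time-only
    # step is dropped once it has fixed the end time of the step before it.
    # A step may also carry an explicit 'duration' entry instead of 'time'.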
def _do_load_show(self, data):
# do not use machine or the logger here because it will block
self.show_steps = list()
if not data and self.file:
data = self.load_show_from_disk()
# Pylint complains about the change from dict to list. This is intended and fine.
if isinstance(data, dict):
data = list(data)
elif not isinstance(data, list): # pragma: no cover
raise ValueError("Show {} does not appear to be a valid show "
"config".format(self.file))
if not data: # pragma: no cover
self._show_validation_error("Cannot load empty show")
total_step_time = 0
# add empty first step if show does not start right away
if 'time' in data[0] and data[0]['time'] != 0:
self.show_steps.append({'duration': Util.string_to_secs(data[0]['time'])})
total_step_time = Util.string_to_secs(data[0]['time'])
# Loop over all steps in the show file
for step_num, step in enumerate(data):
actions = dict()
# Note: all times are stored/calculated in seconds.
# Step time can be specified as either an absolute time elapsed
# (from the beginning of the show) or a relative time (time elapsed
# since the previous step). Time strings starting with a plus sign
# (+) are treated as relative times.
# Step times are all converted to relative times internally (time
# since the previous step).
# Make sure there is a time entry for each step in the show file.
duration = self._get_duration(data, step_num, total_step_time)
# special case: empty last step
if duration is False:
break
elif duration == 0: # pragma: no cover
self._show_validation_error("Step {} has 0 duration".format(step_num))
# Calculate the time since previous step
actions['duration'] = duration
if duration > 0 and total_step_time >= 0:
total_step_time += duration
else:
total_step_time = -1
# Now process show step actions
self._process_step_actions(step, actions)
self.show_steps.append(actions)
# Count how many total steps are in the show. We need this later
# so we can know when we're at the end of a show
self.total_steps = len(self.show_steps)
if self.total_steps == 0: # pragma: no cover
self._show_validation_error("Show is empty")
self._get_tokens()
def _show_validation_error(self, msg): # pragma: no cover
if self.file:
identifier = self.file
else:
identifier = self.name
raise AssertionError("Show {}: {}".format(identifier, msg))
def _process_step_actions(self, step, actions):
if not isinstance(step, dict):
raise AssertionError("Steps in show {} need to be dicts.".format(self.name))
for key, value in step.items():
# key: the section of the show, like 'leds'
            # value: dict of express settings or dict of dicts with full settings
# check to see if we know how to process this kind of entry
if key in self.machine.show_controller.show_players.keys():
actions[key] = self.machine.show_controller.show_players[key].validate_config_entry(value, self.name)
elif key != 'duration' and key != 'time': # pragma: no cover
self._show_validation_error('Invalid section "{}:" found in show {}'.format(key, self.name))
def _do_unload(self):
self.show_steps = None
def _get_tokens(self):
self._walk_show(self.show_steps)
def _walk_show(self, data, path=None, list_index=None):
# walks a list of dicts, checking tokens
if not path:
path = list()
if isinstance(data, dict):
for k, v in data.items():
self._check_token(path, k, 'key')
self._walk_show(v, path + [k])
elif isinstance(data, list):
for i in data:
self._check_token(path, i, 'key')
if list_index is None:
list_index = 0
else:
list_index += 1
self._walk_show(i, path + [list_index], list_index)
else:
self._check_token(path, data, 'value')
def get_show_steps(self, data='dummy_default!#$'):
"""Return a copy of the show steps."""
if data == 'dummy_default!#$':
data = self.show_steps
if isinstance(data, dict):
new_dict = dict()
for k, v in data.items():
new_dict[k] = self.get_show_steps(v)
return new_dict
elif isinstance(data, list):
new_list = list()
for i in data:
new_list.append(self.get_show_steps(i))
return new_list
return data
def _check_token(self, path, data, token_type):
if isinstance(data, RuntimeToken):
self._add_token(data, data.token, path, token_type)
return
if not isinstance(data, str):
return
results = re.findall(r"\(([^)]+)\)", data)
if results:
for result in results:
self._add_token(data, result, path, token_type)
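    # Note (added for clarity, not in the original file): tokens are written as
    # names in parentheses, so a step value such as '(color)' or a key such as
    # 'light_(num)' registers the tokens 'color' and 'num' for later
    # replacement via show_tokens.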
def _add_token(self, placeholder, token, path, token_type):
if token not in self.tokens:
self.tokens.add(token)
if token_type == 'key':
if token not in self.token_keys:
self.token_keys[token] = list()
self.token_keys[token].append(path + [placeholder])
elif token_type == 'value':
if token not in self.token_values:
self.token_values[token] = list()
self.token_values[token].append(path)
def play_with_config(self, show_config: ShowConfig, start_time=None, start_callback=None, stop_callback=None,
start_step=None) -> "RunningShow":
"""Play this show with config."""
if self.loaded:
show_steps = self.get_show_steps()
else:
show_steps = False
if not start_time:
start_time = self.machine.clock.get_time()
running_show = RunningShow(machine=self.machine,
show=self,
show_steps=show_steps,
start_time=start_time,
start_step=int(start_step),
callback=stop_callback,
start_callback=start_callback,
show_config=show_config)
if not self.loaded:
self.load(callback=running_show.show_loaded, priority=show_config.priority)
return running_show
# pylint: disable-msg=too-many-arguments
# pylint: disable-msg=too-many-locals
def play(self, priority=0, speed=1.0, start_step=1, callback=None,
loops=-1, sync_ms=None, manual_advance=False, show_tokens=None,
events_when_played=None, events_when_stopped=None,
events_when_looped=None, events_when_paused=None,
events_when_resumed=None, events_when_advanced=None,
events_when_stepped_back=None, events_when_updated=None,
events_when_completed=None, start_time=None, start_callback=None) -> "RunningShow":
"""Play a Show.
There are many parameters you can use here which
affect how the show is played. This includes things like the playback
speed, priority, etc. These are
all set when the show plays. (For example, you could have a Show
file which lights a bunch of lights sequentially in a circle pattern,
but you can have that circle "spin" as fast as you want depending on
how you play the show.)
Args:
priority: Integer value of the relative priority of this show. If
there's ever a situation where multiple shows want to control
the same item, the one with the higher priority will win.
("Higher" means a bigger number, so a show with priority 2 will
override a priority 1.)
speed: Float of how fast your show runs. Your Show files
specify step times in actual time values. When you play a
show,
you specify a playback rate factor that is applied to the time
values in the show (divides the relative show times). The
default value is 1.0 (uses the actual time values in specified
in the show), but you might want to speed up (speed
values > 1.0) or slow down (speed values < 1.0) the
playback rate. If you want your show to play twice as fast
(finish in half the time), you want all your time values to be
half of the specified values in the show so you would use a
speed value of 2.0. To make the show take twice as
long
                to finish, you would use a speed value of 0.5.
start_step: Integer of which step in the show file the show
should start in. Usually this is 1 (start at the beginning
                of the show), but it can be useful to start partway through. Also
used for restarting shows that you paused. A negative value
will count backwards from the end (-1 is the last position,
-2 is second to last, etc.). Note this is the "human readable"
step, so the first step is 1, not 0.
callback: A callback function that is invoked when the show is
stopped.
loops: Integer of how many times you want this show to repeat
before stopping. A value of -1 means that it repeats
indefinitely. If the show only has one step, loops will be set
to 0, regardless of the actual number of loops
sync_ms: Number of ms of the show sync cycle. A value of zero means
this show will also start playing immediately. A value of None
means the mpf:default_show_sync_ms will be used.
manual_advance: Boolean that controls whether this show should be
advanced manually (e.g. time values are ignored and the show
doesn't move to the next step until it's told to.) Default is
False.
show_tokens: Replacement tokens for the show
Returns:
The RunningShow() instance if this show plays now, or False if
the show is not loaded. (In this case the show will be loaded and
            will automatically play once it's loaded.)
"""
if not show_tokens:
show_tokens = dict()
# todo if we want to enforce that show_tokens match the tokens in the
# show exactly, uncomment below and remove the following if.
# however we don't do this today because of the default 'off' show
# that's used since it has lights and leds, so we'll have to think
# about this.
# if set(show_tokens.keys()) != self.tokens:
# raise ValueError('Token mismatch while playing show "{}". Tokens '
# 'expected: {}. Tokens submitted: {}'.format(
# self.name, self.tokens, set(show_tokens.keys())))
if not set(show_tokens.keys()).issubset(self.tokens): # pragma: no cover
raise ValueError('Token mismatch while playing show "{}". Tokens '
'expected: {}. Tokens submitted: {}'.
format(self.name, self.tokens, set(show_tokens.keys())))
show_config = self.machine.show_controller.create_show_config(
self.name, priority, speed, loops, sync_ms, manual_advance, show_tokens, events_when_played,
events_when_stopped, events_when_looped, events_when_paused, events_when_resumed, events_when_advanced,
events_when_stepped_back, events_when_updated, events_when_completed)
return self.play_with_config(show_config, start_time, start_callback, callback, start_step)
@staticmethod
def _get_mpfcache_file_name(file_name):
cache_dir = tempfile.gettempdir()
path_hash = str(hashlib.md5(bytes(file_name, 'UTF-8')).hexdigest()) # nosec
result = os.path.join(cache_dir, path_hash)
return result + ".mpf_cache"
def load_show_from_disk(self):
"""Load show from disk."""
return self.machine.config_processor.load_config_files_with_cache(
[self.file], "show", load_from_cache=not self.machine.options['no_load_cache'],
store_to_cache=self.machine.options['create_config_cache'])
# This class is more or less a container
# pylint: disable-msg=too-many-instance-attributes
class RunningShow(object):
"""A running instance of a show."""
__slots__ = ["machine", "show", "show_steps", "show_config", "callback", "start_step", "start_callback",
"_delay_handler", "next_step_index", "current_step_index", "next_step_time", "name", "loops",
"id", "_players", "debug", "_stopped", "_show_loaded", "_total_steps", "context"]
# pylint: disable-msg=too-many-arguments
# pylint: disable-msg=too-many-locals
def __init__(self, machine, show, show_steps, start_step, callback, start_time, start_callback, show_config):
"""Initialise an instance of a show."""
self.machine = machine
self.show = show
self.show_steps = show_steps
self.show_config = show_config
self.callback = callback
self.start_step = start_step
self.start_callback = start_callback
self._delay_handler = None
self.next_step_index = None
self.current_step_index = None
self.next_step_time = start_time
self.name = show.name
self.loops = self.show_config.loops
self.id = self.machine.show_controller.get_next_show_id()
self.context = "show_{}".format(self.id)
self._players = set()
# if show_tokens:
# self.show_tokens = show_tokens
# else:
# self.show_tokens = dict()
self.debug = False
self._stopped = False
self._total_steps = None
if show_steps:
self._show_loaded = True
self._start_play()
else:
self._show_loaded = False
def show_loaded(self, show):
"""Handle that a deferred show was loaded.
Start playing the show as if it started earlier.
"""
del show
self._show_loaded = True
self.show_steps = self.show.get_show_steps()
self._start_play()
def _start_play(self):
if self._stopped:
return
self._total_steps = len(self.show_steps)
if self.start_step > 0:
self.next_step_index = self.start_step - 1
elif self.start_step < 0:
self.next_step_index = self.start_step % self._total_steps
else:
self.next_step_index = 0
if self.show_config.show_tokens and self.show.tokens:
self._replace_token_values(**self.show_config.show_tokens)
self._replace_token_keys(**self.show_config.show_tokens)
# Figure out the show start time
if self.show_config.sync_ms:
# calculate next step based on synchronized start time
self.next_step_time += (self.show_config.sync_ms / 1000.0) - (self.next_step_time %
(self.show_config.sync_ms / 1000.0))
# but wait relative to real time
delay_secs = self.next_step_time - self.machine.clock.get_time()
self._delay_handler = self.machine.clock.schedule_once(
self._start_now, delay_secs)
else: # run now
self._start_now()
def _post_events(self, actions):
for action in actions:
events = getattr(self.show_config, "events_when_{}".format(action), None)
if events:
for event in events:
self.machine.events.post(event)
def __repr__(self):
"""Return str representation."""
return 'Running Show Instance: "{}" {} {}'.format(self.name, self.show_config.show_tokens, self.next_step_index)
def _replace_token_values(self, **kwargs):
for token, replacement in kwargs.items():
if token in self.show.token_values:
for token_path in self.show.token_values[token]:
target = self.show_steps
for x in token_path[:-1]:
target = target[x]
if isinstance(target[token_path[-1]], RuntimeToken):
target[token_path[-1]] = target[token_path[-1]].validator_function(replacement, None)
elif target[token_path[-1]] == "(" + token + ")":
target[token_path[-1]] = replacement
else:
target[token_path[-1]] = target[token_path[-1]].replace("(" + token + ")", replacement)
def _replace_token_keys(self, **kwargs):
keys_replaced = dict()
# pylint: disable-msg=too-many-nested-blocks
for token, replacement in kwargs.items():
if token in self.show.token_keys:
key_name = '({})'.format(token)
for token_path in self.show.token_keys[token]:
target = self.show_steps
token_str = ""
for x in token_path[:-1]:
if token_str in keys_replaced:
x = keys_replaced[token_str + str(x) + "-"]
token_str += str(x) + "-"
target = target[x]
use_string_replace = bool(token_path[-1] != "(" + token + ")")
final_key = token_path[-1]
if final_key in keys_replaced:
final_key = keys_replaced[final_key]
if use_string_replace:
replaced_key = final_key.replace("(" + token + ")", replacement)
else:
replaced_key = replacement
if final_key in target:
target[replaced_key] = target.pop(final_key)
else:
raise KeyError("Could not find token {} ({}) in {}".format(final_key, key_name, target))
keys_replaced[token_str] = replaced_key
@property
def stopped(self):
"""Return if stopped."""
return self._stopped
def stop(self):
"""Stop show."""
if self._stopped:
return
self._stopped = True
# if the start callback has never been called then call it now
if self.start_callback:
self.start_callback()
self.start_callback = None
self._remove_delay_handler()
# clear context in used players
for player in self._players:
self.machine.show_controller.show_players[player].show_stop_callback(self.context)
self._players = set()
if self.callback and callable(self.callback):
self.callback()
self._post_events(['stopped'])
def _remove_delay_handler(self):
if self._delay_handler:
self.machine.clock.unschedule(self._delay_handler)
self._delay_handler = None
def pause(self):
"""Pause show."""
self._remove_delay_handler()
self._post_events(['paused'])
def resume(self):
"""Resume paused show."""
if not self._show_loaded:
return
self.next_step_time = self.machine.clock.get_time()
self._run_next_step(post_events='resumed')
def update(self, **kwargs):
"""Update show.
Not implemented yet.
"""
# todo
raise NotImplementedError("Show update is not implemented yet. It's "
"coming though...")
# don't forget this when we implement this feature
# self._post_events(['updated'])
def advance(self, steps=1, show_step=None):
"""Manually advance this show to the next step."""
self._remove_delay_handler()
if steps != 1:
self.next_step_index += steps - 1
elif show_step is not None:
if not isinstance(show_step, int) or show_step < 0:
                raise AssertionError('Cannot advance {} to step "{}" as that is '
'not a valid step number.'.format(self, show_step))
self.next_step_index = show_step - 1
if self._show_loaded:
self._run_next_step(post_events='advanced')
def step_back(self, steps=1):
"""Manually step back this show to a previous step."""
self._remove_delay_handler()
self.next_step_index -= steps + 1
if self._show_loaded:
self._run_next_step(post_events='stepped_back')
def _start_now(self) -> None:
"""Start playing the show."""
if self.start_callback:
self.start_callback()
self.start_callback = None
self._run_next_step(post_events='played')
def _run_next_step(self, post_events=None) -> None:
"""Run the next show step."""
events = []
if post_events:
events.append(post_events)
if self.next_step_index < 0:
self.next_step_index %= self._total_steps
# if we're at the end of the show
if self.next_step_index >= self._total_steps:
if self.loops > 0:
self.loops -= 1
self.next_step_index = 0
events.append('looped')
elif self.loops < 0:
self.next_step_index = 0
events.append('looped')
else:
self.stop()
events.append("completed")
self._post_events(events)
return
self.current_step_index = self.next_step_index
for item_type, item_dict in self.show_steps[self.current_step_index].items():
if item_type == 'duration':
continue
player = self.machine.show_controller.show_players.get(item_type, None)
if not player:
raise ValueError("Invalid entry in show: {}".format(item_type))
player.show_play_callback(
settings=item_dict,
context=self.context,
calling_context=self.current_step_index,
priority=self.show_config.priority,
show_tokens=self.show_config.show_tokens,
start_time=self.next_step_time)
self._players.add(item_type)
self._post_events(events)
self.next_step_index += 1
time_to_next_step = self.show_steps[self.current_step_index]['duration'] / self.show_config.speed
if not self.show_config.manual_advance and time_to_next_step > 0:
self.next_step_time += time_to_next_step
self._delay_handler = self.machine.clock.schedule_once(self._run_next_step,
self.next_step_time - self.machine.clock.get_time())
| python | 27,796 |
# -*- coding: utf-8 -*-
from openprocurement.auctions.core.constants import STATUS4ROLE
from openprocurement.auctions.core.utils import (
save_auction,
apply_patch,
opresource,
get_file,
upload_file,
update_file_content_type,
json_view,
context_unpack,
APIResource,
)
from openprocurement.auctions.core.validation import (
validate_file_update,
validate_file_upload,
validate_patch_document_data,
)
@opresource(name='rubbleOther:Auction Complaint Documents',
collection_path='/auctions/{auction_id}/complaints/{complaint_id}/documents',
path='/auctions/{auction_id}/complaints/{complaint_id}/documents/{document_id}',
auctionsprocurementMethodType="rubbleOther",
description="Auction complaint documents")
class AuctionComplaintDocumentResource(APIResource):
@json_view(permission='view_auction')
def collection_get(self):
"""Auction Complaint Documents List"""
if self.request.params.get('all', ''):
collection_data = [i.serialize("view") for i in self.context.documents]
else:
collection_data = sorted(dict([
(i.id, i.serialize("view"))
for i in self.context.documents
]).values(), key=lambda i: i['dateModified'])
return {'data': collection_data}
@json_view(validators=(validate_file_upload,), permission='edit_complaint')
def collection_post(self):
"""Auction Complaint Document Upload
"""
if self.request.validated['auction_status'] not in ['active.tendering', 'active.auction', 'active.qualification', 'active.awarded']:
self.request.errors.add('body', 'data', 'Can\'t add document in current ({}) auction status'.format(self.request.validated['auction_status']))
self.request.errors.status = 403
return
if self.context.status not in STATUS4ROLE.get(self.request.authenticated_role, []):
self.request.errors.add('body', 'data', 'Can\'t add document in current ({}) complaint status'.format(self.context.status))
self.request.errors.status = 403
return
document = upload_file(self.request)
document.author = self.request.authenticated_role
self.context.documents.append(document)
if save_auction(self.request):
self.LOGGER.info('Created auction complaint document {}'.format(document.id),
extra=context_unpack(self.request, {'MESSAGE_ID': 'auction_complaint_document_create'}, {'document_id': document.id}))
self.request.response.status = 201
document_route = self.request.matched_route.name.replace("collection_", "")
self.request.response.headers['Location'] = self.request.current_route_url(_route_name=document_route, document_id=document.id, _query={})
return {'data': document.serialize("view")}
@json_view(permission='view_auction')
def get(self):
"""Auction Complaint Document Read"""
if self.request.params.get('download'):
return get_file(self.request)
document = self.request.validated['document']
document_data = document.serialize("view")
document_data['previousVersions'] = [
i.serialize("view")
for i in self.request.validated['documents']
if i.url != document.url
]
return {'data': document_data}
@json_view(validators=(validate_file_update,), permission='edit_complaint')
def put(self):
"""Auction Complaint Document Update"""
if self.request.authenticated_role != self.context.author:
self.request.errors.add('url', 'role', 'Can update document only author')
self.request.errors.status = 403
return
if self.request.validated['auction_status'] not in ['active.tendering', 'active.auction', 'active.qualification', 'active.awarded']:
self.request.errors.add('body', 'data', 'Can\'t update document in current ({}) auction status'.format(self.request.validated['auction_status']))
self.request.errors.status = 403
return
if self.request.validated['complaint'].status not in STATUS4ROLE.get(self.request.authenticated_role, []):
self.request.errors.add('body', 'data', 'Can\'t update document in current ({}) complaint status'.format(self.request.validated['complaint'].status))
self.request.errors.status = 403
return
document = upload_file(self.request)
document.author = self.request.authenticated_role
self.request.validated['complaint'].documents.append(document)
if save_auction(self.request):
self.LOGGER.info('Updated auction complaint document {}'.format(self.request.context.id),
extra=context_unpack(self.request, {'MESSAGE_ID': 'auction_complaint_document_put'}))
return {'data': document.serialize("view")}
@json_view(content_type="application/json", validators=(validate_patch_document_data,), permission='edit_complaint')
def patch(self):
"""Auction Complaint Document Update"""
if self.request.authenticated_role != self.context.author:
self.request.errors.add('url', 'role', 'Can update document only author')
self.request.errors.status = 403
return
if self.request.validated['auction_status'] not in ['active.tendering', 'active.auction', 'active.qualification', 'active.awarded']:
self.request.errors.add('body', 'data', 'Can\'t update document in current ({}) auction status'.format(self.request.validated['auction_status']))
self.request.errors.status = 403
return
if self.request.validated['complaint'].status not in STATUS4ROLE.get(self.request.authenticated_role, []):
self.request.errors.add('body', 'data', 'Can\'t update document in current ({}) complaint status'.format(self.request.validated['complaint'].status))
self.request.errors.status = 403
return
if apply_patch(self.request, src=self.request.context.serialize()):
update_file_content_type(self.request)
self.LOGGER.info('Updated auction complaint document {}'.format(self.request.context.id),
extra=context_unpack(self.request, {'MESSAGE_ID': 'auction_complaint_document_patch'}))
return {'data': self.request.context.serialize("view")}
| python | 6,538 |
# Generated by Django 3.0.2 on 2020-05-19 08:21
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Author',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('first_name', models.CharField(max_length=100)),
('last_name', models.CharField(max_length=100)),
('date_of_birth', models.DateField(blank=True, help_text='yyyy-mm-dd', null=True)),
('date_of_death', models.DateField(blank=True, help_text='yyyy-mm-dd', null=True, verbose_name='Died')),
],
options={
'ordering': ['last_name', 'first_name'],
},
),
migrations.CreateModel(
name='BlogPost',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200)),
('post', models.TextField(help_text='What is the body of your blog post.')),
('pub_date', models.DateField(auto_now_add=True)),
('pub_time', models.TimeField(auto_now_add=True)),
],
options={
'ordering': ['-pub_date', '-pub_time'],
},
),
migrations.CreateModel(
name='Book',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200)),
('summary', models.TextField(help_text='Enter a brief description of the book', max_length=1000)),
('isbn', models.CharField(help_text='13 Character <a href="https://www.isbn-international.org/content/what-isbn">ISBN number</a>', max_length=13, verbose_name='ISBN')),
('author', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='catalog.Author')),
],
options={
'ordering': ['title'],
},
),
migrations.CreateModel(
name='Genre',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(help_text='Enter a book genre (e.g. Science Fiction)', max_length=200)),
],
),
migrations.CreateModel(
name='Language',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(help_text='What language is this print in?', max_length=25)),
],
),
migrations.CreateModel(
name='BookInstance',
fields=[
('id', models.UUIDField(default=uuid.uuid4, help_text='Unique ID for this particular book across whole library', primary_key=True, serialize=False)),
('imprint', models.CharField(max_length=200)),
('borrowed_on', models.DateField(blank=True, null=True)),
('due_back', models.DateField(blank=True, null=True)),
('status', models.CharField(blank=True, choices=[('m', 'Maintenance'), ('o', 'On loan'), ('a', 'Available'), ('r', 'Reserved')], default='m', help_text='Book availability', max_length=1)),
('book', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='catalog.Book')),
('borrower', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ['due_back'],
'permissions': (('can_mark_returned', 'Set book as returned'),),
},
),
migrations.AddField(
model_name='book',
name='genre',
field=models.ManyToManyField(help_text='Select a genre for this book', to='catalog.Genre'),
),
migrations.AddField(
model_name='book',
name='language',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='catalog.Language'),
),
]
| python | 4,601 |
# Copyright (c) 2020 Graphcore Ltd. All rights reserved.
"""
Tests covering various CNN training options using the ResNet model.
"""
import glob
import json
import os
import portpicker
import statistics
import subprocess
import sys
import tempfile
import tensorflow as tf
import time
import unittest
import pytest
from test_common import get_csv, parse_csv, run_train, cifar10_data_dir
@pytest.mark.category1
class TestBasicFunctionality(unittest.TestCase):
""" Test that the help option works"""
def test_help(self):
help_out = run_train(**{'--help': ''})
self.assertNotEqual(help_out.find("usage: train.py"), -1)
@pytest.mark.category2
@pytest.mark.ipus(1)
class TestMisc(unittest.TestCase):
"""Some miscellaneous options"""
@classmethod
def setUpClass(cls):
out = run_train(**{'--data-dir': cifar10_data_dir,
'--name-suffix': 'penguin',
'--log-dir': 'logs/walrus',
'--iterations': 10,
'--batches-per-step': 10})
cls.logdir = None
for line in out.split('\n'):
if line.find('Saving to ') != -1:
cls.logdir = line[11:]
break
cls.validation = get_csv(out, 'validation.csv')
cls.training = get_csv(out, 'training.csv')
def test_results(self):
# test_logdir
self.assertEqual(self.logdir[:12], 'logs/walrus/')
# test_name_suffix
self.assertNotEqual(self.logdir.find('penguin'), -1)
@pytest.mark.category2
@pytest.mark.ipus(1)
class TestCifar10Training(unittest.TestCase):
"""Testing some basic training parameters"""
@classmethod
def setUpClass(cls):
out = run_train(**{'--data-dir': cifar10_data_dir,
'--epochs': 10,
'--warmup-epochs': 0,
'--learning-rate-decay': '0.1',
'--learning-rate-schedule': '0.5,0.75,0.875'})
cls.validation = get_csv(out, 'validation.csv')
cls.training = get_csv(out, 'training.csv')
def test_results(self):
# test_final_validation_accuracy
final_acc = self.validation['val_acc'][-1]
self.assertGreater(final_acc, 80)
self.assertLess(final_acc, 87)
# test_final_training_accuracy
final_acc = self.training['train_acc_avg'][-1]
self.assertGreater(final_acc, 80)
self.assertLess(final_acc, 87)
# test_learning_rates
self.assertEqual(self.training['lr'][0], 0.5)
self.assertEqual(self.training['lr'][-1], 0.0005)
# test_epochs_completed
self.assertEqual(round(self.training['epoch'][-1]), 10)
@pytest.mark.category2
@pytest.mark.ipus(1)
class TestCifar10FullTraining(unittest.TestCase):
"""Fast training of Cifar-10 to good accuracy"""
@classmethod
def setUpClass(cls):
out = run_train(**{'--data-dir': cifar10_data_dir,
'--epochs': 50,
'--batch-size': 48,
'--warmup-epochs': 2,
'--lr-schedule': 'cosine',
'--label-smoothing': '0.05',
'--base-learning-rate': -5,
'--precision': '16.32'})
cls.validation = get_csv(out, 'validation.csv')
cls.training = get_csv(out, 'training.csv')
def test_results(self):
# test_final_validation_accuracy
final_acc = statistics.median(self.validation['val_acc'][-3:-1])
self.assertGreater(final_acc, 89.0)
self.assertLess(final_acc, 91.0)
# test_final_training_accuracy
final_acc = self.training['train_acc_avg'][-1]
self.assertGreater(final_acc, 96)
self.assertLess(final_acc, 98)
# test_final_loss
self.assertLess(self.training['loss_batch'][-1], 0.45)
self.assertGreater(self.training['loss_batch'][-1], 0.35)
# test_epochs_completed
self.assertEqual(round(self.training['epoch'][-1]), 50)
@pytest.mark.category2
@pytest.mark.ipus(1)
class TestResNet50SingleIPUTraining(unittest.TestCase):
"""ResNet-50 example on a single IPU.
    This differs from the command line in the README:
here we are testing with generated random data and only 10 iterations.
"""
@classmethod
def setUpClass(cls):
out = run_train(**{'--generated-data': '',
'--dataset': 'ImageNet',
'--model-size': 50,
'--batch-size': 1,
'--available-memory-proportion': 0.1,
'--iterations': 10,
'--batches-per-step': 10})
cls.validation = get_csv(out, 'validation.csv')
cls.training = get_csv(out, 'training.csv')
def test_iterations_completed(self):
self.assertEqual(self.training['iteration'][-1], 10)
@pytest.mark.category2
@pytest.mark.ipus(2)
class TestResNet50Pipelining2IPUs(unittest.TestCase):
"""Pipelined ResNet-50 from the README but with generated random data
and only 10 iterations.
"""
@classmethod
def setUpClass(cls):
out = run_train(**{'--iterations': 10,
'--batches-per-step': 10,
'--dataset': 'imagenet',
'--generated-data': '',
'--model-size': 50,
'--shards': 2,
'--pipeline': '',
'--gradient-accumulation-count': 256,
'--batch-size': 2,
'--no-validation': '',
'--enable-recomputation': '',
'--available-memory-proportion': 0.1,
'--pipeline-splits': 'b3/1/relu'})
cls.training = get_csv(out, 'training.csv')
def test_iterations_completed(self):
self.assertEqual(self.training['iteration'][-1], 10)
self.assertGreater(self.training['loss_batch'][-1], 0)
@pytest.mark.category2
@pytest.mark.ipus(4)
class TestResNet50Pipelining2IPUs2Replicas(unittest.TestCase):
"""Pipelined and replicated ResNet-50 from the README but with generated random
data and only 10 iterations.
"""
@classmethod
def setUpClass(cls):
out = run_train(**{'--iterations': 10,
'--batches-per-step': 10,
'--dataset': 'imagenet',
'--generated-data': '',
'--model-size': 50,
'--shards': 2,
'--replicas': 2,
'--pipeline': '',
'--gradient-accumulation-count': 128,
'--pipeline-schedule': 'Grouped',
'--batch-size': 2,
'--no-validation': '',
'--enable-recomputation': '',
'--available-memory-proportion': 0.1,
'--pipeline-splits': 'b3/0/relu'})
cls.training = get_csv(out, 'training.csv')
def test_iterations_completed(self):
self.assertEqual(self.training['iteration'][-1], 10)
self.assertGreater(self.training['loss_batch'][-1], 0)
@pytest.mark.category2
@pytest.mark.ipus(2)
class TestReplicatedTraining(unittest.TestCase):
"""Using replicas for data parallelism"""
@classmethod
def setUpClass(cls):
out = run_train(**{'--data-dir': cifar10_data_dir,
'--model': 'resnet',
'--lr-schedule': 'stepped',
'--learning-rate-decay': 0.5,
'--learning-rate-schedule': '0.5,0.9',
'--epochs': 20,
'--replicas': 2})
cls.validation = get_csv(out, 'validation.csv')
cls.training = get_csv(out, 'training.csv')
def test_results(self):
# test_final_training_accuracy
final_acc = self.training['train_acc_avg'][-1]
self.assertGreater(final_acc, 85)
self.assertLess(final_acc, 95)
# test_epochs_completed
self.assertEqual(round(self.training['epoch'][-1]), 20)
@pytest.mark.category1
@pytest.mark.ipus(1)
class TestLotsOfOptions(unittest.TestCase):
"""Testing lots of other options to check they are still available"""
@classmethod
def setUpClass(cls):
out = run_train(**{'--dataset': 'cifar-10',
'--epochs': 10,
'--model-size': 14,
'--batch-norm': '',
'--pipeline-num-parallel': 8,
'--generated-data': '',
'--batch-size': 16,
'--base-learning-rate': -4,
'--precision': '32.32',
'--seed': 1234,
'--warmup-epochs': 0,
'--no-stochastic-rounding': '',
'--batches-per-step': 100
})
cls.validation = get_csv(out, 'validation.csv')
cls.training = get_csv(out, 'training.csv')
# We're mostly just testing that training still runs with all the above options.
def test_results(self):
# test_learning_rate
self.assertEqual(self.training['lr'][0], 1.0)
# test_epoch
self.assertEqual(int(self.validation['epoch'][-1] + 0.5), 10)
@pytest.mark.category1
@pytest.mark.ipus(16)
class TestPopdist(unittest.TestCase):
"""Testing training with popdist launched using poprun."""
def test_resnet8(self):
NUM_TOTAL_REPLICAS = 4
NUM_INSTANCES = 2
NUM_LOCAL_REPLICAS = NUM_TOTAL_REPLICAS // NUM_INSTANCES
with tempfile.TemporaryDirectory() as logdir:
# The buildbot runs as root, so let's allow that.
cmd = [
'poprun',
'--mpi-global-args=--tag-output --allow-run-as-root',
'--num-replicas=' + str(NUM_TOTAL_REPLICAS),
'--num-instances=' + str(NUM_INSTANCES),
'--vipu-server-timeout=600',
sys.executable,
'train.py',
'--dataset=cifar-10',
'--generated-data',
'--model-size=8',
'--batch-size=1',
'--batches-per-step=10',
'--gradient-accumulation-count=10',
'--no-validation',
'--no-stochastic-rounding',
'--iterations=100',
'--warmup-epochs=0',
'--log-dir', logdir,
'--name-suffix', 'popdist_instance',
'--ckpt-all-instances', "true",
'--log-all-instances', "true",
'--on-demand'
]
# Add some debug logging.
extra_env = {
'POPRUN_LOG_LEVEL': 'TRACE',
'TF_CPP_VMODULE': 'poplar_compiler=1,poplar_executor=1',
}
cwd = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
env = os.environ.copy()
env.update(extra_env)
subprocess.check_call(cmd, cwd=cwd, env=env)
instance_logdirs = glob.glob(f"{logdir}/*_popdist_instance_*")
self.assertEqual(len(instance_logdirs), NUM_INSTANCES)
training_logs = []
for instance_logdir in instance_logdirs:
# Check that each instance got the correct number of replicas from popdist.
with open(os.path.join(instance_logdir, 'arguments.json'), 'r') as f:
argument_log = json.load(f)
self.assertEqual(argument_log['replicas'], NUM_LOCAL_REPLICAS)
# Check that the final accuracy is decent.
training_log = parse_csv(os.path.join(instance_logdir, 'training.csv'))
self.assertGreater(training_log['train_acc_avg'][-1], 95)
training_logs.append(training_log)
# The final training accuracy should be the same on all instances.
for i in range(1, NUM_INSTANCES):
self.assertEqual(
training_logs[0]['train_acc_avg'][-1],
training_logs[i]['train_acc_avg'][-1])
# The final training loss should be the same on all instances.
for i in range(1, NUM_INSTANCES):
self.assertEqual(
training_logs[0]['loss_avg'][-1],
training_logs[i]['loss_avg'][-1])
# The final weights should be the same on all instances.
var_names_and_shapes = tf.train.list_variables(instance_logdirs[0])
for var_name, _ in var_names_and_shapes:
value_instance_0 = tf.train.load_variable(instance_logdirs[0], var_name)
for i in range(1, NUM_INSTANCES):
value_instance_i = tf.train.load_variable(instance_logdirs[i], var_name)
self.assertListEqual(value_instance_0.tolist(), value_instance_i.tolist())
@pytest.mark.category3
@pytest.mark.ipus(16)
class TestDistributedTraining(unittest.TestCase):
"""Testing distributed training with multiple processes on a single machine with 16 IPUs."""
def test_resnet_50_from_readme(self):
NUM_WORKERS = 2
WORKER_TIMEOUT_SECONDS = 60 * 60
with tempfile.TemporaryDirectory() as logdir:
cmd = [
'python3', 'train.py',
'--dataset=imagenet',
'--generated-data',
'--model-size=50',
'--batch-size=4',
'--batches-per-step=1',
'--shards=4',
'--pipeline',
'--gradient-accumulation-count=64',
'--pipeline-splits', 'b1/2/relu', 'b2/3/relu', 'b3/5/relu',
'--enable-recomputation',
'--replicas=2', # Instead of 4 to make two processes fit on one machine.
'--distributed',
'--no-stochastic-rounding',
'--no-validation',
'--iterations=100',
'--learning-rate-schedule=1',
'--base-learning-rate=-14',
'--log-dir', logdir,
'--ckpt-all-instances', "true",
'--log-all-instances', "true",
'--on-demand'
]
extra_env = {
'POPLAR_ENGINE_OPTIONS': '{"opt.maxCopyMergeSize": 8388608}',
}
cwd = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
worker_ports = self._pick_unique_unused_ports(NUM_WORKERS)
cluster_spec = {
'worker': ['localhost:%s' % port for port in worker_ports]
}
processes = self._start_processes_with_tf_config(cmd, cwd, extra_env, cluster_spec)
self._wait_for_processes(processes, WORKER_TIMEOUT_SECONDS)
worker_log_dirs = self._find_worker_log_dirs(NUM_WORKERS, logdir)
training_logs = [parse_csv(os.path.join(d, "training.csv")) for d in worker_log_dirs]
# The final training accuracy should be the same on all workers.
for i in range(1, NUM_WORKERS):
self.assertEqual(
training_logs[0]['train_acc_avg'][-1],
training_logs[i]['train_acc_avg'][-1])
# The final training loss should be the same on all workers.
for i in range(1, NUM_WORKERS):
self.assertEqual(
training_logs[0]['loss_avg'][-1],
training_logs[i]['loss_avg'][-1])
# The final weights should be the same on all workers.
var_names_and_shapes = tf.train.list_variables(worker_log_dirs[0])
for var_name, _ in var_names_and_shapes:
value_worker_0 = tf.train.load_variable(worker_log_dirs[0], var_name)
for i in range(1, NUM_WORKERS):
value_worker_i = tf.train.load_variable(worker_log_dirs[i], var_name)
self.assertListEqual(value_worker_0.tolist(), value_worker_i.tolist())
@staticmethod
def _start_processes_with_tf_config(cmd, cwd, extra_env, cluster_spec):
processes = []
for i in range(len(cluster_spec['worker'])):
env = os.environ.copy()
env.update(extra_env)
env["TF_CONFIG"] = json.dumps({
"cluster": cluster_spec,
"task": {
"type": "worker",
"index": i
}
})
p = subprocess.Popen(cmd + ["--name-suffix", f"worker{i}"], cwd=cwd, env=env)
processes.append(p)
return processes
@staticmethod
def _pick_unique_unused_ports(num_ports):
ports = set()
while len(ports) < num_ports:
ports.add(portpicker.pick_unused_port())
return list(ports)
@staticmethod
def _wait_for_processes(processes, timeout):
start_time = time.monotonic()
remaining = processes[:]
try:
while remaining:
p = remaining[0]
elapsed = time.monotonic() - start_time
returncode = p.wait(timeout - elapsed)
# Only pop after a successful wait to allow for clean up.
remaining.pop(0)
if returncode != 0:
raise subprocess.CalledProcessError(returncode, cmd=" ".join(p.args))
finally:
# Try to clean up by killing any processes still alive.
for p in remaining:
p.kill()
@staticmethod
def _find_worker_log_dirs(num_workers, logdir):
worker_log_dirs = []
for i in range(num_workers):
logdirs = glob.glob(f"{logdir}/*_worker{i}_*")
if len(logdirs) != 1:
raise RuntimeError(f"Expected 1 worker dir, found {len(logdirs)}: {logdirs}")
worker_log_dirs.append(logdirs[0])
return worker_log_dirs
@pytest.mark.category1
@pytest.mark.ipus(1)
class TestConfig(unittest.TestCase):
"""Testing lots of other options to check they are still available"""
@classmethod
def setUpClass(cls):
out = run_train(**{'--config': 'mk2_resnet8_test',
'--data-dir': cifar10_data_dir,
})
cls.training = get_csv(out, 'training.csv')
def test_results(self):
# test the cmd line arg overrode config arg
self.assertEqual(int(self.training['epoch'][-1]), 10)
@pytest.mark.category2
@pytest.mark.ipus(2)
@pytest.mark.ipu_version("ipu2")
class TestResNet50RecomputeDbnTraining(unittest.TestCase):
"""ResNet-50 example on two IPUs with distributed batch norm and recompute.
"""
@classmethod
def setUpClass(cls):
out = run_train(**{'--generated-data': '',
'--dataset': 'ImageNet',
'--model-size': 50,
'--batch-size': 8,
'--available-memory-proportion': 0.1,
'--iterations': 10,
'--BN-span': 2,
'--internal-exchange-optimisation-target': 'memory',
'--pipeline': '',
'--gradient-accumulation-count': 2,
'--pipeline-schedule': 'Sequential',
'--enable-recomputation': '',
'--pipeline-splits': 'b1/0/relu',
'--eight-bit': '',
'--replicas': 2,
'--enable-half-partials': '',
'--disable-variable-offloading': '',
'--batch-norm': '',
'--normalise-input': ''
})
cls.validation = get_csv(out, 'validation.csv')
cls.training = get_csv(out, 'training.csv')
def test_iterations_completed(self):
self.assertEqual(self.training['iteration'][-1], 500)
| python | 20,493 |
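The distributed-training tests above launch one training process per worker and tell the workers apart solely through the TF_CONFIG environment variable, as in _start_processes_with_tf_config. A minimal standalone sketch of that launch pattern, assuming two workers on localhost with made-up port numbers and a placeholder train.py entry point:
import json
import os
import subprocess
# Hypothetical fixed ports; the test above picks genuinely free ones via portpicker.
cluster_spec = {"worker": ["localhost:23456", "localhost:23457"]}
processes = []
for index in range(len(cluster_spec["worker"])):
    env = os.environ.copy()
    # Every worker sees the same cluster spec but its own task index.
    env["TF_CONFIG"] = json.dumps({
        "cluster": cluster_spec,
        "task": {"type": "worker", "index": index},
    })
    processes.append(subprocess.Popen(["python3", "train.py"], env=env))
for p in processes:
    p.wait()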
import secrets
import string
import traceback
from ceph.ceph import CommandFailed
from tests.cephfs.cephfs_utilsV1 import FsUtils
from utility.log import Log
log = Log(__name__)
def run(ceph_cluster, **kw):
"""
CEPH-83574027 - Ensure creation of Subvolgroups, subvolumes works on NFS exports and run IO from nfs clients
Pre-requisites:
1. Create cephfs volume
       ceph fs volume create <vol_name>
2. Create nfs cluster
ceph nfs cluster create <nfs_name> <nfs_server>
Test operation:
1. Create cephfs nfs export
ceph nfs export create cephfs <fs_name> <nfs_name> <nfs_export_name> path=<export_path>
    2. Create 2 cephfs subvolume groups
3. Create cephfs subvolume in cephfs subvolume group
    4. Create cephfs subvolume in the default cephfs subvolume group
5. Mount nfs mount with cephfs export
"mount -t nfs -o port=2049 <nfs_server>:<nfs_export> <nfs_mounting_dir>
7. Verify subvolume groups & subvolumes are created
6. Run IOs on both cephfs subvolumegroups & subvolumes
Clean-up:
1. Remove all the data in Cephfs file system
2. Remove all the cephfs mounts
3. Delete cephfs nfs export
"""
try:
tc = "CEPH-83574027"
log.info(f"Running cephfs {tc} test case")
config = kw["config"]
build = config.get("build", config.get("rhbuild"))
fs_util = FsUtils(ceph_cluster)
clients = ceph_cluster.get_ceph_objects("client")
client1 = clients[0]
fs_util.prepare_clients(clients, build)
fs_util.auth_list(clients)
mon_node_ip = fs_util.get_mon_node_ips()
mon_node_ip = ",".join(mon_node_ip)
rhbuild = config.get("rhbuild")
nfs_servers = ceph_cluster.get_ceph_objects("nfs")
nfs_server = nfs_servers[0].node.hostname
nfs_name = "cephfs-nfs"
nfs_export_name = "/export_" + "".join(
secrets.choice(string.digits) for i in range(3)
)
export_path = "/"
fs_name = "cephfs"
nfs_mounting_dir = "/mnt/nfs_" + "".join(
secrets.choice(string.ascii_uppercase + string.digits) for i in range(5)
)
if "5.0" in rhbuild:
client1.exec_command(
sudo=True,
cmd=f"ceph nfs export create cephfs {fs_name} {nfs_name} "
f"{nfs_export_name} path={export_path}",
)
else:
client1.exec_command(
sudo=True,
cmd=f"ceph nfs export create cephfs {nfs_name} "
f"{nfs_export_name} {fs_name} path={export_path}",
)
subvolumegroup_list = [
{
"vol_name": fs_name,
"group_name": "subvolgroup_1",
},
{
"vol_name": fs_name,
"group_name": "subvolgroup_2",
},
]
for subvolumegroup in subvolumegroup_list:
fs_util.create_subvolumegroup(clients[0], **subvolumegroup)
subvolume_list = [
{
"vol_name": fs_name,
"subvol_name": "subvol_1",
"group_name": "subvolgroup_1",
"size": "5368706371",
},
{
"vol_name": fs_name,
"subvol_name": "subvol_2",
"size": "5368706371",
},
]
for subvolume in subvolume_list:
fs_util.create_subvolume(clients[0], **subvolume)
commands = [
f"mkdir -p {nfs_mounting_dir}",
f"mount -t nfs -o port=2049 {nfs_server}:{nfs_export_name} {nfs_mounting_dir}",
]
for command in commands:
client1.exec_command(sudo=True, cmd=command)
out, rc = client1.exec_command(sudo=True, cmd=f"ls {nfs_mounting_dir}/volumes/")
if "subvolgroup_1" not in out:
raise CommandFailed("Subvolume group 1 creation failed")
if "subvolgroup_2" not in out:
raise CommandFailed("Subvolume group 2 creation failed")
out, rc = client1.exec_command(
sudo=True, cmd=f"ls {nfs_mounting_dir}/volumes/subvolgroup_1"
)
if "subvol_1" not in out:
raise CommandFailed("Subvolume creation in subvolume group failed")
out, rc = client1.exec_command(
sudo=True, cmd=f"ls {nfs_mounting_dir}/volumes/_nogroup"
)
if "subvol_2" not in out:
raise CommandFailed("Subvolume creation in default subvolume group failed")
commands = [
f"python3 /home/cephuser/smallfile/smallfile_cli.py --operation create --threads 10 --file-size 4 --files"
f" 1000 --files-per-dir 10 --dirs-per-dir 2 --top {nfs_mounting_dir}/volumes/subvolgroup_1/subvol_1",
f"python3 /home/cephuser/smallfile/smallfile_cli.py --operation read --threads 10 --file-size 4 --files"
f" 1000 --files-per-dir 10 --dirs-per-dir 2 --top {nfs_mounting_dir}/volumes/subvolgroup_1/subvol_1",
f"python3 /home/cephuser/smallfile/smallfile_cli.py --operation create --threads 10 --file-size 8 "
f"--files 2000 --files-per-dir 5 --dirs-per-dir 5 --top {nfs_mounting_dir}/volumes/_nogroup/subvol_2/",
f"python3 /home/cephuser/smallfile/smallfile_cli.py --operation read --threads 10 --file-size 8 "
f"--files 2000 --files-per-dir 5 --dirs-per-dir 5 --top {nfs_mounting_dir}/volumes/_nogroup/subvol_2/",
f"python3 /home/cephuser/smallfile/smallfile_cli.py --operation create --threads 5 --file-size 16 "
f"--files 4000 --files-per-dir 20 --dirs-per-dir 4 --top {nfs_mounting_dir}/volumes/subvolgroup_2",
f"python3 /home/cephuser/smallfile/smallfile_cli.py --operation read --threads 5 --file-size 16 "
f"--files 4000 --files-per-dir 20 --dirs-per-dir 4 --top {nfs_mounting_dir}/volumes/subvolgroup_2",
]
for command in commands:
client1.exec_command(sudo=True, cmd=command, long_running=True)
log.info("Test completed successfully")
return 0
except Exception as e:
log.info(e)
log.info(traceback.format_exc())
return 1
finally:
log.info("Cleaning up")
client1.exec_command(sudo=True, cmd=f"rm -rf {nfs_mounting_dir}/*")
client1.exec_command(sudo=True, cmd=f"umount {nfs_mounting_dir}")
client1.exec_command(
sudo=True,
cmd=f"ceph nfs export delete {nfs_name} {nfs_export_name}",
check_ec=False,
)
| python | 6,570 |
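The NFS test above issues the ceph nfs export create command with a different argument order depending on the Ceph release it targets. A small sketch isolating just that branching, based only on the two command forms visible in the test; the helper name and its plain string return are illustrative:
def build_export_create_cmd(rhbuild, fs_name, nfs_name, nfs_export_name, export_path="/"):
    if "5.0" in rhbuild:
        # RHCS 5.0 form: volume name before the cluster name.
        return (f"ceph nfs export create cephfs {fs_name} {nfs_name} "
                f"{nfs_export_name} path={export_path}")
    # Later form: cluster name first, volume name after the export name.
    return (f"ceph nfs export create cephfs {nfs_name} "
            f"{nfs_export_name} {fs_name} path={export_path}")
print(build_export_create_cmd("5.0", "cephfs", "cephfs-nfs", "/export_123"))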
import sys
from lmsbreaker import Breaker
# user credentials
username = "username"
password = "password"
# number of the unit we want to solve
unit_num = 0
# minimum and maximum completion percentage for each task
percent_min = 90
percent_max = 100
breaker = Breaker()
try:
breaker.login(username, password)
units = breaker.get_units()
breaker.attempt(units, units[unit_num], percent_min, percent_max)
print("Юнит успешно выполнен.")
except Exception:
    print("An error occurred! The unit was not solved. Please try again.")
    print(sys.exc_info())
finally:
    # always close the session
breaker.logout()
| python | 648 |
import typing
from commands2 import CommandBase
from subsystems.drivesubsystem import DriveSubsystem
class FieldRelativeDrive(CommandBase):
def __init__(
self,
drive: DriveSubsystem,
forward: typing.Callable[[], float],
sideways: typing.Callable[[], float],
rotation: typing.Callable[[], float],
) -> None:
CommandBase.__init__(self)
self.setName(__class__.__name__)
self.drive = drive
self.forward = forward
self.sideways = sideways
self.rotation = rotation
self.addRequirements([self.drive])
def execute(self) -> None:
self.drive.arcadeDriveWithFactors(
self.forward(),
self.sideways(),
self.rotation(),
DriveSubsystem.CoordinateMode.FieldRelative,
)
| python | 871 |
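FieldRelativeDrive takes zero-argument callables rather than plain floats, so the scheduler samples fresh stick values on every execute() call. A hedged usage sketch with a hypothetical XboxController binding; the axis choices, sign inversions, and no-argument DriveSubsystem construction are assumptions, not part of the original code:
from wpilib import XboxController
# DriveSubsystem and FieldRelativeDrive are the classes defined in the project above.
controller = XboxController(0)
drive = DriveSubsystem()  # assumed to be constructible with no arguments
drive.setDefaultCommand(
    FieldRelativeDrive(
        drive,
        lambda: -controller.getLeftY(),   # forward
        lambda: -controller.getLeftX(),   # sideways
        lambda: -controller.getRightX(),  # rotation
    )
)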
#!/usr/bin/env python3
class Body:
def __init__(self):
self.bodyParts = []
self.bodyType = None
return
def __str__(self):
return self.getAllBodyParts()
def __len__(self):
return len(self.bodyParts)
def getAllBodyParts(self):
theBody = ""
        for part in self.bodyParts:
theBody += part
return theBody
    def addBodyPart(self,bodyPart,bodyPartType):
        if( "" == bodyPartType ):
            # do something
            pass
        elif( "" == bodyPartType ):
            # do something
            pass
        elif( "" == bodyPartType ):
            # do something
            pass
        else:
            # do default something
            pass
        return
class BodyPart:
def __init__(self):
return
    def __str__(self):
        # Placeholder until body-part content is modelled.
        return ""
    def __len__(self):
        return 0
def somethingElse(self):
return
class Header:
def __init__(self):
self.headers = []
self.headerType = None
def __str__(self):
return self.getFullHeader()
def __len__(self):
        return len(self.headers)
def appendHeaderLine(self, line):
self.headers.append(line)
self.headers[len(self.headers)-1] = self.headers[len(self.headers)-1].rstrip("\r\n")
    def parse(self):
        # Generic/unrecognised headers have nothing extra to parse.
        pass
    def getFullHeader(self):
strHeader = ""
#strHeader += "DEBUG-Header-Type:"
#strHeader += "None" if None == self.headerType else self.headerType
#strHeader += "///"
for line in self.headers:
strHeader += line
strHeader += "\n"
return strHeader
# Received: from
# the name the sending computer gave for itself (the name associated with that computer's IP address [its IP address])
# by
# the receiving computer's name (the software that computer uses) (usually Sendmail, qmail or Postfix)
# with protocol (usually SMTP, ESMTP or ESMTPS)
# id id assigned by local computer for logging;
# timestamp (usually given in the computer's localtime; see below for how you can convert these all to your time)
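# A concrete, entirely hypothetical example of that shape (documentation-only
# addresses), for reference while reading the parse() stubs below:
#   Received: from mail-out.example.org (mail-out.example.org [192.0.2.10])
#           by mx.example.net (Postfix) with ESMTPS id 4ABC123DEF
#           for <user@example.net>; Mon, 1 Jan 2018 10:00:00 +0000 (UTC)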
class ReceivedHeader(Header):
def __init__(self):
super().__init__()
self.headerType = "RECEIVED"
self.receivedFromSelfIdent = None
self.receivedFromFQDN = None
self.receivedFromIP = None
self.receivedByFQDN = None
self.receivedByIP = None
self.receivedBySoftware = None
self.receivedByProtocol = None
self.receivedByID = None
self.receivedTimestampStr = None
self.receivedTimestampUTC = None
self.EnvelopeFrom = None
self.EnvelopeTo = None
self.misc = None
def parse(self):
self.receivedFromSelfIdent = None
self.receivedFromFQDN = None
self.receivedFromIP = None
self.receivedByFQDN = None
self.receivedByIP = None
self.receivedBySoftware = None
self.receivedByProtocol = None
self.receivedByID = None
self.receivedTimestampStr = None
self.receivedTimestampUTC = None
self.EnvelopeFrom = None
self.EnvelopeTo = None
self.misc = None
# From: tokens [822]
class FromHeader(Header):
def __init__(self):
super().__init__()
self.headerType = "FROM"
self.addressList = None
def parse(self):
self.addressList = None
# To: tokens [822]
class ToHeader(Header):
def __init__(self):
super().__init__()
self.headerType = "TO"
self.addressList = None
def parse(self):
self.addressList = None
# Cc: tokens [822]
class CCHeader(Header):
def __init__(self):
super().__init__()
self.headerType = "CC"
self.addressList = None
def parse(self):
self.addressList = None
# Bcc: user tokens [822]
class BCCHeader(Header):
def __init__(self):
super().__init__()
self.headerType = "BCC"
self.addressList = None
def parse(self):
self.addressList = None
# Sender: tokens [822]
class SenderHeader(Header):
def __init__(self):
super().__init__()
self.headerType = "SENDER"
self.addressList = None
def parse(self):
self.addressList = None
# Reply-To: tokens [822]
class ReplyToHeader(Header):
def __init__(self):
super().__init__()
self.headerType = "REPLYTO"
self.addressList = None
def parse(self):
self.addressList = None
# Date: tokens [822]
class DateHeader(Header):
def __init__(self):
super().__init__()
self.headerType = "DATE"
def parse(self):
self.dateStr = None
# Subject: [822]
class SubjectHeader(Header):
def __init__(self):
super().__init__()
self.headerType = "SUBJECT"
def parse(self):
self.subjectStr = None
# Message-ID: tokens [822]
class MessageIDHeader(Header):
def __init__(self):
super().__init__()
self.headerType = "MESSAGEID"
def parse(self):
self.messageIDStr = None
# Content-Type:
# Base Types:
# Content-Type: application
# Content-Type: audio
# Content-Type: chemical
# Content-Type: image
# Content-Type: message
# Content-Type: model
# Content-Type: multipart
# Content-Type: text
# Content-Type: video
# Content-Type: x-conference
# Examples:
# Content-Type: application/andrew-inset
# Content-Type: application/applixware
# Content-Type: application/atomcat+xml
# Content-Type: application/atomsvc+xml
# Content-Type: application/atom+xml
# Content-Type: application/cals-1840
# Content-Type: application/ccxml+xml,
# Content-Type: application/cdmi-capability
# Content-Type: application/cdmi-container
# Content-Type: application/cdmi-domain
# Content-Type: application/cdmi-object
# Content-Type: application/cdmi-queue
# Content-Type: application/cu-seeme
# Content-Type: application/davmount+xml
# Content-Type: application/dssc+der
# Content-Type: application/dssc+xml
# Content-Type: application/ecmascript
# Content-Type: application/emma+xml
# Content-Type: application/epub+zip
# Content-Type: application/exi
# Content-Type: application/font-tdpfr
# Content-Type: application/hyperstudio
# Content-Type: application/ipfix
# Content-Type: application/java-archive
# Content-Type: application/javascript
# Content-Type: application/java-serialized-object
# Content-Type: application/java-vm
# Content-Type: application/json
# Content-Type: application/mac-binhex40
# Content-Type: application/mac-compactpro
# Content-Type: application/mads+xml
# Content-Type: application/marc
# Content-Type: application/marcxml+xml
# Content-Type: application/mathematica
# Content-Type: application/mathml+xml
# Content-Type: application/mbox
# Content-Type: application/mediaservercontrol+xml
# Content-Type: application/metalink4+xml
# Content-Type: application/mets+xml
# Content-Type: application/mods+xml
# Content-Type: application/mp21
# Content-Type: application/mp4
# Content-Type: application/msword
# Content-Type: application/mxf
# Content-Type: application/news-message-id
# Content-Type: application/news-transmission
# Content-Type: application/octet-stream
# Content-Type: application/octet-stream;
# Content-Type: application/octet-stream; name="Resume.pdf"
# Content-Type: application/oda
# Content-Type: application/oebps-package+xml
# Content-Type: application/ogg
# Content-Type: application/onenote
# Content-Type: application/patch-ops-error+xml
# Content-Type: application/pdf
# Content-Type: application/pdf; name=RSAC17InvoiceReceipt.pdf
# Content-Type: application/pgp-encrypted
# Content-Type: application/pgp-keys
# Content-Type: application/pgp-signature
# Content-Type: application/pics-rules
# Content-Type: application/pkcs10
# Content-Type: application/pkcs7-mime
# Content-Type: application/pkcs7-MIME
# Content-Type: application/pkcs7-signature
# Content-Type: application/pkcs8
# Content-Type: application/pkix-attr-cert
# Content-Type: application/pkix-cert
# Content-Type: application/pkixcmp
# Content-Type: application/pkix-crl
# Content-Type: application/pkix-pkipath
# Content-Type: application/pls+xml
# Content-Type: application/postscript
# Content-Type: application/prs.cww
# Content-Type: application/pskc+xml
# Content-Type: application/rdf+xml
# Content-Type: application/reginfo+xml
# Content-Type: application/relax-ng-compact-syntax
# Content-Type: application/remote-printing
# Content-Type: application/resource-lists-diff+xml
# Content-Type: application/resource-lists+xml
# Content-Type: application/rls-services+xml
# Content-Type: application/rsd+xml
# Content-Type: application/rss+xml
# Content-Type: application/rtf
# Content-Type: application/sbml+xml
# Content-Type: application/scvp-cv-request
# Content-Type: application/scvp-cv-response
# Content-Type: application/scvp-vp-request
# Content-Type: application/scvp-vp-response
# Content-Type: application/sdp
# Content-Type: application/set-payment-initiation
# Content-Type: application/set-registration-initiation
# Content-Type: application/sgml
# Content-Type: application/shf+xml
# Content-Type: application/smil+xml
# Content-Type: application/sparql-query
# Content-Type: application/sparql-results+xml
# Content-Type: application/srgs
# Content-Type: application/srgs+xml
# Content-Type: application/sru+xml
# Content-Type: application/ssml+xml
# Content-Type: application/tei+xml
# Content-Type: application/thraud+xml
# Content-Type: application/timestamped-data
# Content-Type: application/vnd.3gpp2.tcap
# Content-Type: application/vnd.3gpp.pic-bw-large
# Content-Type: application/vnd.3gpp.pic-bw-small
# Content-Type: application/vnd.3gpp.pic-bw-var
# Content-Type: application/vnd.3m.post-it-notes
# Content-Type: application/vnd.accpac.simply.aso
# Content-Type: application/vnd.accpac.simply.imp
# Content-Type: application/vnd.acucobol
# Content-Type: application/vnd.acucorp
# Content-Type: application/vnd.adobe.air-application-installer-package+zip
# Content-Type: application/vnd.adobe.fxp
# Content-Type: application/vnd.adobe.xdp+xml
# Content-Type: application/vnd.adobe.xfdf
# Content-Type: application/vnd.ahead.space
# Content-Type: application/vnd.airzip.filesecure.azf
# Content-Type: application/vnd.airzip.filesecure.azs
# Content-Type: application/vnd.amazon.ebook
# Content-Type: application/vnd.americandynamics.acc
# Content-Type: application/vnd.amiga.ami
# Content-Type: application/vnd.android.package-archive
# Content-Type: application/vnd.anser-web-certificate-issue-initiation
# Content-Type: application/vnd.anser-web-funds-transfer-initiation
# Content-Type: application/vnd.antix.game-component
# Content-Type: application/vnd.apple.installer+xml
# Content-Type: application/vnd.apple.mpegurl
# Content-Type: application/vnd.aristanetworks.swi
# Content-Type: application/vnd.audiograph
# Content-Type: application/vnd.blueice.multipass
# Content-Type: application/vnd.bmi
# Content-Type: application/vnd.businessobjects
# Content-Type: application/vnd.chemdraw+xml
# Content-Type: application/vnd.chipnuts.karaoke-mmd
# Content-Type: application/vnd.cinderella
# Content-Type: application/vnd.claymore
# Content-Type: application/vnd.cloanto.rp9
# Content-Type: application/vnd.clonk.c4group
# Content-Type: application/vnd.cluetrust.cartomobile-config
# Content-Type: application/vnd.cluetrust.cartomobile-config-pkg
# Content-Type: application/vnd.commonspace
# Content-Type: application/vnd.contact.cmsg
# Content-Type: application/vnd.cosmocaller
# Content-Type: application/vnd.crick.clicker
# Content-Type: application/vnd.crick.clicker.keyboard
# Content-Type: application/vnd.crick.clicker.palette
# Content-Type: application/vnd.crick.clicker.template
# Content-Type: application/vnd.crick.clicker.wordbank
# Content-Type: application/vnd.criticaltools.wbs+xml
# Content-Type: application/vnd.ctc-posml
# Content-Type: application/vnd.cups-ppd
# Content-Type: application/vnd.curl.car
# Content-Type: application/vnd.curl.pcurl
# Content-Type: application/vnd.data-vision.rdz
# Content-Type: application/vnd.denovo.fcselayout-link
# Content-Type: application/vnd.dna
# Content-Type: application/vnd.dolby.mlp
# Content-Type: application/vnd.dpgraph
# Content-Type: application/vnd.dreamfactory
# Content-Type: application/vnd.dvb.ait
# Content-Type: application/vnd.dvb.service
# Content-Type: application/vnd.dynageo
# Content-Type: application/vnd.ecowin.chart
# Content-Type: application/vnd.enliven
# Content-Type: application/vnd.epson.esf
# Content-Type: application/vnd.epson.msf
# Content-Type: application/vnd.epson.quickanime
# Content-Type: application/vnd.epson.salt
# Content-Type: application/vnd.epson.ssf
# Content-Type: application/vnd.eszigno3+xml
# Content-Type: application/vnd.ezpix-album
# Content-Type: application/vnd.ezpix-package
# Content-Type: application/vnd.fdf
# Content-Type: application/vnd.fdsn.seed
# Content-Type: application/vnd.flographit
# Content-Type: application/vnd.fluxtime.clip
# Content-Type: application/vnd.framemaker
# Content-Type: application/vnd.frogans.fnc
# Content-Type: application/vnd.frogans.ltf
# Content-Type: application/vnd.fsc.weblaunch
# Content-Type: application/vnd.fujitsu.oasys
# Content-Type: application/vnd.fujitsu.oasys2
# Content-Type: application/vnd.fujitsu.oasys3
# Content-Type: application/vnd.fujitsu.oasysgp
# Content-Type: application/vnd.fujitsu.oasysprs
# Content-Type: application/vnd.fujixerox.ddd
# Content-Type: application/vnd.fujixerox.docuworks
# Content-Type: application/vnd.fujixerox.docuworks.binder
# Content-Type: application/vnd.fuzzysheet
# Content-Type: application/vnd.genomatix.tuxedo
# Content-Type: application/vnd.geogebra.file
# Content-Type: application/vnd.geogebra.tool
# Content-Type: application/vnd.geometry-explorer
# Content-Type: application/vnd.geonext
# Content-Type: application/vnd.geoplan
# Content-Type: application/vnd.geospace
# Content-Type: application/vnd.gmx
# Content-Type: application/vnd.google-earth.kml+xml
# Content-Type: application/vnd.google-earth.kmz
# Content-Type: application/vnd.grafeq
# Content-Type: application/vnd.groove-account
# Content-Type: application/vnd.groove-help
# Content-Type: application/vnd.groove-identity-message
# Content-Type: application/vnd.groove-injector
# Content-Type: application/vnd.groove-tool-message
# Content-Type: application/vnd.groove-tool-template
# Content-Type: application/vnd.groove-vcard
# Content-Type: application/vnd.hal+xml
# Content-Type: application/vnd.handheld-entertainment+xml
# Content-Type: application/vnd.hbci
# Content-Type: application/vnd.hhe.lesson-player
# Content-Type: application/vnd.hp-hpgl
# Content-Type: application/vnd.hp-hpid
# Content-Type: application/vnd.hp-hps
# Content-Type: application/vnd.hp-jlyt
# Content-Type: application/vnd.hp-pcl
# Content-Type: application/vnd.hp-pclxl
# Content-Type: application/vnd.hydrostatix.sof-data
# Content-Type: application/vnd.hzn-3d-crossword
# Content-Type: application/vnd.ibm.minipay
# Content-Type: application/vnd.ibm.modcap
# Content-Type: application/vnd.ibm.rights-management
# Content-Type: application/vnd.ibm.secure-container
# Content-Type: application/vnd.iccprofile
# Content-Type: application/vnd.igloader
# Content-Type: application/vnd.immervision-ivp
# Content-Type: application/vnd.immervision-ivu
# Content-Type: application/vnd.insors.igm
# Content-Type: application/vnd.intercon.formnet
# Content-Type: application/vnd.intergeo
# Content-Type: application/vnd.intu.qbo
# Content-Type: application/vnd.intu.qfx
# Content-Type: application/vnd.ipunplugged.rcprofile
# Content-Type: application/vnd.irepository.package+xml
# Content-Type: application/vnd.isac.fcs
# Content-Type: application/vnd.is-xpr
# Content-Type: application/vnd.jam
# Content-Type: application/vnd.jcp.javame.midlet-rms
# Content-Type: application/vnd.jisp
# Content-Type: application/vnd.joost.joda-archive
# Content-Type: application/vnd.kahootz
# Content-Type: application/vnd.kde.karbon
# Content-Type: application/vnd.kde.kchart
# Content-Type: application/vnd.kde.kformula
# Content-Type: application/vnd.kde.kivio
# Content-Type: application/vnd.kde.kontour
# Content-Type: application/vnd.kde.kpresenter
# Content-Type: application/vnd.kde.kspread
# Content-Type: application/vnd.kde.kword
# Content-Type: application/vnd.kenameaapp
# Content-Type: application/vnd.kidspiration
# Content-Type: application/vnd.kinar
# Content-Type: application/vnd.koan
# Content-Type: application/vnd.kodak-descriptor
# Content-Type: application/vnd.las.las+xml
# Content-Type: application/vnd.llamagraphics.life-balance.desktop
# Content-Type: application/vnd.llamagraphics.life-balance.exchange+xml
# Content-Type: application/vnd.lotus-1-2-3
# Content-Type: application/vnd.lotus-approach
# Content-Type: application/vnd.lotus-freelance
# Content-Type: application/vnd.lotus-notes
# Content-Type: application/vnd.lotus-organizer
# Content-Type: application/vnd.lotus-screencam
# Content-Type: application/vnd.lotus-wordpro
# Content-Type: application/vnd.macports.portpkg
# Content-Type: application/vnd.mcd
# Content-Type: application/vnd.medcalcdata
# Content-Type: application/vnd.mediastation.cdkey
# Content-Type: application/vnd.mfer
# Content-Type: application/vnd.mfmp
# Content-Type: application/vnd.micrografx.flo
# Content-Type: application/vnd.micrografx.igx
# Content-Type: application/vnd.mif
# Content-Type: application/vnd.mobius.daf
# Content-Type: application/vnd.mobius.dis
# Content-Type: application/vnd.mobius.mbk
# Content-Type: application/vnd.mobius.mqy
# Content-Type: application/vnd.mobius.msl
# Content-Type: application/vnd.mobius.plc
# Content-Type: application/vnd.mobius.txf
# Content-Type: application/vnd.mophun.application
# Content-Type: application/vnd.mophun.certificate
# Content-Type: application/vnd.mozilla.xul+xml
# Content-Type: application/vnd.ms-artgalry
# Content-Type: application/vnd.ms-cab-compressed
# Content-Type: application/vnd.mseq
# Content-Type: application/vnd.ms-excel
# Content-Type: application/vnd.ms-excel.addin.macroenabled.12
# Content-Type: application/vnd.ms-excel.sheet.binary.macroenabled.12
# Content-Type: application/vnd.ms-excel.sheet.macroenabled.12
# Content-Type: application/vnd.ms-excel.template.macroenabled.12
# Content-Type: application/vnd.ms-fontobject
# Content-Type: application/vnd.ms-htmlhelp
# Content-Type: application/vnd.ms-ims
# Content-Type: application/vnd.ms-lrm
# Content-Type: application/vnd.ms-officetheme
# Content-Type: application/vnd.ms-pki.seccat
# Content-Type: application/vnd.ms-pki.stl
# Content-Type: application/vnd.ms-powerpoint
# Content-Type: application/vnd.ms-powerpoint.addin.macroenabled.12
# Content-Type: application/vnd.ms-powerpoint.presentation.macroenabled.12
# Content-Type: application/vnd.ms-powerpoint.slide.macroenabled.12
# Content-Type: application/vnd.ms-powerpoint.slideshow.macroenabled.12
# Content-Type: application/vnd.ms-powerpoint.template.macroenabled.12
# Content-Type: application/vnd.ms-project
# Content-Type: application/vnd.ms-word.document.macroenabled.12
# Content-Type: application/vnd.ms-word.template.macroenabled.12
# Content-Type: application/vnd.ms-works
# Content-Type: application/vnd.ms-wpl
# Content-Type: application/vnd.ms-xpsdocument
# Content-Type: application/vnd.musician
# Content-Type: application/vnd.muvee.style
# Content-Type: application/vnd.neurolanguage.nlu
# Content-Type: application/vnd.noblenet-directory
# Content-Type: application/vnd.noblenet-sealer
# Content-Type: application/vnd.noblenet-web
# Content-Type: application/vnd.nokia.n-gage.data
# Content-Type: application/vnd.nokia.n-gage.symbian.install
# Content-Type: application/vnd.nokia.radio-preset
# Content-Type: application/vnd.nokia.radio-presets
# Content-Type: application/vnd.novadigm.edm
# Content-Type: application/vnd.novadigm.edx
# Content-Type: application/vnd.novadigm.ext
# Content-Type: application/vnd.oasis.opendocument.chart
# Content-Type: application/vnd.oasis.opendocument.chart-template
# Content-Type: application/vnd.oasis.opendocument.database
# Content-Type: application/vnd.oasis.opendocument.formula
# Content-Type: application/vnd.oasis.opendocument.formula-template
# Content-Type: application/vnd.oasis.opendocument.graphics
# Content-Type: application/vnd.oasis.opendocument.graphics-template
# Content-Type: application/vnd.oasis.opendocument.image
# Content-Type: application/vnd.oasis.opendocument.image-template
# Content-Type: application/vnd.oasis.opendocument.presentation
# Content-Type: application/vnd.oasis.opendocument.presentation-template
# Content-Type: application/vnd.oasis.opendocument.spreadsheet
# Content-Type: application/vnd.oasis.opendocument.spreadsheet-template
# Content-Type: application/vnd.oasis.opendocument.text
# Content-Type: application/vnd.oasis.opendocument.text-master
# Content-Type: application/vnd.oasis.opendocument.text-template
# Content-Type: application/vnd.oasis.opendocument.text-web
# Content-Type: application/vnd.olpc-sugar
# Content-Type: application/vnd.oma.dd2+xml
# Content-Type: application/vnd.openofficeorg.extension
# Content-Type: application/vnd.openxmlformats-officedocument.presentationml.presentation
# Content-Type: application/vnd.openxmlformats-officedocument.presentationml.slide
# Content-Type: application/vnd.openxmlformats-officedocument.presentationml.slideshow
# Content-Type: application/vnd.openxmlformats-officedocument.presentationml.template
# Content-Type: application/vnd.openxmlformats-officedocument.spreadsheetml.sheet
# Content-Type: application/vnd.openxmlformats-officedocument.spreadsheetml.template
# Content-Type: application/vnd.openxmlformats-officedocument.wordprocessingml.document
# Content-Type: application/vnd.openxmlformats-officedocument.wordprocessingml.template
# Content-Type: application/vnd.osgeo.mapguide.package
# Content-Type: application/vnd.osgi.dp
# Content-Type: application/vnd.palm
# Content-Type: application/vnd.pawaafile
# Content-Type: application/vnd.pg.format
# Content-Type: application/vnd.pg.osasli
# Content-Type: application/vnd.picsel
# Content-Type: application/vnd.pmi.widget
# Content-Type: application/vnd.pocketlearn
# Content-Type: application/vnd.powerbuilder6
# Content-Type: application/vnd.previewsystems.box
# Content-Type: application/vnd.proteus.magazine
# Content-Type: application/vnd.publishare-delta-tree
# Content-Type: application/vnd.pvi.ptid1
# Content-Type: application/vnd.quark.quarkxpress
# Content-Type: application/vnd.realvnc.bed
# Content-Type: application/vnd.recordare.musicxml
# Content-Type: application/vnd.recordare.musicxml+xml
# Content-Type: application/vnd.rig.cryptonote
# Content-Type: application/vnd.rim.cod
# Content-Type: application/vnd.rn-realmedia
# Content-Type: application/vnd.route66.link66+xml
# Content-Type: application/vnd.sailingtracker.track
# Content-Type: application/vnd.seemail
# Content-Type: application/vnd.sema
# Content-Type: application/vnd.semd
# Content-Type: application/vnd.semf
# Content-Type: application/vnd.shana.informed.formdata
# Content-Type: application/vnd.shana.informed.formtemplate
# Content-Type: application/vnd.shana.informed.interchange
# Content-Type: application/vnd.shana.informed.package
# Content-Type: application/vnd.simtech-mindmapper
# Content-Type: application/vnd.smaf
# Content-Type: application/vnd.smart.teacher
# Content-Type: application/vnd.solent.sdkm+xml
# Content-Type: application/vnd.spotfire.dxp
# Content-Type: application/vnd.spotfire.sfs
# Content-Type: application/vnd.stardivision.calc
# Content-Type: application/vnd.stardivision.draw
# Content-Type: application/vnd.stardivision.impress
# Content-Type: application/vnd.stardivision.math
# Content-Type: application/vnd.stardivision.writer
# Content-Type: application/vnd.stardivision.writer-global
# Content-Type: application/vnd.stepmania.stepchart
# Content-Type: application/vnd.sun.xml.calc
# Content-Type: application/vnd.sun.xml.calc.template
# Content-Type: application/vnd.sun.xml.draw
# Content-Type: application/vnd.sun.xml.draw.template
# Content-Type: application/vnd.sun.xml.impress
# Content-Type: application/vnd.sun.xml.impress.template
# Content-Type: application/vnd.sun.xml.math
# Content-Type: application/vnd.sun.xml.writer
# Content-Type: application/vnd.sun.xml.writer.global
# Content-Type: application/vnd.sun.xml.writer.template
# Content-Type: application/vnd.sus-calendar
# Content-Type: application/vnd.svd
# Content-Type: application/vnd.symbian.install
# Content-Type: application/vnd.syncml.dm+wbxml
# Content-Type: application/vnd.syncml.dm+xml
# Content-Type: application/vnd.syncml+xml
# Content-Type: application/vnd.tao.intent-module-archive
# Content-Type: application/vnd.tmobile-livetv
# Content-Type: application/vnd.trid.tpt
# Content-Type: application/vnd.triscape.mxs
# Content-Type: application/vnd.trueapp
# Content-Type: application/vnd.ufdl
# Content-Type: application/vnd.uiq.theme
# Content-Type: application/vnd.umajin
# Content-Type: application/vnd.unity
# Content-Type: application/vnd.uoml+xml
# Content-Type: application/vnd.vcx
# Content-Type: application/vnd.visio
# Content-Type: application/vnd.visio2013
# Content-Type: application/vnd.visionary
# Content-Type: application/vnd.vsf
# Content-Type: application/vnd.wap.wbxml
# Content-Type: application/vnd.wap.wmlc
# Content-Type: application/vnd.wap.wmlscriptc
# Content-Type: application/vnd.webturbo
# Content-Type: application/vnd.wolfram.player
# Content-Type: application/vnd.wordperfect
# Content-Type: application/vnd.wqd
# Content-Type: application/vnd.wt.stf
# Content-Type: application/vnd.xara
# Content-Type: application/vnd.xfdl
# Content-Type: application/vnd.yamaha.hv-dic
# Content-Type: application/vnd.yamaha.hv-script
# Content-Type: application/vnd.yamaha.hv-voice
# Content-Type: application/vnd.yamaha.openscoreformat
# Content-Type: application/vnd.yamaha.openscoreformat.osfpvg+xml
# Content-Type: application/vnd.yamaha.smaf-audio
# Content-Type: application/vnd.yamaha.smaf-phrase
# Content-Type: application/vnd.yellowriver-custom-menu
# Content-Type: application/vnd.zul
# Content-Type: application/vnd.zzazz.deck+xml
# Content-Type: application/voicexml+xml
# Content-Type: application/widget
# Content-Type: application/winhlp
# Content-Type: application/wsdl+xml
# Content-Type: application/wspolicy+xml
# Content-Type: application/x400-bp
# Content-Type: application/x-7z-compressed
# Content-Type: application/x-abiword
# Content-Type: application/x-ace-compressed
# Content-Type: application/x-apple-diskimage
# Content-Type: application/x-authorware-bin
# Content-Type: application/x-authorware-map
# Content-Type: application/x-authorware-seg
# Content-Type: application/x-bcpio
# Content-Type: application/x-bittorrent
# Content-Type: application/x-bzip
# Content-Type: application/x-bzip2
# Content-Type: application/xcap-diff+xml
# Content-Type: application/x-cdlink
# Content-Type: application/x-chat
# Content-Type: application/x-chess-pgn
# Content-Type: application/x-cpio
# Content-Type: application/x-csh
# Content-Type: application/x-debian-package
# Content-Type: application/x-director
# Content-Type: application/x-doom
# Content-Type: application/x-dtbncx+xml
# Content-Type: application/x-dtbook+xml
# Content-Type: application/x-dtbresource+xml
# Content-Type: application/x-dvi
# Content-Type: application/xenc+xml
# Content-Type: application/x-font-bdf
# Content-Type: application/x-font-ghostscript
# Content-Type: application/x-font-linux-psf
# Content-Type: application/x-font-otf
# Content-Type: application/x-font-pcf
# Content-Type: application/x-font-snf
# Content-Type: application/x-font-ttf
# Content-Type: application/x-font-type1
# Content-Type: application/x-font-woff
# Content-Type: application/x-futuresplash
# Content-Type: application/x-gnumeric
# Content-Type: application/x-gtar
# Content-Type: application/x-hdf
# Content-Type: application/xhtml+xml
# Content-Type: application/x-java-jnlp-file
# Content-Type: application/x-latex
# Content-Type: application/xml
# Content-Type: application/xml-dtd
# Content-Type: application/x-mobipocket-ebook
# Content-Type: application/x-msaccess
# Content-Type: application/x-ms-application
# Content-Type: application/x-msbinder
# Content-Type: application/x-mscardfile
# Content-Type: application/x-msclip
# Content-Type: application/x-msdownload
# Content-Type: application/x-msmediaview
# Content-Type: application/x-msmetafile
# Content-Type: application/x-msmoney
# Content-Type: application/x-mspublisher
# Content-Type: application/x-msschedule
# Content-Type: application/x-msterminal
# Content-Type: application/x-ms-wmd
# Content-Type: application/x-ms-wmz
# Content-Type: application/x-mswrite
# Content-Type: application/x-ms-xbap
# Content-Type: application/x-netcdf
# Content-Type: application/xop+xml
# Content-Type: application/x-pkcs12
# Content-Type: application/x-pkcs7-certificates
# Content-Type: application/x-pkcs7-certreqresp
# Content-Type: application/x-rar-compressed
# Content-Type: application/x-sh
# Content-Type: application/x-shar
# Content-Type: application/x-shockwave-flash
# Content-Type: application/x-silverlight-app
# Content-Type: application/xslt+xml
# Content-Type: application/xspf+xml
# Content-Type: application/x-stuffit
# Content-Type: application/x-stuffitx
# Content-Type: application/x-sv4cpio
# Content-Type: application/x-sv4crc
# Content-Type: application/x-tar
# Content-Type: application/x-tcl
# Content-Type: application/x-tex
# Content-Type: application/x-texinfo
# Content-Type: application/x-tex-tfm
# Content-Type: application/x-ustar
# Content-Type: application/xv+xml
# Content-Type: application/x-wais-source
# Content-Type: application/x-x509-ca-cert
# Content-Type: application/x-xfig
# Content-Type: application/x-xpinstall
# Content-Type: application/yang
# Content-Type: application/yin+xml
# Content-Type: application/zip
# Content-Type: audio/32kadpcm
# Content-Type: audio/adpcm
# Content-Type: audio/basic
# Content-Type: audio/midi
# Content-Type: audio/mp4
# Content-Type: audio/mpeg
# Content-Type: audio/ogg
# Content-Type: audio/vnd.dece.audio
# Content-Type: audio/vnd.digital-winds
# Content-Type: audio/vnd.dra
# Content-Type: audio/vnd.dts
# Content-Type: audio/vnd.dts.hd
# Content-Type: audio/vnd.lucent.voice
# Content-Type: audio/vnd.ms-playready.media.pya
# Content-Type: audio/vnd.nuera.ecelp4800
# Content-Type: audio/vnd.nuera.ecelp7470
# Content-Type: audio/vnd.nuera.ecelp9600
# Content-Type: audio/vnd.rip
# Content-Type: audio/webm
# Content-Type: audio/x-aac
# Content-Type: audio/x-aiff
# Content-Type: audio/x-mpegurl
# Content-Type: audio/x-ms-wax
# Content-Type: audio/x-ms-wma
# Content-Type: audio/x-pn-realaudio
# Content-Type: audio/x-pn-realaudio-plugin
# Content-Type: audio/x-wav
# Content-Type: chemical/x-cdx
# Content-Type: chemical/x-cif
# Content-Type: chemical/x-cmdf
# Content-Type: chemical/x-cml
# Content-Type: chemical/x-csml
# Content-Type: chemical/x-xyz
# Content-Type: image/bmp
# Content-Type: image/cgm
# Content-Type: image/g3fax
# Content-Type: image/gif
# Content-Type: image/ief
# Content-Type: image/jpeg
# Content-Type: image/jpeg; name="image001.jpg"
# Content-Type: image/jpeg; name="TemptationShortcut.jpg"
# Content-Type: image/ktx
# Content-Type: image/pjpeg
# Content-Type: image/png
# Content-Type: image/prs.btif
# Content-Type: image/svg+xml
# Content-Type: image/tiff
# Content-Type: image/vnd.adobe.photoshop
# Content-Type: image/vnd.dece.graphic
# Content-Type: image/vnd.djvu
# Content-Type: image/vnd.dvb.subtitle
# Content-Type: image/vnd.dwg
# Content-Type: image/vnd.dxf
# Content-Type: image/vnd.fastbidsheet
# Content-Type: image/vnd.fpx
# Content-Type: image/vnd.fst
# Content-Type: image/vnd.fujixerox.edmics-mmr
# Content-Type: image/vnd.fujixerox.edmics-rlc
# Content-Type: image/vnd.ms-modi
# Content-Type: image/vnd.net-fpx
# Content-Type: image/vnd.wap.wbmp
# Content-Type: image/vnd.xiff
# Content-Type: image/webp
# Content-Type: image/x-citrix-jpeg
# Content-Type: image/x-citrix-png
# Content-Type: image/x-cmu-raster
# Content-Type: image/x-cmx
# Content-Type: image/x-freehand
# Content-Type: image/x-icon
# Content-Type: image/x-pcx
# Content-Type: image/x-pict
# Content-Type: image/x-png
# Content-Type: image/x-portable-anymap
# Content-Type: image/x-portable-bitmap
# Content-Type: image/x-portable-graymap
# Content-Type: image/x-portable-pixmap
# Content-Type: image/x-rgb
# Content-Type: image/x-xbitmap
# Content-Type: image/x-xpixmap
# Content-Type: image/x-xwindowdump
# Content-Type: message/delivery-status
# Content-Type: message/disposition-notification-to
# Content-Type: message/external-body
# Content-Type: message/http
# Content-Type: message/partial
# Content-Type: message/rfc822
# Content-Type: model/iges
# Content-Type: model/mesh
# Content-Type: model/vnd.collada+xml
# Content-Type: model/vnd.dwf
# Content-Type: model/vnd.gdl
# Content-Type: model/vnd.gtw
# Content-Type: model/vnd.mts
# Content-Type: model/vnd.vtu
# Content-Type: model/vrml
# Content-Type: multipart/alternative;
# Content-Type: multipart/alternative; boundary=001a113a2dc6dd3487053f79bc24
# Content-Type: multipart/alternative; boundary="001a11456c8817dd1d055d352f8c"
# Content-Type: multipart/alternative; boundary="_----056dTAi7CnMb4YC6zcuzcw===_CB/19-64684-8C2612A5"
# Content-Type: multipart/alternative; boundary="089e082b9ca8b3811005607e8c7d"
# Content-Type: multipart/alternative; boundary=12781cc67c6d4bdc5c62fe572df6f07067ed31bca7176f259611800bc7ed
# Content-Type: multipart/alternative; boundary="===============2300963478671213537=="
# Content-Type: multipart/alternative; boundary="6kpfp7cF1q82tUL7as8jVsg6vSxX=_GkhV"
# Content-Type: multipart/alternative; boundary="cdf82e78-582d-4a55-9037-dacf81ae37d3"
# Content-Type: multipart/alternative; boundary="de3m6f=_VUkD4F9LsJ1SCYvVv7MXYQfySv"
# Content-Type: multipart/alternative; boundary="__slack_222209002__"
# Content-Type: multipart/byteranges; boundary="94eb2c0e6238d7dc6f05607ea548"
# Content-Type: multipart/digest; boundary="089e082b9ca8b3811005607e8c7d"
# Content-Type: multipart/encrypted; boundary="94eb2c0e6238d7dc6f05607ea548"
# Content-Type: multipart/form-data; boundary="94eb2c0e6238d7dc6f05607ea548"
# Content-Type: multipart/mixed;
# Content-Type: multipart/mixed; boundary=001a113ed43687e70b053d097387
# Content-Type: multipart/mixed; boundary="94eb2c0e6238d7dc6f05607ea548"
# Content-Type: multipart/mixed-replace; boundary="94eb2c0e6238d7dc6f05607ea548"
# Content-Type: multipart/parallel
# Content-Type: multipart/related;
# Content-Type: multipart/related; boundary="94eb2c0e6238d7dc6f05607ea548"
# Content-Type: multipart/report; boundary="94eb2c0e6238d7dc6f05607ea548"
# Content-Type: multipart/signed; boundary="94eb2c0e6238d7dc6f05607ea548"
# Content-Type: text/calendar
# Content-Type: text/css
# Content-Type: text/csv
# Content-Type: text/enriched
# Content-Type: text/html
# Content-Type: text/html;
# Content-Type: text/html; charset="us-ascii"
# Content-Type: text/html; charset="utf-8"
# Content-Type: text/html; charset=utf-8
# Content-Type: text/html; charset = "utf-8"
# Content-Type: text/html; charset="UTF-8"
# Content-Type: text/html; charset=UTF-8
# Content-Type: text/n3
# Content-Type: text/plain
# Content-Type: text/plain;
# Content-Type: text/plain-bas
# Content-Type: text/plain; charset="us-ascii"
# Content-Type: text/plain; charset="us-ascii";
# Content-Type: text/plain; charset="US-ASCII"; name="test_text_file.txt"
# Content-Type: text/plain; charset="utf-8"
# Content-Type: text/plain; charset=utf-8
# Content-Type: text/plain; charset = "utf-8"
# Content-Type: text/plain; charset="UTF-8"
# Content-Type: text/plain; charset=UTF-8
# Content-Type: text/plain; charset=utf-8; format=flowed
# Content-Type: text/plain; charset=windows-1252; format=flowed
# Content-Type: text/prs.lines.tag
# Content-Type: text/rfc822-headers
# Content-Type: text/richtext
# Content-Type: text/sgml
# Content-Type: text/tab-separated-values
# Content-Type: text/troff
# Content-Type: text/turtle
# Content-Type: text/uri-list
# Content-Type: text/vnd.curl
# Content-Type: text/vnd.curl.dcurl
# Content-Type: text/vnd.curl.mcurl
# Content-Type: text/vnd.curl.scurl
# Content-Type: text/vnd.fly
# Content-Type: text/vnd.fmi.flexstor
# Content-Type: text/vnd.graphviz
# Content-Type: text/vnd.in3d.3dml
# Content-Type: text/vnd.in3d.spot
# Content-Type: text/vnd.sun.j2me.app-descriptor
# Content-Type: text/vnd.wap.wml
# Content-Type: text/vnd.wap.wmlscript
# Content-Type: text/x-asm
# Content-Type: text/x-c
# Content-Type: text/x-fortran
# Content-Type: text/x-java-source,java
# Content-Type: text/x-markdown; charset="US-ASCII"; name="README.md"
# Content-Type: text/x-pascal
# Content-Type: text/x-setext
# Content-Type: text/x-uuencode
# Content-Type: text/x-vcalendar
# Content-Type: text/x-vcard
# Content-Type: text/yaml
# Content-Type: video/3gpp
# Content-Type: video/3gpp2
# Content-Type: video/h261
# Content-Type: video/h263
# Content-Type: video/h264
# Content-Type: video/jpeg
# Content-Type: video/jpm
# Content-Type: video/mj2
# Content-Type: video/mp4
# Content-Type: video/mpeg
# Content-Type: video/ogg
# Content-Type: video/quicktime
# Content-Type: video/vnd.dece.hd
# Content-Type: video/vnd.dece.mobile
# Content-Type: video/vnd.dece.pd
# Content-Type: video/vnd.dece.sd
# Content-Type: video/vnd.dece.video
# Content-Type: video/vnd.fvt
# Content-Type: video/vnd.mpegurl
# Content-Type: video/vnd.ms-playready.media.pyv
# Content-Type: video/vnd.uvvu.mp4
# Content-Type: video/vnd.vivo
# Content-Type: video/webm
# Content-Type: video/x-f4v
# Content-Type: video/x-fli
# Content-Type: video/x-flv
# Content-Type: video/x-m4v
# Content-Type: video/x-ms-asf
# Content-Type: video/x-msvideo
# Content-Type: video/x-ms-wm
# Content-Type: video/x-ms-wmv
# Content-Type: video/x-ms-wmx
# Content-Type: video/x-ms-wvx
# Content-Type: video/x-sgi-movie
# Content-Type: x-conference/x-cooltalk
class ContentTypeHeader(Header):
def __init__(self):
super().__init__()
self.headerType = "CONTENTTYPE"
# Content-Type := type "/" subtype *[";" parameter]
# type := "application" / "audio"
# / "image" / "message"
# / "multipart" / "text"
# / "video" / x-token
# x-token := <The two characters "X-" followed, with no
# intervening white space, by any token>
# subtype := token
# parameter := attribute "=" value
# attribute := token
# value := token / quoted-string
# token := 1*<any CHAR except SPACE, CTLs, or tspecials>
# tspecials := "(" / ")" / "<" / ">" / "@" ; Must be in
# / "," / ";" / ":" / "\" / <"> ; quoted-string,
# / "/" / "[" / "]" / "?" / "." ; to use within
# / "=" ; parameter values
def parse(self):
print( "DEBUG: In Content-Type header parse:" )
self.contentTypeStr = None
self.contentSubTypeStr = None
self.headerOnOneLine = ""
for line in self.headers:
self.headerOnOneLine += line.strip()
self.headerOnOneLine = self.headerOnOneLine[len("Content-Type:"):]
for element in [x.strip() for x in self.headerOnOneLine.split(";")]:
if( element.startswith( "application" ) ):
self.parseApplicationType()
break
elif( element.startswith( "audio" ) ):
self.parseAudioType()
break
elif( element.startswith( "chemical" ) ):
self.parseChemicalType()
break
elif( element.startswith( "image" ) ):
self.parseImageType()
break
elif( element.startswith( "message" ) ):
self.parseMessageType()
break
elif( element.startswith( "model" ) ):
self.parseModelType()
break
elif( element.startswith( "multipart" ) ):
self.parseMultipartType()
break
elif( element.startswith( "text" ) ):
self.parseTextType()
break
elif( element.startswith( "video" ) ):
self.parseVideoType()
break
elif( element.startswith( "x-conference" ) ):
self.parseXConferenceType()
break
def parseApplicationType(self):
print( "DEBUG: In Content-Type header parse:" )
self.contentTypeStr = "application"
for element in [x.strip() for x in self.headerOnOneLine.split(";")]:
print( "DEBUG:" )
def parseAudioType(self):
print( "DEBUG: In Content-Type header parse:" )
self.contentTypeStr = "audio"
for element in [x.strip() for x in self.headerOnOneLine.split(";")]:
print( "DEBUG:" )
def parseChemicalType(self):
print( "DEBUG: In Content-Type header parse:" )
self.contentTypeStr = "chemical"
for element in [x.strip() for x in self.headerOnOneLine.split(";")]:
print( "DEBUG:" )
def parseImageType(self):
print( "DEBUG: In Content-Type header parse:" )
self.contentTypeStr = "image"
for element in [x.strip() for x in self.headerOnOneLine.split(";")]:
print( "DEBUG:" )
def parseMessageType(self):
print( "DEBUG: In Content-Type header parse:" )
self.contentTypeStr = "message"
for element in [x.strip() for x in self.headerOnOneLine.split(";")]:
print( "DEBUG:" )
def parseModelType(self):
print( "DEBUG: In Content-Type header parse:" )
self.contentTypeStr = "model"
for element in [x.strip() for x in self.headerOnOneLine.split(";")]:
print( "DEBUG:" )
def parseMultipartType(self):
print( "DEBUG: In Content-Type header parseMultipartType:" )
self.contentTypeStr = "multipart"
for element in [x.strip() for x in self.headerOnOneLine.split(";")]:
print( "DEBUG: In Content-Type header parseMultipartType: element:",element )
if( element.startswith( self.contentTypeStr ) ):
t1, t2 = [x.strip() for x in element.split("/")]
self.contentSubTypeStr = t2
print( "DEBUG: In Content-Type header parseMultipartType: element: multipart:",t2 )
elif( element.startswith( "boundary" ) ):
t1, t2 = [x.strip() for x in element.split("=")]
self.multipartBoundaryStr = t2
print( "DEBUG: In Content-Type header parseMultipartType: element: boundary:",t2 )
def parseTextType(self):
print( "DEBUG: In Content-Type header parse:" )
self.contentTypeStr = "text"
for element in [x.strip() for x in self.headerOnOneLine.split(";")]:
print( "DEBUG:" )
def parseVideoType(self):
print( "DEBUG: In Content-Type header parse:" )
self.contentTypeStr = "video"
for element in [x.strip() for x in self.headerOnOneLine.split(";")]:
print( "DEBUG:" )
def parseXConferenceType(self):
print( "DEBUG: In Content-Type header parse:" )
self.contentTypeStr = "x-conference"
for element in [x.strip() for x in self.headerOnOneLine.split(";")]:
print( "DEBUG:" )
# Content-Transfer-Encoding:
# Content-Transfer-Encoding: quoted-printable
class ContentTransferEncodingHeader(Header):
def __init__(self):
super().__init__()
self.headerType = "CONTENTTRANSFERENCODING"
def parse(self):
self.ContentTransferEncodingStr = None
# Delivered-To:
class DeliveredToHeader(Header):
def __init__(self):
super().__init__()
self.headerType = "DELIVEREDTO"
def parse(self):
self.deliveredToStr = None
# MIME-Version:
class MimeVersionHeader(Header):
def __init__(self):
super().__init__()
self.headerType = "MIMEVERSION"
def parse(self):
self.mimeVersionStr = None
# Acknowledge-To:
# Also-Control:
# Alternate-Recipient:
# Apparently-To:
# Approved:
# Article-Names:
# Article-Updates:
# Auto-Forwarded:
# Autoforwarded: [1327]
# Auto-Submitted:
# Comments: [822]
# Confirm-Delivery:
# Confirm-Reading:
# Content-Base:
# Content-Conversion:
# Content-Description:
# Content-Disposition:
# Content-ID:
# Content-Identifier: [1327]
# Content-Language:
# Content-Length: mailbox
# Content-Location:
# Content-MD5:
# Content-Return:
# Content-SGML-Entity:
# Control:
# Conversion: [1327]
# Conversion-With-Loss: [1327]
# Deferred-Delivery: [1327]
# Delivered-By-The-Graces-Of:
# Delivery-Date: [1327]
# Discarded-X400-IPMS-Extensions: [1327]
# Discarded-X400-MTS-Extensions: [1327]
# Disclose-Recipients:
# Disposition-Notification-To:
# Distribution:
# DL-Expansion-History: [1327]
# Encoding:
# Encrypted: [822]
# Envelope-Recipient:
# Envelope-Sender:
# Errors-To:
# Expires:
# Expiry-Date: [1327]
# Fax:
# Fcc: user
# Followup-To:
# For-Comment:
# For-Handling:
# Generate-Delivery-Report:
# Importance: [1327]
# In-Reply-To: tokens [822]
# Incomplete-Copy: [1327]
# Keywords: [822]
# Language: [1327]
# Latest-Delivery-Time: [1327]
# Lines:
# List-Archive:
# List-Help:
# List-ID:
# List-Owner:
# List-Subscribe:
# List-Unsubscribe:
# Mail-Followup-To: tokens
# Mail-Reply-To: tokens
# Mail-System-Version:
# Mailer:
# Mailing-List:
# Message-Type: [1327]
# Newsgroups:
# Notice-Requested-Upon-Delivery-To: tokens [spec]
# Obsoletes: [1327]
# Organization:
# Original-Encoded-Information-Types: [1327]
# Originating-Client:
# Originator-Info:
# Originator-Return-Address: [1327]
# Path:
# Phone:
# Precedence:
# Prevent-Nondelivery-Report:
# Priority: [1327]
# Received: tokens [822]
# References: tokens [822]
# Reply-By: [1327]
# Requested-Delivery-Method: [1327]
# Return-Path: tokens [822]
# Return-Receipt-To: [info]
# See-Also:
# Sensitivity: [1327]
# Status: mailbox
# Summary:
# Supersedes:
# Telefax:
# Versions:
# X-Confirm-Reading-To:
# X-Mailer:
# X-MS-Embedded-Report:
# X-Newsreader:
# X-PMRQC:
# X-Priority:
# X-Sender:
# X-Status: mailbox
# X-X-Sender:
# X400-Content-Return:
# X400-Content-Type: [1327]
# X400-MTS-Identifier: [1327]
# X400-Originator: [1327]
# X400-Received: [1327]
# X400-Recipients: [1327]
# Xref:
def main():
#emailHeadersLines = []
emailHeadersObjects = []
emailBodyLines = []
emailFilepath = "test_email_003a.eml"
file_object = open(emailFilepath, "r")
readingHeaders = True
readingBody = False
for line in file_object:
# print( "DEBUG:", line )
line = line.strip('\r\n')
if( 0 == len(line) ):
readingHeaders = False
if( True == readingHeaders ):
print( "Line Length:", len(line) )
if( line.startswith( "Received:", 0, len("Received:") ) ):
newHeaderObj = ReceivedHeader()
newHeaderObj.appendHeaderLine(line)
emailHeadersObjects.append(newHeaderObj)
elif( line.startswith( "From:", 0, len("From:") ) ):
newHeaderObj = FromHeader()
newHeaderObj.appendHeaderLine(line)
emailHeadersObjects.append(newHeaderObj)
elif( line.startswith( "To:", 0, len("To:") ) ):
newHeaderObj = ToHeader()
newHeaderObj.appendHeaderLine(line)
emailHeadersObjects.append(newHeaderObj)
elif( line.startswith( "Cc:", 0, len("Cc:") ) ):
newHeaderObj = CCHeader()
newHeaderObj.appendHeaderLine(line)
emailHeadersObjects.append(newHeaderObj)
elif( line.startswith( "Bcc:", 0, len("Bcc:") ) ):
newHeaderObj = BCCHeader()
newHeaderObj.appendHeaderLine(line)
emailHeadersObjects.append(newHeaderObj)
elif( line.startswith( "Sender:", 0, len("Sender:") ) ):
newHeaderObj = SenderHeader()
newHeaderObj.appendHeaderLine(line)
emailHeadersObjects.append(newHeaderObj)
elif( line.startswith( "Reply-To:", 0, len("Reply-To:") ) ):
newHeaderObj = ReplyToHeader()
newHeaderObj.appendHeaderLine(line)
emailHeadersObjects.append(newHeaderObj)
elif( line.startswith( "Date:", 0, len("Date:") ) ):
newHeaderObj = DateHeader()
newHeaderObj.appendHeaderLine(line)
emailHeadersObjects.append(newHeaderObj)
elif( line.startswith( "Subject:", 0, len("Subject:") ) ):
newHeaderObj = SubjectHeader()
newHeaderObj.appendHeaderLine(line)
emailHeadersObjects.append(newHeaderObj)
elif( line.startswith( "Message-ID:", 0, len("Message-ID:") ) ):
newHeaderObj = MessageIDHeader()
newHeaderObj.appendHeaderLine(line)
emailHeadersObjects.append(newHeaderObj)
elif( line.startswith( "Content-Type:", 0, len("Content-Type:") ) ):
print( "DEBUG: Found a Content-Type header:", line )
newHeaderObj = ContentTypeHeader()
newHeaderObj.appendHeaderLine(line)
emailHeadersObjects.append(newHeaderObj)
elif( line.startswith( "Content-Transfer-Encoding:", 0, len("Content-Transfer-Encoding:") ) ):
newHeaderObj = ContentTransferEncodingHeader()
newHeaderObj.appendHeaderLine(line)
emailHeadersObjects.append(newHeaderObj)
elif( line.startswith( "Delivered-To:", 0, len("Delivered-To:") ) ):
newHeaderObj = DeliveredToHeader()
newHeaderObj.appendHeaderLine(line)
emailHeadersObjects.append(newHeaderObj)
elif( line.startswith( "MIME-Version:", 0, len("MIME-Version:") ) ):
newHeaderObj = MimeVersionHeader()
newHeaderObj.appendHeaderLine(line)
emailHeadersObjects.append(newHeaderObj)
            elif( line.startswith( " ", 0, len(" ") ) or
                  line.startswith( "\t", 0, len("\t") ) ):
                emailHeadersObjects[-1].appendHeaderLine(line)
else:
newHeaderObj = Header()
newHeaderObj.appendHeaderLine(line)
emailHeadersObjects.append(newHeaderObj)
elif( readingBody == True ):
emailBodyLines.append(line)
else:
readingBody = True
    file_object.close()
    for hdrObj in emailHeadersObjects:
hdrObj.parse()
# print( "Emails Header Lines:", emailHeadersLines)
# print( "Emails Body Lines:", emailBodyLines)
print( "Emails Header Objects:", emailHeadersObjects)
for hdrObj in emailHeadersObjects:
print( " Header Object:", hdrObj )
return
main()
| python | 52,651 |
#!/usr/bin/env python3
# Copyright (c) 2017-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Class for fuducoind node under test"""
import contextlib
import decimal
import errno
from enum import Enum
import http.client
import json
import logging
import os
import re
import subprocess
import tempfile
import time
import urllib.parse
import collections
import shlex
import sys
from .authproxy import JSONRPCException
from .util import (
append_config,
delete_cookie_file,
get_rpc_proxy,
rpc_url,
wait_until,
p2p_port,
)
# For Python 3.4 compatibility
JSONDecodeError = getattr(json, "JSONDecodeError", ValueError)
BITCOIND_PROC_WAIT_TIMEOUT = 60
class FailedToStartError(Exception):
"""Raised when a node fails to start correctly."""
class ErrorMatch(Enum):
FULL_TEXT = 1
FULL_REGEX = 2
PARTIAL_REGEX = 3
class TestNode():
"""A class for representing a fuducoind node under test.
This class contains:
- state about the node (whether it's running, etc)
- a Python subprocess.Popen object representing the running process
- an RPC connection to the node
- one or more P2P connections to the node
To make things easier for the test writer, any unrecognised messages will
be dispatched to the RPC connection."""
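    # A rough usage sketch (paths and binary names here are illustrative; the
    # datadir/config is normally prepared by the surrounding test framework):
    #
    #   node = TestNode(0, "/tmp/node0", rpchost=None, timewait=60,
    #                   bitcoind="fuducoind", bitcoin_cli="fuducoin-cli",
    #                   coverage_dir=None, cwd="/tmp")
    #   node.start()
    #   node.wait_for_rpc_connection()
    #   node.getblockcount()   # forwarded to the RPC proxy via __getattr__
    #   node.stop_node()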
def __init__(self, i, datadir, *, rpchost, timewait, bitcoind, bitcoin_cli, coverage_dir, cwd, extra_conf=None, extra_args=None, use_cli=False, start_perf=False):
"""
Kwargs:
start_perf (bool): If True, begin profiling the node with `perf` as soon as
the node starts.
"""
self.index = i
self.datadir = datadir
self.stdout_dir = os.path.join(self.datadir, "stdout")
self.stderr_dir = os.path.join(self.datadir, "stderr")
self.rpchost = rpchost
self.rpc_timeout = timewait
self.binary = bitcoind
self.coverage_dir = coverage_dir
self.cwd = cwd
if extra_conf is not None:
append_config(datadir, extra_conf)
# Most callers will just need to add extra args to the standard list below.
# For those callers that need more flexibility, they can just set the args property directly.
# Note that common args are set in the config file (see initialize_datadir)
self.extra_args = extra_args
self.args = [
self.binary,
"-datadir=" + self.datadir,
"-logtimemicros",
"-debug",
"-debugexclude=libevent",
"-debugexclude=leveldb",
"-uacomment=testnode%d" % i,
]
self.cli = TestNodeCLI(bitcoin_cli, self.datadir)
self.use_cli = use_cli
self.start_perf = start_perf
self.running = False
self.process = None
self.rpc_connected = False
self.rpc = None
self.url = None
self.log = logging.getLogger('TestFramework.node%d' % i)
self.cleanup_on_exit = True # Whether to kill the node when this object goes away
# Cache perf subprocesses here by their data output filename.
self.perf_subprocesses = {}
self.p2ps = []
def get_deterministic_priv_key(self):
"""Return a deterministic priv key in base58, that only depends on the node's index"""
AddressKeyPair = collections.namedtuple('AddressKeyPair', ['address', 'key'])
PRIV_KEYS = [
# address , privkey
AddressKeyPair('mjTkW3DjgyZck4KbiRusZsqTgaYTxdSz6z', 'cVpF924EspNh8KjYsfhgY96mmxvT6DgdWiTYMtMjuM74hJaU5psW'),
AddressKeyPair('msX6jQXvxiNhx3Q62PKeLPrhrqZQdSimTg', 'cUxsWyKyZ9MAQTaAhUQWJmBbSvHMwSmuv59KgxQV7oZQU3PXN3KE'),
AddressKeyPair('mnonCMyH9TmAsSj3M59DsbH8H63U3RKoFP', 'cTrh7dkEAeJd6b3MRX9bZK8eRmNqVCMH3LSUkE3dSFDyzjU38QxK'),
AddressKeyPair('mqJupas8Dt2uestQDvV2NH3RU8uZh2dqQR', 'cVuKKa7gbehEQvVq717hYcbE9Dqmq7KEBKqWgWrYBa2CKKrhtRim'),
AddressKeyPair('msYac7Rvd5ywm6pEmkjyxhbCDKqWsVeYws', 'cQDCBuKcjanpXDpCqacNSjYfxeQj8G6CAtH1Dsk3cXyqLNC4RPuh'),
AddressKeyPair('n2rnuUnwLgXqf9kk2kjvVm8R5BZK1yxQBi', 'cQakmfPSLSqKHyMFGwAqKHgWUiofJCagVGhiB4KCainaeCSxeyYq'),
AddressKeyPair('myzuPxRwsf3vvGzEuzPfK9Nf2RfwauwYe6', 'cQMpDLJwA8DBe9NcQbdoSb1BhmFxVjWD5gRyrLZCtpuF9Zi3a9RK'),
AddressKeyPair('mumwTaMtbxEPUswmLBBN3vM9oGRtGBrys8', 'cSXmRKXVcoouhNNVpcNKFfxsTsToY5pvB9DVsFksF1ENunTzRKsy'),
AddressKeyPair('mpV7aGShMkJCZgbW7F6iZgrvuPHjZjH9qg', 'cSoXt6tm3pqy43UMabY6eUTmR3eSUYFtB2iNQDGgb3VUnRsQys2k'),
]
return PRIV_KEYS[self.index]
def get_mem_rss_kilobytes(self):
"""Get the memory usage (RSS) per `ps`.
Returns None if `ps` is unavailable.
"""
assert self.running
try:
return int(subprocess.check_output(
["ps", "h", "-o", "rss", "{}".format(self.process.pid)],
stderr=subprocess.DEVNULL).split()[-1])
# Avoid failing on platforms where ps isn't installed.
#
# We could later use something like `psutils` to work across platforms.
except (FileNotFoundError, subprocess.SubprocessError):
self.log.exception("Unable to get memory usage")
return None
def _node_msg(self, msg: str) -> str:
"""Return a modified msg that identifies this node by its index as a debugging aid."""
return "[node %d] %s" % (self.index, msg)
def _raise_assertion_error(self, msg: str):
"""Raise an AssertionError with msg modified to identify this node."""
raise AssertionError(self._node_msg(msg))
def __del__(self):
# Ensure that we don't leave any bitcoind processes lying around after
# the test ends
if self.process and self.cleanup_on_exit:
# Should only happen on test failure
# Avoid using logger, as that may have already been shutdown when
# this destructor is called.
print(self._node_msg("Cleaning up leftover process"))
self.process.kill()
def __getattr__(self, name):
"""Dispatches any unrecognised messages to the RPC connection or a CLI instance."""
if self.use_cli:
return getattr(self.cli, name)
else:
assert self.rpc_connected and self.rpc is not None, self._node_msg("Error: no RPC connection")
return getattr(self.rpc, name)
def start(self, extra_args=None, *, cwd=None, stdout=None, stderr=None, **kwargs):
"""Start the node."""
if extra_args is None:
extra_args = self.extra_args
# Add a new stdout and stderr file each time bitcoind is started
if stderr is None:
stderr = tempfile.NamedTemporaryFile(dir=self.stderr_dir, delete=False)
if stdout is None:
stdout = tempfile.NamedTemporaryFile(dir=self.stdout_dir, delete=False)
self.stderr = stderr
self.stdout = stdout
if cwd is None:
cwd = self.cwd
# Delete any existing cookie file -- if such a file exists (eg due to
# unclean shutdown), it will get overwritten anyway by bitcoind, and
# potentially interfere with our attempt to authenticate
delete_cookie_file(self.datadir)
# add environment variable LIBC_FATAL_STDERR_=1 so that libc errors are written to stderr and not the terminal
subp_env = dict(os.environ, LIBC_FATAL_STDERR_="1")
self.process = subprocess.Popen(self.args + extra_args, env=subp_env, stdout=stdout, stderr=stderr, cwd=cwd, **kwargs)
self.running = True
self.log.debug("fuducoind started, waiting for RPC to come up")
if self.start_perf:
self._start_perf()
def wait_for_rpc_connection(self):
"""Sets up an RPC connection to the fuducoind process. Returns False if unable to connect."""
# Poll at a rate of four times per second
poll_per_s = 4
for _ in range(poll_per_s * self.rpc_timeout):
if self.process.poll() is not None:
raise FailedToStartError(self._node_msg(
'fuducoind exited with status {} during initialization'.format(self.process.returncode)))
try:
rpc = get_rpc_proxy(rpc_url(self.datadir, self.index, self.rpchost), self.index, timeout=self.rpc_timeout, coveragedir=self.coverage_dir)
rpc.getblockcount()
# If the call to getblockcount() succeeds then the RPC connection is up
self.log.debug("RPC successfully started")
if self.use_cli:
return
self.rpc = rpc
self.rpc_connected = True
self.url = self.rpc.url
return
except IOError as e:
if e.errno != errno.ECONNREFUSED: # Port not yet open?
raise # unknown IO error
except JSONRPCException as e: # Initialization phase
# -28 RPC in warmup
# -342 Service unavailable, RPC server started but is shutting down due to error
if e.error['code'] != -28 and e.error['code'] != -342:
raise # unknown JSON RPC exception
            except ValueError as e: # cookie file not found and no rpcuser or rpcpassword. bitcoind still starting
if "No RPC credentials" not in str(e):
raise
time.sleep(1.0 / poll_per_s)
self._raise_assertion_error("Unable to connect to fuducoind")
def generate(self, nblocks, maxtries=1000000):
self.log.debug("TestNode.generate() dispatches `generate` call to `generatetoaddress`")
return self.generatetoaddress(nblocks=nblocks, address=self.get_deterministic_priv_key().address, maxtries=maxtries)
def get_wallet_rpc(self, wallet_name):
if self.use_cli:
return self.cli("-rpcwallet={}".format(wallet_name))
else:
assert self.rpc_connected and self.rpc, self._node_msg("RPC not connected")
wallet_path = "wallet/{}".format(urllib.parse.quote(wallet_name))
return self.rpc / wallet_path
def stop_node(self, expected_stderr='', wait=0):
"""Stop the node."""
if not self.running:
return
self.log.debug("Stopping node")
try:
self.stop(wait=wait)
except http.client.CannotSendRequest:
self.log.exception("Unable to stop node.")
# If there are any running perf processes, stop them.
for profile_name in tuple(self.perf_subprocesses.keys()):
self._stop_perf(profile_name)
# Check that stderr is as expected
self.stderr.seek(0)
stderr = self.stderr.read().decode('utf-8').strip()
if stderr != expected_stderr:
raise AssertionError("Unexpected stderr {} != {}".format(stderr, expected_stderr))
self.stdout.close()
self.stderr.close()
del self.p2ps[:]
def is_node_stopped(self):
"""Checks whether the node has stopped.
Returns True if the node has stopped. False otherwise.
This method is responsible for freeing resources (self.process)."""
if not self.running:
return True
return_code = self.process.poll()
if return_code is None:
return False
# process has stopped. Assert that it didn't return an error code.
assert return_code == 0, self._node_msg(
"Node returned non-zero exit code (%d) when stopping" % return_code)
self.running = False
self.process = None
self.rpc_connected = False
self.rpc = None
self.log.debug("Node stopped")
return True
def wait_until_stopped(self, timeout=BITCOIND_PROC_WAIT_TIMEOUT):
wait_until(self.is_node_stopped, timeout=timeout)
@contextlib.contextmanager
def assert_debug_log(self, expected_msgs):
debug_log = os.path.join(self.datadir, 'regtest', 'debug.log')
with open(debug_log, encoding='utf-8') as dl:
dl.seek(0, 2)
prev_size = dl.tell()
try:
yield
finally:
with open(debug_log, encoding='utf-8') as dl:
dl.seek(prev_size)
log = dl.read()
print_log = " - " + "\n - ".join(log.splitlines())
for expected_msg in expected_msgs:
if re.search(re.escape(expected_msg), log, flags=re.MULTILINE) is None:
self._raise_assertion_error('Expected message "{}" does not partially match log:\n\n{}\n\n'.format(expected_msg, print_log))
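    # Typical use (sketch only; the expected message is illustrative):
    #
    #   with node.assert_debug_log(expected_msgs=["Some debug.log line"]):
    #       node.generate(1)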
@contextlib.contextmanager
def assert_memory_usage_stable(self, *, increase_allowed=0.03):
"""Context manager that allows the user to assert that a node's memory usage (RSS)
hasn't increased beyond some threshold percentage.
Args:
increase_allowed (float): the fractional increase in memory allowed until failure;
e.g. `0.12` for up to 12% increase allowed.
"""
before_memory_usage = self.get_mem_rss_kilobytes()
yield
after_memory_usage = self.get_mem_rss_kilobytes()
if not (before_memory_usage and after_memory_usage):
self.log.warning("Unable to detect memory usage (RSS) - skipping memory check.")
return
perc_increase_memory_usage = (after_memory_usage / before_memory_usage) - 1
if perc_increase_memory_usage > increase_allowed:
self._raise_assertion_error(
"Memory usage increased over threshold of {:.3f}% from {} to {} ({:.3f}%)".format(
increase_allowed * 100, before_memory_usage, after_memory_usage,
perc_increase_memory_usage * 100))
@contextlib.contextmanager
def profile_with_perf(self, profile_name):
"""
Context manager that allows easy profiling of node activity using `perf`.
See `test/functional/README.md` for details on perf usage.
Args:
profile_name (str): This string will be appended to the
profile data filename generated by perf.
"""
subp = self._start_perf(profile_name)
yield
if subp:
self._stop_perf(profile_name)
def _start_perf(self, profile_name=None):
"""Start a perf process to profile this node.
Returns the subprocess running perf."""
subp = None
def test_success(cmd):
return subprocess.call(
# shell=True required for pipe use below
cmd, shell=True,
stderr=subprocess.DEVNULL, stdout=subprocess.DEVNULL) == 0
if not sys.platform.startswith('linux'):
self.log.warning("Can't profile with perf; only availabe on Linux platforms")
return None
if not test_success('which perf'):
self.log.warning("Can't profile with perf; must install perf-tools")
return None
if not test_success('readelf -S {} | grep .debug_str'.format(shlex.quote(self.binary))):
self.log.warning(
"perf output won't be very useful without debug symbols compiled into fuducoind")
output_path = tempfile.NamedTemporaryFile(
dir=self.datadir,
prefix="{}.perf.data.".format(profile_name or 'test'),
delete=False,
).name
cmd = [
'perf', 'record',
'-g', # Record the callgraph.
'--call-graph', 'dwarf', # Compatibility for gcc's --fomit-frame-pointer.
'-F', '101', # Sampling frequency in Hz.
'-p', str(self.process.pid),
'-o', output_path,
]
subp = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
self.perf_subprocesses[profile_name] = subp
return subp
def _stop_perf(self, profile_name):
"""Stop (and pop) a perf subprocess."""
subp = self.perf_subprocesses.pop(profile_name)
output_path = subp.args[subp.args.index('-o') + 1]
subp.terminate()
subp.wait(timeout=10)
stderr = subp.stderr.read().decode()
if 'Consider tweaking /proc/sys/kernel/perf_event_paranoid' in stderr:
self.log.warning(
"perf couldn't collect data! Try "
"'sudo sysctl -w kernel.perf_event_paranoid=-1'")
else:
report_cmd = "perf report -i {}".format(output_path)
self.log.info("See perf output by running '{}'".format(report_cmd))
def assert_start_raises_init_error(self, extra_args=None, expected_msg=None, match=ErrorMatch.FULL_TEXT, *args, **kwargs):
"""Attempt to start the node and expect it to raise an error.
extra_args: extra arguments to pass through to fuducoind
expected_msg: regex that stderr should match when fuducoind fails
Will throw if fuducoind starts without an error.
        Will throw if an expected_msg is provided and it does not match fuducoind's stderr."""
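        # Sketch of a call (flag and message are illustrative, not taken from
        # an actual test in this suite):
        #
        #   node.assert_start_raises_init_error(
        #       extra_args=['-unknownflag'],
        #       expected_msg='Error parsing command line arguments',
        #       match=ErrorMatch.PARTIAL_REGEX)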
with tempfile.NamedTemporaryFile(dir=self.stderr_dir, delete=False) as log_stderr, \
tempfile.NamedTemporaryFile(dir=self.stdout_dir, delete=False) as log_stdout:
try:
self.start(extra_args, stdout=log_stdout, stderr=log_stderr, *args, **kwargs)
self.wait_for_rpc_connection()
self.stop_node()
self.wait_until_stopped()
except FailedToStartError as e:
self.log.debug('fuducoind failed to start: %s', e)
self.running = False
self.process = None
# Check stderr for expected message
if expected_msg is not None:
log_stderr.seek(0)
stderr = log_stderr.read().decode('utf-8').strip()
if match == ErrorMatch.PARTIAL_REGEX:
if re.search(expected_msg, stderr, flags=re.MULTILINE) is None:
self._raise_assertion_error(
'Expected message "{}" does not partially match stderr:\n"{}"'.format(expected_msg, stderr))
elif match == ErrorMatch.FULL_REGEX:
if re.fullmatch(expected_msg, stderr) is None:
self._raise_assertion_error(
'Expected message "{}" does not fully match stderr:\n"{}"'.format(expected_msg, stderr))
elif match == ErrorMatch.FULL_TEXT:
if expected_msg != stderr:
self._raise_assertion_error(
'Expected message "{}" does not fully match stderr:\n"{}"'.format(expected_msg, stderr))
else:
if expected_msg is None:
assert_msg = "fuducoind should have exited with an error"
else:
assert_msg = "fuducoind should have exited with expected error " + expected_msg
self._raise_assertion_error(assert_msg)
def add_p2p_connection(self, p2p_conn, *, wait_for_verack=True, **kwargs):
"""Add a p2p connection to the node.
This method adds the p2p connection to the self.p2ps list and also
returns the connection to the caller."""
if 'dstport' not in kwargs:
kwargs['dstport'] = p2p_port(self.index)
if 'dstaddr' not in kwargs:
kwargs['dstaddr'] = '127.0.0.1'
p2p_conn.peer_connect(**kwargs)()
self.p2ps.append(p2p_conn)
if wait_for_verack:
p2p_conn.wait_for_verack()
return p2p_conn
@property
def p2p(self):
"""Return the first p2p connection
Convenience property - most tests only use a single p2p connection to each
node, so this saves having to write node.p2ps[0] many times."""
assert self.p2ps, self._node_msg("No p2p connection")
return self.p2ps[0]
def disconnect_p2ps(self):
"""Close all p2p connections to the node."""
for p in self.p2ps:
p.peer_disconnect()
del self.p2ps[:]
class TestNodeCLIAttr:
def __init__(self, cli, command):
self.cli = cli
self.command = command
def __call__(self, *args, **kwargs):
return self.cli.send_cli(self.command, *args, **kwargs)
def get_request(self, *args, **kwargs):
return lambda: self(*args, **kwargs)
def arg_to_cli(arg):
if isinstance(arg, bool):
return str(arg).lower()
elif isinstance(arg, dict) or isinstance(arg, list):
return json.dumps(arg)
else:
return str(arg)
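# A few illustrative conversions performed by arg_to_cli:
#   arg_to_cli(True)      -> "true"
#   arg_to_cli({"a": 1})  -> '{"a": 1}'
#   arg_to_cli(0.1)       -> "0.1"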
class TestNodeCLI():
"""Interface to fuducoin-cli for an individual node"""
def __init__(self, binary, datadir):
self.options = []
self.binary = binary
self.datadir = datadir
self.input = None
self.log = logging.getLogger('TestFramework.bitcoincli')
def __call__(self, *options, input=None):
# TestNodeCLI is callable with bitcoin-cli command-line options
cli = TestNodeCLI(self.binary, self.datadir)
cli.options = [str(o) for o in options]
cli.input = input
return cli
def __getattr__(self, command):
return TestNodeCLIAttr(self, command)
def batch(self, requests):
results = []
for request in requests:
try:
results.append(dict(result=request()))
except JSONRPCException as e:
results.append(dict(error=e))
return results
def send_cli(self, command=None, *args, **kwargs):
"""Run fuducoin-cli command. Deserializes returned string as python object."""
pos_args = [arg_to_cli(arg) for arg in args]
named_args = [str(key) + "=" + arg_to_cli(value) for (key, value) in kwargs.items()]
assert not (pos_args and named_args), "Cannot use positional arguments and named arguments in the same fuducoin-cli call"
p_args = [self.binary, "-datadir=" + self.datadir] + self.options
if named_args:
p_args += ["-named"]
if command is not None:
p_args += [command]
p_args += pos_args + named_args
self.log.debug("Running fuducoin-cli command: %s" % command)
process = subprocess.Popen(p_args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
cli_stdout, cli_stderr = process.communicate(input=self.input)
returncode = process.poll()
if returncode:
match = re.match(r'error code: ([-0-9]+)\nerror message:\n(.*)', cli_stderr)
if match:
code, message = match.groups()
raise JSONRPCException(dict(code=int(code), message=message))
# Ignore cli_stdout, raise with cli_stderr
raise subprocess.CalledProcessError(returncode, self.binary, output=cli_stderr)
try:
return json.loads(cli_stdout, parse_float=decimal.Decimal)
except JSONDecodeError:
return cli_stdout.rstrip("\n")
| python | 23,410 |
import os
from os.path import join, isdir, expanduser, isfile
import pathlib
import sys
import json
import csv
import yaml
import traceback
import time
import requests
from multiprocessing import Pool, cpu_count
from functools import partial
import zipfile
import shutil
from pathlib import Path
from http import HTTPStatus
from dependency_analyzer_const import DependencyAnalyzerConstants
from dependency_analyzer_utils import DependencyAnalyzerUtils
from build_outcome_type import BuildOutcomeType
from bugswarm.analyzer.analyzer import Analyzer
from solver_utils import SolverUtils
from final_outcome import FinalOutcome
import import_scanner_utils
""" Implementation of IterativeDependencySolver for BugSwarm """
def main(args):
overall_start_time = time.time()
input_files_path = args[2]
orig_log_path = args[4]
source_code_path = args[6]
intermediate_path = args[8]
artifact_dict = DependencyAnalyzerUtils.get_artifact_dict(input_files_path)
fix_file_contents = get_dependency_analyzer_results(input_files_path)
with Pool(cpu_count()) as pool:
results = pool.map(partial(process_each_artifact_dependency_solve,
artifact_dict=artifact_dict,
intermediate_path=intermediate_path,
source_code_path=source_code_path,
orig_log_path=orig_log_path), fix_file_contents)
results = [line[0] for line in results]
DependencyAnalyzerUtils.write_to_csv(results, DependencyAnalyzerConstants.BUGSWARM_CSV_SOLVER_RESULTS_HEADERS, 'iterative_solve_results.csv')
partial_fix = 0
complete_fix = 0
for row in results:
if row[2] in [FinalOutcome.SUCCESS_FIXED_BUILD, FinalOutcome.SUCCESS_RESTORED_TO_ORIGINAL_STATUS, FinalOutcome.SUCCESS_RESTORED_TO_ORIGINAL_ERROR] and len(row[1]):
complete_fix += 1
elif row[2] in [FinalOutcome.PARTIAL_EXHAUSTED_ALL_OPTIONS, FinalOutcome.SUCCESS_NO_LONGER_DEPENDENCY_ERROR, FinalOutcome.PARTIAL_NO_POSSIBLE_CANDIDATES] and len(row[1]):
partial_fix += 1
overall_end_time = time.time()
print('==========**** IterativeDependencySolver FINAL OUTPUT ****==========')
if len(results) == 0:
print('No artifacts to solve for')
else:
print('Number of builds identified: {}'.format(len(results)))
print('Complete Fixes: {}({})'.format(complete_fix, complete_fix*100/len(results)))
print('Partial Fixes: {}({})'.format(partial_fix, partial_fix*100/len(results)))
print('No Fixes: {}({})'.format(len(results) - (complete_fix + partial_fix), (len(results) - (complete_fix + partial_fix))*100/len(results)))
print('Total Runtime: {} minutes'.format((overall_end_time - overall_start_time)/60))
print('==========**** END OF OUTPUT ****==========')
def create_artifact_dir(artifact, f_or_p, intermediate_path):
""" Create directories for storing intermediate results of while
solving a build """
dir_path = '{}/{}/{}'.format(intermediate_path, artifact, f_or_p)
if isdir(dir_path):
return
if not isdir(join(intermediate_path, artifact)):
_, stdout, stderr, ok = DependencyAnalyzerUtils._run_command(DependencyAnalyzerConstants.SUDO_MKDIR.format(join(intermediate_path, artifact)))
if not isdir(join(intermediate_path, artifact, f_or_p)):
_, stdout, stderr, ok = DependencyAnalyzerUtils._run_command(DependencyAnalyzerConstants.SUDO_MKDIR.format(join(intermediate_path, artifact, f_or_p)))
_, stdout, stderr, ok = DependencyAnalyzerUtils._run_command(DependencyAnalyzerConstants.CHANGE_PERMISSION_CMD.format(join(intermediate_path, artifact, f_or_p)))
def create_run_dir(iter_count, dir_path):
""" Create a directory for storing intermediate results of a run while
solving a build """
_, stdout, stderr, ok = DependencyAnalyzerUtils._run_command(DependencyAnalyzerConstants.SUDO_MKDIR.format(join(dir_path, DependencyAnalyzerConstants.ITER_RUN_DIR_NAME.format(iter_count))))
_, stdout, stderr, ok = DependencyAnalyzerUtils._run_command(DependencyAnalyzerConstants.CHANGE_PERMISSION_CMD.format(join(dir_path, DependencyAnalyzerConstants.ITER_RUN_DIR_NAME.format(iter_count))))
def append_output_log(log_path, content):
with open(join(log_path, DependencyAnalyzerConstants.OUTPUT_LOG), DependencyAnalyzerConstants.FILE_ACCESS_MODE) as f:
for i in content:
f.write(i + DependencyAnalyzerConstants.CHAR_NEW_LINE)
f.write(DependencyAnalyzerConstants.CHAR_NEW_LINE)
def process_each_artifact_dependency_solve(fix_file_row, artifact_dict, intermediate_path, source_code_path, orig_log_path):
""" Process each artifact by generating and applying patches """
final_output = []
start_time = time.time()
accepted_patch_str = DependencyAnalyzerConstants.CHAR_EMPTY
log_output_content = []
log_file_name = fix_file_row[0].strip()
analyzer_result = None
artifact, f_or_p = extract_from_file_name(log_file_name, artifact_dict)
final_iter_count = 0
try:
solve_result = None
analyzer_result = None
cleanup()
output_path = join(intermediate_path, artifact[DependencyAnalyzerConstants.IMAGE_TAG_KEY], f_or_p)
create_artifact_dir(artifact[DependencyAnalyzerConstants.IMAGE_TAG_KEY], f_or_p, intermediate_path)
commit_date = artifact[DependencyAnalyzerConstants.JOB_KEY.format(f_or_p)][DependencyAnalyzerConstants.COMMITTED_AT_KEY]
print('Processing....{}_{}'.format(artifact[DependencyAnalyzerConstants.IMAGE_TAG_KEY], f_or_p))
# Step1: Get source code from GitHub
cloned_repo_dir, log_output_content = clone_repo(artifact, f_or_p, log_output_content, source_code_path)
if not cloned_repo_dir:
solve_result = FinalOutcome.FAILED_SOURCE_CODE_CLONE
cleanup(None, output_path, log_output_content)
final_output.append([log_file_name, DependencyAnalyzerConstants.CHAR_EMPTY, solve_result, analyzer_result, (time.time() - start_time), final_iter_count])
return final_output
possible_candidates = json.loads(fix_file_row[2])
# Step 2: Modify .travis.yml
modified_travis_yml, log_output_content = modify_travis_yml(f_or_p, cloned_repo_dir, artifact[DependencyAnalyzerConstants.REPO_KEY], log_output_content)
if not modified_travis_yml:
cleanup(None, output_path, log_output_content, cloned_repo_dir)
solve_result = FinalOutcome.FAILED_UNABLE_TO_MODIFY_TRAVIS_YML
final_output.append([log_file_name, DependencyAnalyzerConstants.CHAR_EMPTY, solve_result, analyzer_result, (time.time() - start_time), final_iter_count])
return final_output
# Step3: Generate test.sh script with travis-build
generated_build_sh, log_output_content = generate_and_prepare_build_script(f_or_p, cloned_repo_dir, artifact[DependencyAnalyzerConstants.REPO_KEY], log_output_content)
if not generated_build_sh:
log_output_content.append(FinalOutcome.FAILED_TO_PREPARE_BUILD_SCRIPT)
solve_result = FinalOutcome.FAILED_TO_PREPARE_BUILD_SCRIPT
cleanup(None, output_path, log_output_content, cloned_repo_dir)
final_output.append([log_file_name, DependencyAnalyzerConstants.CHAR_EMPTY, solve_result, analyzer_result, (time.time() - start_time), final_iter_count])
return final_output
# Step4: Pinning correct versions for possible candidates
dep_files = fix_file_row[3].strip()
patches, log_output_content = SolverUtils.generate_patch_combos(possible_candidates, commit_date, artifact[DependencyAnalyzerConstants.REPO_KEY], log_output_content, cloned_repo_dir, dep_files)
# Step5: Copy files to container to make changes except patch_requirements.txt
curr_errors = fix_file_row[1].split(DependencyAnalyzerConstants.ERROR_LINE_DELIMITER)
run_solver = True
curr_patch_str = DependencyAnalyzerConstants.CHAR_EMPTY
accepted_patch_str = DependencyAnalyzerConstants.CHAR_EMPTY
iter_count = -1
# Step6: Run iterative solver
        while run_solver:
iter_count += 1
final_iter_count += 1
output_log_path = join(intermediate_path, artifact[DependencyAnalyzerConstants.IMAGE_TAG_KEY], f_or_p, DependencyAnalyzerConstants.ITER_RUN_DIR_NAME.format(iter_count))
create_run_dir(iter_count, join(intermediate_path, artifact[DependencyAnalyzerConstants.IMAGE_TAG_KEY], f_or_p,))
found_new_patch = False
curr_patch_str = accepted_patch_str
print(join(output_log_path, DependencyAnalyzerConstants.PATCH_COMBO_FILE_NAME))
print("New attempt " + str(iter_count))
print("<accepted_patch_str>")
print(accepted_patch_str)
print("</accepted_patch_str>")
# write available patch combinations to intermediate directory for record keeping
with open(join(intermediate_path, artifact[DependencyAnalyzerConstants.IMAGE_TAG_KEY], f_or_p, DependencyAnalyzerConstants.ITER_RUN_DIR_NAME.format(iter_count), DependencyAnalyzerConstants.PATCH_COMBO_FILE_NAME), DependencyAnalyzerConstants.FILE_WRITE_PLUS_MODE) as f:
f.write(json.dumps(patches))
accepted_patch_list = accepted_patch_str.split(DependencyAnalyzerConstants.CHAR_NEW_LINE)
need_classical = True
if iter_count == 0:
import_scanner_command = "java -jar /home/pydfix/PythonDependencyFix/import-scanner/build/libs/import-scanner-1.0-SNAPSHOT-all.jar " + cloned_repo_dir + " " + output_log_path
print(import_scanner_command)
process, stdout, stderr, ok = import_scanner_utils._run_command(import_scanner_command)
if ok:
need_classical = False
curr_patch_str = open(join(output_log_path, "scanned_dependencies_requirements_without_version.txt")).read()
if need_classical:
# Step6a: Get patch
for patch in patches:
most_probable_patch = patches[0]
if [p for p in accepted_patch_list if p.strip(DependencyAnalyzerConstants.CHAR_NEW_LINE).startswith(patch[DependencyAnalyzerConstants.NAME_KEY] + DependencyAnalyzerConstants.STR_EQUALS)]:
if not most_probable_patch[DependencyAnalyzerConstants.APPLIED_KEY] and patch[DependencyAnalyzerConstants.NAME_KEY] == most_probable_patch[DependencyAnalyzerConstants.NAME_KEY] and accepted_patch_list[-1].strip(DependencyAnalyzerConstants.CHAR_NEW_LINE).startswith(patch[DependencyAnalyzerConstants.NAME_KEY] + DependencyAnalyzerConstants.STR_EQUALS) and accepted_patch_list[-1].strip(DependencyAnalyzerConstants.CHAR_NEW_LINE).split(DependencyAnalyzerConstants.STR_EQUALS)[-1] != patch[DependencyAnalyzerConstants.VERSION_KEY]:
del accepted_patch_list[-1]
accepted_patch_str = DependencyAnalyzerConstants.CHAR_NEW_LINE.join(accepted_patch_list)
else:
continue
if patch[DependencyAnalyzerConstants.INCLUDED_KEY] or patch[DependencyAnalyzerConstants.APPLIED_KEY]:
continue
if len(patch[DependencyAnalyzerConstants.NAME_KEY]) == 0:
curr_patch_str = accepted_patch_str + patch[DependencyAnalyzerConstants.NAME_KEY] + DependencyAnalyzerConstants.CHAR_NEW_LINE
else:
curr_patch_str = accepted_patch_str + patch[DependencyAnalyzerConstants.NAME_KEY] + DependencyAnalyzerConstants.STR_EQUALS + patch[DependencyAnalyzerConstants.VERSION_KEY] + DependencyAnalyzerConstants.CHAR_NEW_LINE
patch[DependencyAnalyzerConstants.APPLIED_KEY] = True
found_new_patch = True
break
# If no unapplied patches are found
if not found_new_patch:
use_iter_count = iter_count
if iter_count > 0:
use_iter_count = iter_count - 1
# Check if problem has been solved
if check_if_restored_to_original_error(intermediate_path, artifact[DependencyAnalyzerConstants.IMAGE_TAG_KEY], artifact[DependencyAnalyzerConstants.JOB_KEY.format(f_or_p)][DependencyAnalyzerConstants.JOB_ID_KEY], use_iter_count, f_or_p, orig_log_path):
# The current log matches the original log
log_output_content.append(FinalOutcome.SUCCESS_RESTORED_TO_ORIGINAL_STATUS)
solve_result = FinalOutcome.SUCCESS_RESTORED_TO_ORIGINAL_STATUS
analyzer_result = True
else:
# The current log does not match the original log
analyzer_result = False
log_output_content.append(FinalOutcome.PARTIAL_EXHAUSTED_ALL_OPTIONS)
solve_result = FinalOutcome.PARTIAL_EXHAUSTED_ALL_OPTIONS
cleanup(DependencyAnalyzerConstants.ARTIFACT_DIR.format(artifact[DependencyAnalyzerConstants.IMAGE_TAG_KEY], f_or_p), output_log_path, log_output_content, cloned_repo_dir)
break
# write current patch to intermediate directory for record keeping
with open(join(intermediate_path, artifact[DependencyAnalyzerConstants.IMAGE_TAG_KEY], f_or_p, DependencyAnalyzerConstants.ITER_RUN_DIR_NAME.format(iter_count), DependencyAnalyzerConstants.PATCH_DEPENDENCY_FILE_NAME), DependencyAnalyzerConstants.FILE_WRITE_PLUS_MODE) as f:
f.write(curr_patch_str)
# write current patch to cloned directory for new build
with open(join(cloned_repo_dir, DependencyAnalyzerConstants.PATCH_DEPENDENCY_FILE_NAME), DependencyAnalyzerConstants.FILE_WRITE_MODE) as f:
f.write(curr_patch_str)
# Step6b: Apply patch, check outcome
SolverUtils.remove_docker_container(DependencyAnalyzerConstants.ARTIFACT_DIR.format(artifact[DependencyAnalyzerConstants.IMAGE_TAG_KEY], f_or_p))
container_id, log_output_content = copy_files_to_container(artifact[DependencyAnalyzerConstants.IMAGE_TAG_KEY], cloned_repo_dir, f_or_p, log_output_content, artifact[DependencyAnalyzerConstants.REPO_KEY])
# If failed to run Docker container
if not container_id:
log_output_content.append(FinalOutcome.FAILED_TO_RUN_DOCKER)
cleanup(DependencyAnalyzerConstants.ARTIFACT_DIR.format(artifact[DependencyAnalyzerConstants.IMAGE_TAG_KEY], f_or_p), output_log_path, log_output_content, cloned_repo_dir)
solve_result = FinalOutcome.FAILED_TO_RUN_DOCKER
break
build_outcome, log_output_content = execute_patch_changes(container_id, f_or_p, artifact, curr_errors, log_output_content)
# If failed to run build
if not build_outcome:
cleanup(DependencyAnalyzerConstants.ARTIFACT_DIR.format(artifact[DependencyAnalyzerConstants.IMAGE_TAG_KEY], f_or_p), output_log_path, log_output_content, cloned_repo_dir)
solve_result = FinalOutcome.FAILED_NO_BUILD_OUTCOME
log_output_content.append(FinalOutcome.FAILED_NO_BUILD_OUTCOME)
break
# Copying build logs for LogErrorAnalyzer
_, stdout, stderr, ok = DependencyAnalyzerUtils._run_command(
DependencyAnalyzerConstants.COPY_BUGSWARM_SANDBOX_LOG.format(artifact[DependencyAnalyzerConstants.IMAGE_TAG_KEY], f_or_p, join(intermediate_path,
artifact[DependencyAnalyzerConstants.IMAGE_TAG_KEY],
f_or_p,
DependencyAnalyzerConstants.ITER_RUN_DIR_NAME.format(iter_count))))
# If build is successful
if build_outcome == BuildOutcomeType.BUILD_SUCCESSFUL:
log_output_content.append(FinalOutcome.SUCCESS_FIXED_BUILD)
accepted_patch_str = curr_patch_str
cleanup(DependencyAnalyzerConstants.ARTIFACT_DIR.format(artifact[DependencyAnalyzerConstants.IMAGE_TAG_KEY], f_or_p), output_log_path, log_output_content, cloned_repo_dir)
solve_result = FinalOutcome.SUCCESS_FIXED_BUILD
use_iter_count = iter_count
if iter_count > 0:
use_iter_count = iter_count - 1
if check_if_restored_to_original_error(intermediate_path, artifact[DependencyAnalyzerConstants.IMAGE_TAG_KEY], artifact[DependencyAnalyzerConstants.JOB_KEY.format(f_or_p)][DependencyAnalyzerConstants.JOB_ID_KEY], use_iter_count, f_or_p, orig_log_path):
analyzer_result = True
else:
analyzer_result = False
break
else:
new_errors, candidates_found, new_dep_files, log_output_content = run_log_analyzer(artifact, f_or_p, fix_file_row[0], intermediate_path, iter_count, log_output_content)
if not candidates_found:
if check_if_restored_to_original_error(intermediate_path, artifact[DependencyAnalyzerConstants.IMAGE_TAG_KEY], artifact[DependencyAnalyzerConstants.JOB_KEY.format(f_or_p)][DependencyAnalyzerConstants.JOB_ID_KEY], iter_count, f_or_p, orig_log_path):
log_output_content.append(FinalOutcome.SUCCESS_RESTORED_TO_ORIGINAL_ERROR)
solve_result = FinalOutcome.SUCCESS_RESTORED_TO_ORIGINAL_ERROR
analyzer_result = True
else:
analyzer_result = False
if not new_errors:
log_output_content.append(FinalOutcome.SUCCESS_NO_LONGER_DEPENDENCY_ERROR)
solve_result = FinalOutcome.SUCCESS_NO_LONGER_DEPENDENCY_ERROR
else:
log_output_content.append(FinalOutcome.PARTIAL_EXHAUSTED_ALL_OPTIONS)
solve_result = FinalOutcome.PARTIAL_EXHAUSTED_ALL_OPTIONS
accepted_patch_str = curr_patch_str
cleanup(DependencyAnalyzerConstants.ARTIFACT_DIR.format(artifact[DependencyAnalyzerConstants.IMAGE_TAG_KEY], f_or_p), output_log_path, log_output_content, cloned_repo_dir)
break
new_candidates_found = False
last_added_patch_caused_error = False
most_probable_candidate = candidates_found[0]
last_added_patch = curr_patch_str.strip(DependencyAnalyzerConstants.CHAR_NEW_LINE).split(DependencyAnalyzerConstants.CHAR_NEW_LINE)[-1]
if len(new_errors):
# Check if last added patch caused error
if not last_added_patch_caused_error and last_added_patch.startswith(most_probable_candidate[DependencyAnalyzerConstants.CANDIDATE_NAME_KEY] + DependencyAnalyzerConstants.STR_EQUALS):
last_added_patch_caused_error = True
# check if any new candidates have been found
for candidate in candidates_found:
is_new_candidate = True
for prev_candidate in possible_candidates:
if prev_candidate[DependencyAnalyzerConstants.CANDIDATE_NAME_KEY] == candidate[DependencyAnalyzerConstants.CANDIDATE_NAME_KEY] and prev_candidate[DependencyAnalyzerConstants.CANDIDATE_VERSION_CONSTRAINT_KEY] == candidate[DependencyAnalyzerConstants.CANDIDATE_VERSION_CONSTRAINT_KEY]:
is_new_candidate = False
break
if is_new_candidate:
new_candidates_found = True
break
if build_outcome == BuildOutcomeType.NEW_ERROR and last_added_patch_caused_error:
# include new candidates if found else discard current candidate
if new_candidates_found:
curr_errors = new_errors.split(DependencyAnalyzerConstants.ERROR_LINE_DELIMITER)
patches, log_output_content = SolverUtils.generate_patch_combos(candidates_found[1:], commit_date, artifact[DependencyAnalyzerConstants.REPO_KEY], log_output_content, cloned_repo_dir, new_dep_files)
log_output_content.append('Last added patch caused new error...discarding')
elif build_outcome == BuildOutcomeType.NEW_ERROR or \
(build_outcome == BuildOutcomeType.SAME_ERROR and new_candidates_found):
# Discard all current candidates
log_output_content.append('Build error changed or new candidates found')
accepted_patch_str = curr_patch_str
curr_errors = new_errors.split(DependencyAnalyzerConstants.ERROR_LINE_DELIMITER)
patches, log_output_content = SolverUtils.generate_patch_combos(candidates_found, commit_date, artifact[DependencyAnalyzerConstants.REPO_KEY], log_output_content, cloned_repo_dir, new_dep_files)
possible_candidates = candidates_found
else:
log_output_content.append('Reject current incremental patch as it did not change error')
except Exception as e:
traceback.print_exc()
print(log_output_content)
log_output_content = []
solve_result = FinalOutcome.FAILED_ERROR.format(e)
end_time = time.time()
final_output.append([log_file_name, accepted_patch_str.replace(DependencyAnalyzerConstants.CHAR_NEW_LINE, DependencyAnalyzerConstants.CHAR_SPACE), solve_result, analyzer_result, (end_time - start_time), final_iter_count])
log_output_content.append(accepted_patch_str)
append_output_log(join(intermediate_path, artifact[DependencyAnalyzerConstants.IMAGE_TAG_KEY], f_or_p), log_output_content)
return final_output
def check_if_restored_to_original_error(intermediate_path, img_tag, job_id, iter_count, f_or_p, orig_log_path):
""" Compare current build to original build """
log_path = join(intermediate_path, img_tag, f_or_p, DependencyAnalyzerConstants.ITER_RUN_DIR_NAME.format(iter_count))
repro_log_path = join(log_path, DependencyAnalyzerConstants.LOG_FILE_NM_PATTERN.format(img_tag, f_or_p))
orig_log_name = '{}.{}.orig.log'.format(img_tag, f_or_p)
if not isfile(join(orig_log_path, orig_log_name)):
return False
if not isfile(repro_log_path):
return False
analyzer = Analyzer()
try:
success, attr_list = analyzer.compare_single_log(repro_log_path, join(orig_log_path,orig_log_name), job_id)
if not success:
for attr in attr_list:
if attr[DependencyAnalyzerConstants.ATTR_KEY] in [DependencyAnalyzerConstants.LOG_STATUS_KEY,
DependencyAnalyzerConstants.LOG_TESTS_RUN_KEY,
DependencyAnalyzerConstants.LOG_TESTS_FAILED_KEY]:
return False
except Exception as e:
print(e)
return False
return True
def clone_repo(artifact, f_or_p, log_output_content, source_code_path):
""" Get Source code """
# return join(source_code_path,
# artifact[DependencyAnalyzerConstants.IMAGE_TAG_KEY] +
# DependencyAnalyzerConstants.CHAR_UNDERSCORE + f_or_p), log_output_content
clone_location = join(source_code_path, '{}_{}'.format(artifact[DependencyAnalyzerConstants.IMAGE_TAG_KEY], f_or_p))
if isdir(clone_location):
_, stdout, stderr, ok = DependencyAnalyzerUtils._run_command(DependencyAnalyzerConstants.SUDO_RM_RF_CMD.format(clone_location))
diff_url = artifact[DependencyAnalyzerConstants.DIFF_URL_KEY]
response = requests.get(diff_url, auth=(
DependencyAnalyzerConstants.GITHUB_USER_NAME, DependencyAnalyzerConstants.GITHUB_AUTH_TOKEN))
if response.status_code != HTTPStatus.OK:
log_output_content.append('Diff not found')
return None, log_output_content
if f_or_p == DependencyAnalyzerConstants.STR_FAILED:
commit = diff_url.split(DependencyAnalyzerConstants.STR_TWO_DOTS)[0].split(DependencyAnalyzerConstants.CHAR_SLASH)[-1]
else:
commit = diff_url.split(DependencyAnalyzerConstants.STR_TWO_DOTS)[-1]
zip_download_url = DependencyAnalyzerConstants.GITHUB_ARCHIVE_API_URL.format(artifact[DependencyAnalyzerConstants.REPO_KEY], commit)
response = requests.get(zip_download_url, stream=True, auth=(
DependencyAnalyzerConstants.GITHUB_USER_NAME, DependencyAnalyzerConstants.GITHUB_AUTH_TOKEN))
if response.status_code != HTTPStatus.OK:
log_output_content.append('Failed to get source code for failed commit from git archive')
return None, log_output_content
resp_zip = '{}_{}.zip'.format(artifact[DependencyAnalyzerConstants.IMAGE_TAG_KEY], f_or_p)
    log_output_content.append('Successfully retrieved source code for failed commit')
handle = open(resp_zip, "wb")
for chunk in response.iter_content(chunk_size=512):
if chunk: # filter out keep-alive new chunks
handle.write(chunk)
handle.close()
with zipfile.ZipFile(resp_zip, DependencyAnalyzerConstants.FILE_READ_MODE) as f:
inner_dir = list({item.split(DependencyAnalyzerConstants.CHAR_SLASH)[0] for item in f.namelist()})[0]
with zipfile.ZipFile(join(os.getcwd(), resp_zip), DependencyAnalyzerConstants.FILE_READ_MODE) as zip_ref:
zip_ref.extractall(clone_location)
for each_file in Path(join(clone_location, inner_dir)).glob('*.*'):
trg_path = each_file.parent.parent
each_file.rename(trg_path.joinpath(each_file.name))
shutil.rmtree(join(clone_location, inner_dir))
if os.path.exists(join(os.getcwd(), resp_zip)):
_, stdout, stderr, ok = DependencyAnalyzerUtils._run_command(DependencyAnalyzerConstants.SUDO_RM_CMD.format(join(os.getcwd(), resp_zip)))
return clone_location, log_output_content
def execute_patch_changes(container_id, f_or_p, artifact, curr_errors, log_output_content):
""" Copy patch_requirements.txt to Docker container and re-run build job """
execute_script_in_container(container_id, f_or_p, artifact[DependencyAnalyzerConstants.REPO_KEY], DependencyAnalyzerConstants.GENERATED_BUILD_SCRIPT_NAME, log_output_content, artifact[DependencyAnalyzerConstants.IMAGE_TAG_KEY])
return check_build_result(f_or_p, curr_errors, log_output_content, artifact[DependencyAnalyzerConstants.IMAGE_TAG_KEY])
def run_log_analyzer(artifact, f_or_p, log_file_nm, intermediate_path, iter_count, log_output_content):
""" Run log analyzer to check if a dependency issue still exists, if it does what are the errors and what are the possible candidates """
if not isdir(join(expanduser(DependencyAnalyzerConstants.CHAR_TILDE), DependencyAnalyzerConstants.INTERMEDIATE_LOG_DIR)):
mkdir_log_path_cmd = DependencyAnalyzerConstants.CREATE_DIR_CMD.format(expanduser(DependencyAnalyzerConstants.CHAR_TILDE),
DependencyAnalyzerConstants.INTERMEDIATE_LOG_DIR)
_, stdout, stderr, ok = DependencyAnalyzerUtils._run_command(mkdir_log_path_cmd)
if not ok:
log_output_content.append(stderr)
log_output_content.append('Failed to create intermediate log folder')
print('Failed to create intermediate log folder')
return None, None, None, log_output_content
change_per_cmd = DependencyAnalyzerConstants.CHANGE_PERMISSION_CMD.format(join(expanduser(DependencyAnalyzerConstants.CHAR_TILDE),
DependencyAnalyzerConstants.INTERMEDIATE_LOG_DIR))
_, stdout, stderr, ok = DependencyAnalyzerUtils._run_command(change_per_cmd)
if not ok:
log_output_content.append(stderr)
log_output_content.append('Failed to change permission')
print('Failed to change permission')
return None, None, None, log_output_content
cp_log_cmd = DependencyAnalyzerConstants.COPY_FILE_CMD.format(join(expanduser(DependencyAnalyzerConstants.CHAR_TILDE),
DependencyAnalyzerConstants.BUGSWARM_CLIENT_SANDBOX,
DependencyAnalyzerConstants.LOG_FILE_NM_PATTERN.format(artifact[DependencyAnalyzerConstants.IMAGE_TAG_KEY], f_or_p)),
join(expanduser(DependencyAnalyzerConstants.CHAR_TILDE),
DependencyAnalyzerConstants.INTERMEDIATE_LOG_DIR,
log_file_nm))
_, stdout, stderr, ok = DependencyAnalyzerUtils._run_command(cp_log_cmd)
if not ok:
print(stderr)
log_output_content.append(stderr)
log_output_content.append('Failed to copy new log')
print('Failed to copy new log')
return None, None, None, log_output_content
cp_art_cmd = DependencyAnalyzerConstants.COPY_FILE_CMD.format(DependencyAnalyzerConstants.ARTIFACTS_JSON_FILE_NAME,
join(expanduser(DependencyAnalyzerConstants.CHAR_TILDE),
DependencyAnalyzerConstants.INTERMEDIATE_LOG_DIR))
_, stdout, stderr, ok = DependencyAnalyzerUtils._run_command(cp_art_cmd)
if not ok:
log_output_content.append(stderr)
log_output_content.append('Failed to copy artifacts.json')
print('Failed to copy artifacts.json')
return None, None, None, log_output_content
log_analyzer_cmd = DependencyAnalyzerConstants.BUGSWARM_RUN_LOG_ANALYZER\
.format(join(expanduser(DependencyAnalyzerConstants.CHAR_TILDE), DependencyAnalyzerConstants.INTERMEDIATE_LOG_DIR), log_file_nm)
_, stdout, stderr, ok = DependencyAnalyzerUtils._run_command(log_analyzer_cmd)
log_output_content.append(stdout)
if not ok:
log_output_content.append(stderr)
log_output_content.append('Failed to run log analyzer')
print('Failed to run log analyzer')
return None, None, None, log_output_content
_, stdout, stderr, ok = DependencyAnalyzerUtils._run_command('cp {} {}'.format('artifacts_dependency_broken_' + log_file_nm.split()[0] + '.csv',
join(intermediate_path, artifact[DependencyAnalyzerConstants.IMAGE_TAG_KEY],
f_or_p, DependencyAnalyzerConstants.ITER_RUN_DIR_NAME.format(iter_count))))
errors = None
candidates = None
files = None
found_row = False
with open('artifacts_dependency_broken_' + log_file_nm.split()[0] + '.csv', DependencyAnalyzerConstants.FILE_READ_MODE) as f:
reader = csv.reader(f)
next(reader, None)
for row in reader:
found_row = True
errors = row[1]
candidates = json.loads(row[2])
files = row[3]
break
_, stdout, stderr, ok = DependencyAnalyzerUtils._run_command(DependencyAnalyzerConstants.SUDO_RM_CMD.format('artifacts_dependency_broken_' + log_file_nm.split()[0] + '.csv'))
if found_row:
return errors, candidates, files, log_output_content
return None, None, None, log_output_content
def check_build_result(f_or_p, error_list, log_output_content, img_tag):
""" Check outcome of build from log file received from Docker container """
log_contents = []
with open(join(expanduser(DependencyAnalyzerConstants.CHAR_TILDE),
DependencyAnalyzerConstants.BUGSWARM_CLIENT_SANDBOX,
DependencyAnalyzerConstants.LOG_FILE_NM_PATTERN.format(img_tag, f_or_p)),
DependencyAnalyzerConstants.FILE_READ_MODE) as f:
log_contents = f.read().splitlines()
    for row_id in range(len(log_contents) - 1, -1, -1):
if DependencyAnalyzerConstants.BUILD_SUCCESS_LOG in log_contents[row_id]:
return BuildOutcomeType.BUILD_SUCCESSFUL, log_output_content
else:
for error in error_list:
if len(error.strip()) and error in log_contents[row_id]:
return BuildOutcomeType.SAME_ERROR, log_output_content
return BuildOutcomeType.NEW_ERROR, log_output_content
def execute_script_in_container(container_id, f_or_p, repo_dir_name, build_script_name, log_output_content, img_tag):
""" Execute script in container which will start travis job in Docker and copy the log
back to host """
docker_exec_cmd = DependencyAnalyzerConstants.DOCKER_EXEC_SCRIPT_CMD.format(container_id, f_or_p, repo_dir_name,
build_script_name)
_, stdout, stderr, ok = DependencyAnalyzerUtils._run_command(docker_exec_cmd)
if not ok:
        log_output_content.append('Failed to execute script inside Docker container')
return False, log_output_content
log_output_content.append('Successfully executed script in container')
# copying log from container
docker_log_cp_cmd = DependencyAnalyzerConstants.DOCKER_LOG_CP_CMD.format(container_id,
f_or_p,
join(expanduser(DependencyAnalyzerConstants.CHAR_TILDE),
DependencyAnalyzerConstants.BUGSWARM_CLIENT_SANDBOX,
DependencyAnalyzerConstants.LOG_FILE_NM_PATTERN.format(img_tag, f_or_p)))
_, stdout, stderr, ok = DependencyAnalyzerUtils._run_command(docker_log_cp_cmd)
if not ok:
log_output_content.append('Failed to copy log from Docker container')
return False, log_output_content
log_output_content.append('Successfully copied log from container')
return True, log_output_content
def copy_files_to_container(img_tag, cloned_repo_dir, f_or_p, log_output_content, repo_name):
""" Copy build script, source code and python script to run travis build to container """
container_name = DependencyAnalyzerConstants.ARTIFACT_DIR.format(img_tag, f_or_p)
docker_container_cmd = DependencyAnalyzerConstants.DOCKER_RUN_IMAGE_CMD.format(container_name, img_tag)
_, stdout, stderr, ok = DependencyAnalyzerUtils._run_command(docker_container_cmd)
if not ok:
log_output_content.append(stderr)
log_output_content.append('Failed to run Docker container')
return None, log_output_content
get_container_id_cmd = DependencyAnalyzerConstants.DOCKER_GET_CONTAINER_ID_CMD + container_name + '"'
_, container_id, stderr, _ = DependencyAnalyzerUtils._run_command(get_container_id_cmd)
copy_file_cmd = DependencyAnalyzerConstants.DOCKER_COPY_SCRIPT_CMD.format(container_id)
_, stdout, stderr, ok = DependencyAnalyzerUtils._run_command(copy_file_cmd)
if not ok:
log_output_content.append('Failed to copy file to Docker container')
return None, log_output_content
log_output_content.append('Successfully copied script to container')
build_script_path = join(cloned_repo_dir, DependencyAnalyzerConstants.GENERATED_BUILD_SCRIPT_NAME)
repo_dir_path = pathlib.Path(cloned_repo_dir).parent
cp_build_script_cmd = DependencyAnalyzerConstants.DOCKER_COPY_BUILD_SCRIPT_CMD\
.format(build_script_path, container_id, f_or_p)
cp_repo_dir_cmd = DependencyAnalyzerConstants.DOCKER_COPY_SOURCE_CODE_CMD.format(cloned_repo_dir, container_id, f_or_p, repo_name)
_, stdout, stderr, ok = DependencyAnalyzerUtils._run_command(cp_build_script_cmd)
if not ok:
DependencyAnalyzerUtils.print_error_msg(stderr, 'Unable to copy build script', cp_build_script_cmd)
return None, log_output_content
_, stdout, stderr, ok = DependencyAnalyzerUtils._run_command(cp_repo_dir_cmd)
if not ok:
DependencyAnalyzerUtils.print_error_msg(stderr, 'Unable to copy source code', cp_repo_dir_cmd)
return None, log_output_content
return container_id, log_output_content
def modify_travis_yml(f_or_p, cloned_repo_dir, repo, log_output_content):
""" Modify .travis.yml to install patch dependencies before project dependencies """
with open(join(cloned_repo_dir, DependencyAnalyzerConstants.TRAVIS_YAML_FILE_NAME), DependencyAnalyzerConstants.FILE_READ_MODE) as f:
yaml_content_dict = yaml.load(f, Loader=yaml.FullLoader)
yaml_content_dict = remove_upgrade_option(yaml_content_dict)
pip_install_found, modified_dict = modify_install_step(yaml_content_dict, DependencyAnalyzerConstants.TRAVIS_BEFORE_INSTALL, f_or_p, repo)
if not pip_install_found:
pip_install_found, modified_dict = modify_install_step(yaml_content_dict, DependencyAnalyzerConstants.TRAVIS_INSTALL, f_or_p, repo)
if not pip_install_found:
log_output_content.append('Unable to find pip install step...trying to find the install stage')
install_found, modified_dict = modify_install_step(yaml_content_dict, DependencyAnalyzerConstants.TRAVIS_INSTALL, f_or_p, repo, False)
if not install_found:
added_install, modified_dict = add_install_step(yaml_content_dict, f_or_p, repo)
if not added_install:
                log_output_content.append('Unable to add install step')
return False, log_output_content
with open(join(cloned_repo_dir, DependencyAnalyzerConstants.TRAVIS_YAML_FILE_NAME), DependencyAnalyzerConstants.FILE_WRITE_MODE) as f:
yaml.dump(modified_dict, f)
log_output_content.append('Successfully added install step for patch_requirements')
return True, log_output_content
def add_install_step(yaml_content_dict, f_or_p, repo):
modified_dict = {}
inserted_install_step = False
for key in yaml_content_dict:
if not inserted_install_step and key == DependencyAnalyzerConstants.TRAVIS_BEFORE_INSTALL:
modified_dict[key] = yaml_content_dict[key]
modified_dict[DependencyAnalyzerConstants.TRAVIS_INSTALL] = [DependencyAnalyzerConstants.PIP_INSTALL_PATCH.format(f_or_p, repo)]
elif not inserted_install_step and key == DependencyAnalyzerConstants.TRAVIS_SCRIPT:
modified_dict[DependencyAnalyzerConstants.TRAVIS_INSTALL] = [DependencyAnalyzerConstants.PIP_INSTALL_PATCH.format(f_or_p, repo)]
modified_dict[key] = yaml_content_dict[key]
else:
modified_dict[key] = yaml_content_dict[key]
return inserted_install_step, modified_dict
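# remove_upgrade_option recursively strips pip's upgrade flags from the YAML
# content so previously pinned patch versions are not overwritten; assuming
# the *_FLAG constants hold the usual strings, e.g.
# "pip install --upgrade requests" becomes "pip install requests".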
def remove_upgrade_option(content):
if isinstance(content, str):
content = content.replace(DependencyAnalyzerConstants.PIP_INSTALL_UPGRADE_FLAG, DependencyAnalyzerConstants.PIP_INSTALL).replace(DependencyAnalyzerConstants.PIP_INSTALL_U_FLAG, DependencyAnalyzerConstants.PIP_INSTALL)
return content.replace(DependencyAnalyzerConstants.PIP3_INSTALL_UPGRADE_FLAG, DependencyAnalyzerConstants.PIP3_INSTALL).replace(DependencyAnalyzerConstants.PIP3_INSTALL_U_FLAG, DependencyAnalyzerConstants.PIP3_INSTALL)
if isinstance(content, list):
new_list = []
for c in content:
new_list.append(remove_upgrade_option(c))
return new_list
if isinstance(content, dict):
new_dict = {}
for key in content:
new_dict[key] = remove_upgrade_option(content[key])
return new_dict
return content
def modify_install_step(yaml_content_dict, stage_key, f_or_p, repo, add_before_pip=True):
if stage_key in yaml_content_dict:
stage_content = yaml_content_dict[stage_key]
pip_install_found = False
if not add_before_pip:
pip_install_found = True
stage_content = [DependencyAnalyzerConstants.PIP_INSTALL_PATCH.format(f_or_p, repo), stage_content]
else:
if DependencyAnalyzerConstants.PATCH_DEPENDENCY_FILE_NAME in stage_content:
return True, yaml_content_dict
            if isinstance(stage_content, str) and \
                    (DependencyAnalyzerConstants.PIP_INSTALL in stage_content
                     or DependencyAnalyzerConstants.SETUP_PY in stage_content):
pip_install_found = True
stage_content = [DependencyAnalyzerConstants.PIP_INSTALL_PATCH.format(f_or_p, repo), stage_content]
            elif isinstance(stage_content, str) and DependencyAnalyzerConstants.PIP3_INSTALL in stage_content:
pip_install_found = True
stage_content = [DependencyAnalyzerConstants.PIP3_INSTALL_PATCH.format(f_or_p, repo), stage_content]
            else:  # stage_content is a list of install commands
for i in range(0, len(stage_content)):
if DependencyAnalyzerConstants.PIP_INSTALL in stage_content[i]\
or DependencyAnalyzerConstants.SETUP_PY in stage_content[i]:
pip_install_found = True
stage_content.insert(i, DependencyAnalyzerConstants.PIP_INSTALL_PATCH.format(f_or_p, repo))
break
elif DependencyAnalyzerConstants.PIP3_INSTALL in stage_content[i]:
pip_install_found = True
stage_content.insert(i, DependencyAnalyzerConstants.PIP3_INSTALL_PATCH.format(f_or_p, repo))
break
if pip_install_found:
yaml_content_dict[stage_key] = stage_content
return True, yaml_content_dict
else:
return False, yaml_content_dict
for key in yaml_content_dict:
if isinstance(yaml_content_dict[key], dict):
is_modified, modified_dict = modify_install_step(yaml_content_dict[key], stage_key, f_or_p, repo)
if is_modified:
yaml_content_dict[key] = modified_dict
return True, yaml_content_dict
elif isinstance(yaml_content_dict[key], list):
for i in range(0, len(yaml_content_dict[key])):
ele = yaml_content_dict[key][i]
if isinstance(ele, dict):
is_modified, modified_dict = modify_install_step(ele, stage_key, f_or_p, repo)
if is_modified:
yaml_content_dict[key][i] = modified_dict
return True, yaml_content_dict
return False, yaml_content_dict
def generate_and_prepare_build_script(f_or_p, cloned_repo_dir, repo, log_output_content):
""" Generate new build script using travis-build to reflect changes made to .travis.yml
Modify build script to prevent git code checkout during build """
travis_command = DependencyAnalyzerConstants.TRAVIS_COMPILE_BUILD_SCRIPT.format(join(expanduser(DependencyAnalyzerConstants.CHAR_TILDE),
DependencyAnalyzerConstants.TRAVIS_BUILD_LOCATION))
cd_command = DependencyAnalyzerConstants.CHANGE_DIR_CMD.format(cloned_repo_dir)
_, stdout, stderr, ok = DependencyAnalyzerUtils._run_command(f'{cd_command}; {travis_command}')
if not ok:
log_output_content.append(stderr)
log_output_content.append('Unable to generate travis build script')
return False, log_output_content
# prevent git checkout
build_script_path = join(cloned_repo_dir, DependencyAnalyzerConstants.GENERATED_BUILD_SCRIPT_NAME)
lines = []
with open(build_script_path) as f:
skip = False
for line in f:
if DependencyAnalyzerConstants.TRAVIS_START_GIT_CHECKOUT_CMD in line:
skip = True
elif DependencyAnalyzerConstants.TRAVIS_END_GIT_CHECKOUT_CMD in line:
skip = False
lines.append(DependencyAnalyzerConstants.TRAVIS_PREVENT_GIT_CHECKOUT_CMD.format(f_or_p, repo))
else:
if not skip:
lines.append(line)
# Overwrite the original build script with the modified build script.
with open(build_script_path, DependencyAnalyzerConstants.FILE_WRITE_MODE) as f2:
for l in lines:
f2.write(l)
log_output_content.append('Successfully generated new build script')
return True, log_output_content
def extract_from_file_name(log_file_name, artifact_dict):
""" Extract info from BugCatcher log file name """
image_tag = log_file_name.split(DependencyAnalyzerConstants.CHAR_STOP)[0]
artifact = artifact_dict[image_tag]
f_or_p = log_file_name.split(DependencyAnalyzerConstants.CHAR_STOP)[1]
return artifact, f_or_p
def get_dependency_analyzer_results(input_files_path):
""" Read results from csv output of Step 1 """
content = []
with open(join(input_files_path, DependencyAnalyzerConstants.LOG_DEP_ANALYZER_FILENM),
DependencyAnalyzerConstants.FILE_READ_MODE) as f:
reader = csv.reader(f)
next(reader, None)
for row in reader:
content.append(row)
return content
def cleanup(container_name=None, output_log_path=None, log_output_content=None, repo_dir=None):
if output_log_path:
append_output_log(output_log_path, log_output_content)
log_output_content = []
if container_name:
SolverUtils.remove_docker_container(container_name)
remove_intermediate_logs()
#if repo_dir:
# _, stdout, stderr, ok = DependencyAnalyzerUtils._run_command(DependencyAnalyzerConstants.REMOVE_DIR_CMD.format(repo_dir, DependencyAnalyzerConstants.CHAR_EMPTY))
def remove_intermediate_logs():
_, stdout, stderr, ok = DependencyAnalyzerUtils._run_command(DependencyAnalyzerConstants.REMOVE_DIR_CMD.format(expanduser(DependencyAnalyzerConstants.STR_NOT_EQUALS_TILDE),
DependencyAnalyzerConstants.INTERMEDIATE_LOG_DIR))
if __name__ == '__main__':
# python3 bugswarm_automate_iterative_dependency_solver.py
# -path <path to artifacts_dependency_broken.csv + artifacts.json>
# -sourcecode <path to downloaded source code archives>
# -intermediates <path to intermediate folder>
main(sys.argv)
| python | 47,007 |
# Contains the code to test our about page.
# Imports --------------------------------------------------------------------------------
from tests.functional_test_framework import LiveServerTestCase
# Tests ----------------------------------------------------------------------------------
class TestAboutPage(LiveServerTestCase):
pass
| python | 344 |
from sklearn.metrics import ConfusionMatrixDisplay
from bender.evaluator.interface import Evaluator
from bender.exporter.exporter import Exporter
from bender.split_strategy.split_strategy import TrainingDataSet
from bender.trained_model.interface import TrainedClassificationModel, TrainedEstimatorModel, TrainedModel
class ConfusionMatrix(Evaluator):
exporter: Exporter
def __init__(self, exporter: Exporter) -> None:
self.exporter = exporter
async def evaluate(self, model: TrainedModel, data_set: TrainingDataSet) -> None:
if isinstance(model, TrainedEstimatorModel):
display = ConfusionMatrixDisplay.from_estimator(
model.estimator(), data_set.x_validate, data_set.y_validate.astype(float)
)
elif isinstance(model, TrainedClassificationModel):
predictions = model.predict(data_set.x_validate)
display = ConfusionMatrixDisplay.from_predictions(data_set.y_validate, predictions)
else:
return
_ = display.ax_.set_title('Confusion Matrix')
await self.exporter.store_figure(display.figure_)
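# Minimal usage sketch (assumes an already-configured Exporter instance plus a
# trained model and TrainingDataSet from the surrounding bender pipeline; no
# concrete implementation names are assumed here):
#
#     evaluator = ConfusionMatrix(exporter)
#     await evaluator.evaluate(trained_model, data_set)
#
# Models that are neither TrainedEstimatorModel nor TrainedClassificationModel
# are silently skipped.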
| python | 1,133 |
""" Double beta decay utility converter
Provides a useful tool for converting between different double beta
decay parameters.
"""
import numpy
from echidna.calc import constants as const
import echidna.test.physics_tests as physics_tests
class DBIsotope(object):
""" Class which calculates expected counts for a DBD isotope
over a given experiment livetime.
Args:
name (string): Name of the isotope.
atm_weight_iso (float): Atomic weight of isotope in g/mol.
atm_weight_nat (float): Atomic weight of natural element in g/mol.
abundance (float): Natural abundance of isotope with 0 to 1
equivalent to 0% to 100%.
phase_space (float): Phase space of the isotope.
matrix_element (float): Matrix element of the isotope.
loading (float, optional): Loading of isotope with 0 to 1
equivalent to 0% to 100%. Default is stored in
:class:`echidna.calc.constants`
scint_density (float, optional): Density of liquid scintillator in
kg/mm^3. Default is stored in :class:`echidna.calc.constants`
outer_radius (float, optional): Radius of outer container
containing fiducial volume, e.g. AV, in mm. Default is stored in
:class:`echidna.calc.constants`
Attributes:
_name (string): Name of the isotope.
_atm_weight_iso (float): Atomic weight of isotope in g/mol.
_atm_weight_nat (float): Atomic weight of natural element in g/mol.
_abundance (float): Natural abundance of isotope with 0 to 1
equivalent to 0% to 100%.
_phase_space (float): Phase space of the isotope.
_matrix_element (float): Matrix element of the isotope.
_loading (float): Loading of isotope with 0 to 1 equivalent to 0%
to 100%. Default is stored in :class:`echidna.calc.constants`
_scint_density (float): Density of liquid scintillator in
kg/mm^3. Default is stored in :class:`echidna.calc.constants`
_outer_radius (float): Radius of outer container containing
fiducial volume, e.g. AV, in mm. Default is stored in
:class:`echidna.calc.constants`
Raises:
ValueError: If abundance is < 0. or > 1.
ValueError: If :obj:`outer_radius` is negative or zero.
"""
def __init__(self, name, atm_weight_iso, atm_weight_nat, abundance,
phase_space, matrix_element, loading=None,
outer_radius=None, scint_density=None):
if abundance < 0. or abundance > 1.:
raise ValueError("Abundance ranges from 0 to 1")
self._name = name
self._atm_weight_iso = atm_weight_iso
self._atm_weight_nat = atm_weight_nat
self._abundance = abundance
self._phase_space = phase_space
self._matrix_element = matrix_element
if loading:
if loading < 0. or loading > 1.:
raise ValueError("Loading ranges from 0 to 1")
self._loading = loading
else:
# Default SNO+ Loading
self._loading = const._loading
if outer_radius:
if outer_radius <= 0.:
raise ValueError("Outer radius must be positive and non-zero")
self._outer_radius = outer_radius
else:
self._outer_radius = const._av_radius
if scint_density:
self._scint_density = scint_density
else:
self._scint_density = const._scint_density
def get_n_atoms(self, loading=None, scint_density=None,
target_mass=None, scint_mass=None, outer_radius=None):
""" Calculates the number of atoms of the double-beta isotope.
Set up to follow the full (SNO+-specific) calculation as per
SNO+-doc-1728v2 but can look at other scenarios/detectors by
overriding the default args.
.. warning:: All args default to SNO+ specific values!
Args:
loading (float, optional): Loading of isotope with 0 to 1
equivalent to 0% to 100%. Default is stored as a class
variable.
scint_density (float, optional): Density of liquid scintillator in
kg/mm^3. Default is stored as a class variable.
target_mass (float, optional): Target mass in kg. Calculates a
value by default.
scint_mass (float, optional): Mass of scintillator in kg.
Calculates a value by default.
outer_radius (float, optional): Radius of outer container
containing fiducial volume, e.g. AV, in mm. Default is stored
as a class variable.
Raises:
ValueError: If :obj:`loading` is not between zero and 1.
ValueError: If :obj:`outer_radius` is negative or zero.
Returns:
float: Number of atoms.
"""
# Set defaults
if outer_radius is None: # use class variable
outer_radius = self._outer_radius
if outer_radius <= 0.:
raise ValueError("Outer radius must be positive and non-zero")
if loading is None: # use class variable
loading = self._loading
if loading < 0. or loading > 1.:
raise ValueError("Loading ranges from 0 to 1")
if scint_density is None: # use class variable
scint_density = self._scint_density
if target_mass is None: # Calculate target mass
if scint_mass is None: # Calculate scint_mass
# Mass of scintillator
volume = (4./3.) * numpy.pi * outer_radius**3 # mm^3
scint_mass = scint_density * volume
# Mass fraction
mass_iso = self._atm_weight_iso*const._atomic_mass_unit # kg/atom
mass_nat = self._atm_weight_nat*const._atomic_mass_unit # kg/atom
mass_fraction = self._abundance*mass_iso/mass_nat
target_mass = mass_fraction * loading * scint_mass
n_atoms = (target_mass*const._n_avagadro) /\
(self._atm_weight_iso*1.e-3)
return n_atoms
def half_life_to_activity(self, half_life, n_atoms=None):
""" Calculates the activity for an isotope with a given half-life
and number of atoms.
Args:
half_life (float): Half-life of an isotope in years.
n_atoms (float, optional): Number of atoms of an isotope.
Returns:
float: Activity in decays per year.
"""
if n_atoms is None: # Calculate n_atoms from class variables
n_atoms = self.get_n_atoms()
return (numpy.log(2)/half_life)*n_atoms
def activity_to_half_life(self, activity, n_atoms=None):
""" Calculates the half-life of an isotope with a given
activity and number of atoms.
Args:
activity (float): Activity of the isotope in
:math:`years^{-1}`.
n_atoms (float, optional): Number of atoms of an isotope.
Returns:
float: Half-life in years.
"""
if n_atoms is None: # Calculate n_atoms from class variables
n_atoms = self.get_n_atoms()
return numpy.log(2)*n_atoms/activity
def eff_mass_to_half_life(self, eff_mass):
""" Converts from effective majorana mass to :math:`0\\nu2\\beta`
half-life.
Args:
eff_mass (float): Effective majorana mass, in eV.
Raises:
ValueError: If effective mass is not positive and non-zero.
Returns:
float: :math:`0\\nu2\\beta` half-life, in years.
"""
if eff_mass <= 0.:
raise ValueError("Effective mass should be positive and non-zero")
sq_mass_ratio = eff_mass**2/const._electron_mass**2
return 1/(self._phase_space*self._matrix_element**2*sq_mass_ratio)
def half_life_to_eff_mass(self, half_life):
""" Converts from :math:`0\\nu2\\beta` half-life to effective
majorana mass.
Args:
half_life (float): :math:`0\\nu2\\beta` half-life, in years.
Returns:
float: Effective majorana mass, in eV.
"""
return numpy.sqrt(const._electron_mass ** 2 /
(self._phase_space * self._matrix_element ** 2 *
half_life))
def activity_to_counts(self, activity, livetime=5.):
""" Converts activity to number of counts, assuming constant activity.
Args:
activity (float): Initial activity of the isotope in
:math:`years^{-1}`.
livetime (float, optional): Amount of years of data taking.
Default is 5 years.
Raises:
ValueError: If :obj:`livetime` is not positive and non-zero.
Returns:
float: Number of counts.
"""
if livetime <= 0.:
raise ValueError("Livetime should be positive and non zero")
return activity*livetime
def counts_to_activity(self, counts, livetime=5.):
""" Converts counts to activity, assuming constant activity.
Args:
counts (float): Number of counts.
livetime (float, optional): Amount of years of data taking.
Default is 5 years.
Raises:
ValueError: If :obj:`livetime` is not positive and non-zero.
Returns:
float: Activity of the isotope in :math:`years^{-1}`.
"""
if livetime <= 0.:
raise ValueError("Livetime should be positive and non zero")
return counts/livetime
def counts_to_eff_mass(self, counts, n_atoms=None, livetime=5.):
""" Converts from signal counts to effective majorana mass.
Args:
counts (float): Number of signal counts within the livetime
specified.
n_atoms (float, optional): Number of isotope atoms/nuclei that could
potentially decay to produce signal.
livetime (float, optional): Amount of years of data taking.
Default is 5 years.
Raises:
ValueError: If :obj:`livetime` is not positive and non-zero.
Returns:
float: Effective majorana mass in eV.
"""
if n_atoms is None: # Calculate n_atoms from class variables
n_atoms = self.get_n_atoms()
if livetime <= 0.:
raise ValueError("Livetime should be positive and non zero")
half_life = self.counts_to_half_life(counts, n_atoms, livetime)
return self.half_life_to_eff_mass(half_life)
def eff_mass_to_counts(self, eff_mass, n_atoms=None, livetime=5.):
""" Converts from effective majorana mass to signal counts.
Args:
eff_mass (float): Effective majorana mass in eV.
n_atoms (float, optional): Number of isotope atoms/nuclei that could
potentially decay to produce signal.
livetime (float, optional): Amount of years of data taking.
Default is 5 years.
Raises:
ValueError: If effective mass is not positive and non-zero.
            ValueError: If :obj:`livetime` is not positive and non-zero.
Returns:
float: Expected number of signal counts within the livetime
specified.
"""
if eff_mass <= 0.:
raise ValueError("Effective mass should be positive and non-zero")
if n_atoms is None: # Calculate n_atoms from class variables
n_atoms = self.get_n_atoms()
if livetime <= 0.:
raise ValueError("Livetime should be positive and non zero")
half_life = self.eff_mass_to_half_life(eff_mass)
return self.half_life_to_counts(half_life, n_atoms, livetime)
def half_life_to_counts(self, half_life, n_atoms=None, livetime=5.):
""" Converts from isotope's half-life to signal counts.
Args:
half_life (float): Isotope's :math:`0\\nu2\\beta` half-life in
years.
n_atoms (float, optional): Number of isotope atoms/nuclei that could
potentially decay to produce signal.
livetime (float, optional): Amount of years of data taking.
Default is 5 years.
Raises:
ValueError: If :obj:`livetime` is not positive and non-zero.
Returns:
float: Expected number of counts.
"""
if n_atoms is None: # Calculate n_atoms from class variables
n_atoms = self.get_n_atoms()
if livetime <= 0.:
raise ValueError("Livetime should be positive and non zero")
activity = self.half_life_to_activity(half_life, n_atoms)
return self.activity_to_counts(activity, livetime)
def counts_to_half_life(self, counts, n_atoms=None, livetime=5.):
""" Converts from signal count to isotope's half-life.
Args:
count (float): Number of signal counts within the livetime
specified.
n_atoms (float, optional): Number of isotope atoms/nuclei that could
potentially decay to produce signal.
livetime (float, optional): Amount of years of data taking.
Default is 5 years.
Raises:
ValueError: If :obj:`livetime` is not positive and non-zero.
Returns:
float: Isotope's :math:`0\\nu2\\beta` half-life in years.
"""
if n_atoms is None: # Calculate n_atoms from class variables
n_atoms = self.get_n_atoms()
if livetime <= 0.:
raise ValueError("Livetime should be positive and non zero")
activity = self.counts_to_activity(counts, livetime)
return self.activity_to_half_life(activity, n_atoms)
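# Minimal usage sketch (illustrative values only, taken from the Te130 numbers
# used in test() below; not validated physics):
#
#     te130 = DBIsotope("Te130", 129.906229, 127.6, 0.3408,
#                       phase_space=3.69e-14, matrix_element=4.03)
#     half_life = te130.eff_mass_to_half_life(0.1)          # eV -> years
#     counts = te130.half_life_to_counts(half_life, livetime=5.)
#     recovered = te130.counts_to_eff_mass(counts)          # ~0.1 eV again
#
# The conversions are mutual inverses, so chaining one with its inverse should
# recover the starting value up to floating-point error.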
def test(args):
""" Test function to show agreement with Andy's numbers.
Args:
args (dict): Command line arguments from :mod:`argparse`
"""
# Load signal
signal = store.load(args.signal)
# Cut to 3.5m FV and 5 year livetime
signal.shrink(0.0, 10.0, 0.0, 3500.0, 0.0, 5.0)
# Shrink to ROI
signal.shrink_to_roi(2.46, 2.68, 0) # ROI used by Andy
print "============"
print "decay module"
print "------------"
# Check results of each function
# Create instance of DBIsotope for Te130
Te130_atm_weight = 129.906229 # SNO+-doc-1728v2
TeNat_atm_weight = 127.6 # SNO+-doc-1728v2
Te130_abundance = 0.3408 # SNO+-doc-1728v2
phase_space = 3.69e-14 # PRC 85, 034316 (2012)
matrix_element = 4.03 # IBM-2 PRC 87, 014315 (2013)
te130_converter = DBIsotope("Te130", Te130_atm_weight,
TeNat_atm_weight, Te130_abundance,
phase_space, matrix_element)
# Check get_n_atoms for 0.3% loading, no FV cut
expected = 3.7573e27 # SNO+-doc-1728v2
result, message = physics_tests.test_function_float(
te130_converter.get_n_atoms, expected)
print message, "(no FV cut)"
# Check get_n_atoms with SNO+ defaults
# calculated - A Back 2015-02-25, based on SNO+-doc-1728v2
expected = 7.4694e26
result, message = physics_tests.test_function_float(
te130_converter.get_n_atoms, expected)
    print(message)
# Create a DBIsotope instance for KLZ
# Molar Mass Calculator, http://www.webqc.org/mmcalc.php, 2015-05-07
Xe136_atm_weight = 135.907219
# Molar Mass Calculator, http://www.webqc.org/mmcalc.php, 2015-06-03
Xe134_atm_weight = 133.90539450
# We want the atomic weight of the enriched Xenon
XeEn_atm_weight = 0.9093*Xe136_atm_weight + 0.0889*Xe134_atm_weight
Xe136_abundance = 0.9093 # PRC 86, 021601 (2012)
phase_space = 1433.0e-17 # PRC 85, 034316 (2012)
matrix_element = 3.33 # IBM-2 PRC 87, 014315 (2013)
loading = 0.0244 # 2.44%, PRC 86, 021601 (2012)
scint_density = 756.28e-9 # kg/mm^3 calculated A Back 2015-07-22
outer_radius = 1540. # mm, PRC 86, 021601 (2012)
xe136_converter = DBIsotope("Xe136", Xe136_atm_weight, XeEn_atm_weight,
Xe136_abundance, phase_space, matrix_element,
loading, outer_radius, scint_density)
expected = 5.3985e+26 # Calculated - A Back 2015-06-30
result, message = physics_tests.test_function_float(
xe136_converter.get_n_atoms, expected, loading=loading,
scint_density=scint_density, outer_radius=outer_radius)
print message, "(KamLAND-Zen)"
# Check half_life_to_activity
expected = 50.4 # /y, SNO+-doc-2593v8
half_life = 5.17e25 # y, SNO+-doc-2593v8 (3 sigma FC limit @ 5 y livetime)
result, message = physics_tests.test_function_float(
te130_converter.half_life_to_activity, expected, half_life=half_life,
n_atoms=te130_converter.get_n_atoms())
print message, "(no FV cut)"
# Check activity_to_half_life
expected = 5.17e25 # y, SNO+-doc-2593v8
activity = 50.4 # /y, SNO+-doc-2593v8
result, message = physics_tests.test_function_float(
te130_converter.activity_to_half_life, expected, activity=activity,
n_atoms=te130_converter.get_n_atoms())
print message, "(no FV cut)"
# Check eff_mass_to_half_life
expected = 4.37e25 # y, SNO+-doc-2593v8 (90% CL @ 1 y livetime)
eff_mass = 0.0999 # eV, SNO+-doc-2593v8
result, message = physics_tests.test_function_float(
te130_converter.eff_mass_to_half_life, expected, eff_mass=eff_mass)
    print(message)
# Check half_life_to_eff_mass
expected = 0.0999 # eV, SNO+-doc-2593v8
half_life = 4.37e25 # y, SNO+-doc-2593v8
result, message = physics_tests.test_function_float(
te130_converter.half_life_to_eff_mass, expected, half_life=half_life)
    print(message)
# Check activity_to_counts
livetime = 5.0
# ROI counts, SNO+-doc-2593v8 (3 sigma FC limit @ 5 y livetime)
expected = 31.2
# /y SNO+-doc-2593v8 - adjusted to FV
activity = 50.4
result, message = physics_tests.test_function_float(
te130_converter.activity_to_counts, expected, activity=activity,
livetime=livetime)
    print(message)
# Check counts_to_activity
# /y SNO+-doc-2593v8 - adjusted to FV
expected = 50.4
counts = 31.2 # ROI counts, SNO+-doc-2593v8
result, message = physics_tests.test_function_float(
te130_converter.counts_to_activity, expected, counts=counts,
livetime=livetime)
    print(message)
# Check counts_to_eff_mass
# eV, SNO+-doc-2593v8 (3 sigma @ 5 y livetime)
expected = te130_converter.half_life_to_eff_mass(5.17e25)
counts = 31.2 # ROI counts, SNO+-doc-2593v8 (3 sigma CL @ 5 y livetime)
result, message = physics_tests.test_function_float(
te130_converter.counts_to_eff_mass,
expected, counts=counts)
    print(message)
# Check eff_mass_to_counts
expected = 31.2 # ROI counts, SNO+-doc-2593v8 (3 sigma CL @ 5 y livetime)
# eV, SNO+-doc-2593v8 (3 sigma @ 5 y livetime)
eff_mass = te130_converter.half_life_to_eff_mass(5.17e25)
result, message = physics_tests.test_function_float(
te130_converter.eff_mass_to_counts,
expected, eff_mass=eff_mass)
    print(message)
# Check half_life_to_counts
expected = 31.2 # ROI counts, SNO+-doc-2593v8
half_life = 5.17e25 # y, SNO+-doc-2593v8 (3 sigma @ 5 y livetime)
result, message = physics_tests.test_function_float(
te130_converter.half_life_to_counts,
expected, half_life=half_life)
    print(message)
# Check counts_to_half_life
expected = 5.17e25 # y, SNO+-doc-2593v8
counts = 31.2 # ROI counts, SNO+-doc-2593v8
result, message = physics_tests.test_function_float(
te130_converter.counts_to_half_life,
expected, counts=counts)
    print(message)
    print("============")
# Matrix elements - dictionary with Spectra name as key and matrix element as
# value.
matrix_elements = {
# REF: F. Simkovic et al. Phys. Rev. C. 79, 055501 1-10 (2009)
# Averaged over min and max values from columns 2, 4 & 6 in Table III
"Xe136_0n2b_n1": 2.205,
"Xe136_0n2b_n2": None,
# REF: M. Hirsh et al. Phys. Lett. B. 372, 8-14 (1996) - Table 2
# Assuming two Majorons emitted i.e. only type IE or IID modes
"Xe136_0n2b_n3": 1.e-3,
# REF: M. Hirsh et al. Phys. Lett. B. 372, 8-14 (1996) - Table 2
"Xe136_0n2b_n7": 1.e-3
}
# Phase space factors - dictionary with Spectra name as key and phase space
# factor as value.
phase_spaces = {
# REF: Suhonen, J. & Civitarese, O. Physics Reports, Elsevier BV, 300
# 123-214 (1998)
# Table 6
"Xe136_0n2b_n1": 6.02e-16,
"Xe136_0n2b_n2": None,
# Assuming two Majorons emitted i.e. only type IE or IID modes
# REF: M. Hirsh et al. Phys. Lett. B. 372, 8-14 (1996) - Table 3
"Xe136_0n2b_n3": 1.06e-17,
# REF: M. Hirsh et al. Phys. Lett. B. 372, 8-14 (1996) - Table 3
"Xe136_0n2b_n7": 4.54e-17
}
if __name__ == "__main__":
import argparse
from echidna.scripts.zero_nu_limit import ReadableDir
import echidna.output.store as store
parser = argparse.ArgumentParser(description="Example DBIsotpe calculator "
"script and validation.")
parser.add_argument("-s", "--signal", action=ReadableDir,
help="Supply path for signal hdf5 file")
args = parser.parse_args()
test(args)
| python | 21,291 |
from collections import deque
import hashlib
import logging
import os
import shutil
import stat
import tempfile
from io import open
from definitions import Opcode, ParcelType, DiffAction
from errors import Error, CodedError
from tools import process_stat
from protocol import send_response_header, send_parcel, send_empty_parcel, send_error
def handle_expand_path(args):
path = os.path.expanduser(args['path'])
if not os.path.exists(path):
send_error(Error.ENOENT, 'Path not found')
elif not os.path.isdir(path):
send_error(Error.ENOTDIR, 'Not a directory')
else:
send_response_header({'path': path})
def handle_ls(args):
base = os.path.expanduser(args['path'])
selfStat = os.stat(base)
result = { 'stat': process_stat(selfStat) }
if not stat.S_ISDIR(selfStat[stat.ST_MODE]):
send_response_header(result)
return
dirs = {}
dirLimit = 25
entryLimit = 2000
explore = deque(['.'])
while len(explore) > 0 and dirLimit > 0 and entryLimit > 0:
dirLimit -= 1
relPath = explore.popleft()
absPath = base if relPath == '.' else os.path.join(base, relPath)
try:
children = {}
for childName in os.listdir(absPath):
entryLimit -= 1
if entryLimit < 0 and len(dirs) > 0:
children = None
break
child_path = os.path.join(absPath, childName)
try:
childStat = os.stat(child_path)
children[childName] = process_stat(childStat)
isDir = stat.S_ISDIR(childStat[stat.ST_MODE])
if isDir and len(explore) < dirLimit:
explore.append(os.path.join(relPath, childName))
except OSError as err:
logging.warning('Skipping ' + child_path + ': ' + str(err))
if children is not None:
dirs[relPath] = children
except OSError as err:
logging.warning('Error: ' + str(err))
if len(dirs) == 0:
raise err # Only raise read errors on the first item.
result['dirs'] = dirs
send_response_header(result)
def handle_get_server_info(args):
settingsPath = os.path.expanduser('~/.pony-ssh/')
if not os.path.exists(settingsPath):
os.makedirs(settingsPath)
# Load or generate a cache key.
cacheKey = None
cacheKeyIsNew = False
cacheKeyFile = settingsPath + 'cache.key'
if os.path.exists(cacheKeyFile):
with open(cacheKeyFile, 'r') as keyFileHandle:
cacheKey = keyFileHandle.read(64)
    if cacheKey is None or len(cacheKey) < 64:
        cacheKeyIsNew = True
        cacheKey = os.urandom(32).hex()
        with open(cacheKeyFile, 'w') as keyFileHandle:
            keyFileHandle.write(cacheKey)
send_response_header({
'home': os.path.expanduser('~'),
'cacheKey': cacheKey,
'newCacheKey': cacheKeyIsNew
})
def handle_file_read(args):
path = os.path.expanduser(args['path'])
# Open the file before sending a response header
fh = open(path, 'rb')
    # If a hash has been supplied, check if it matches. If so, skip sending the file body.
if 'cachedHash' in args:
hash = hashlib.md5(fh.read()).hexdigest()
if hash == args['cachedHash']:
send_response_header({'hashMatch': True})
return
fh.seek(0, 0)
length = os.path.getsize(path)
send_response_header({'length': length})
if length == 0:
return
chunkSize = 200 * 1024
while True:
chunk = fh.read(chunkSize)
if not chunk:
break
send_parcel(ParcelType.BODY, chunk)
fh.close()
send_parcel(ParcelType.ENDOFBODY, b'')
def handle_file_write_diff(args):
path = os.path.expanduser(args['path'])
if not os.path.exists(path):
raise OSError(Error.ENOENT, 'File not found')
with open(path, 'r', encoding='latin-1') as fh:
original_data = bytearray(fh.read(), 'latin-1')
original_hash = hashlib.md5(original_data).hexdigest()
if original_hash != args['hashBefore']:
raise CodedError(Error.EIO, 'File hash does not match client cached value: ' + args['hashBefore'] + ' vs ' + original_hash)
# Apply diff; comes in as a flat array containing pairs; action, action data.
updated_data = bytearray()
read_cursor = 0
diff = args['diff']
for i in range(0, len(diff), 2):
action = diff[i]
action_data = diff[i + 1]
if action == DiffAction.INSERTED:
updated_data.extend(bytearray(action_data, 'latin-1')) # Action data contains new data inserted
elif action == DiffAction.REMOVED:
read_cursor += action_data # Action data contains number of bytes to remove
else:
# Action data contains number of bytes to copy from original
updated_data.extend(original_data[read_cursor:read_cursor+action_data])
read_cursor += action_data
updated_hash = hashlib.md5(updated_data).hexdigest()
if updated_hash != args['hashAfter']:
raise CodedError(Error.EINVAL, 'File hash after changes applied does not match expected')
with open(path, 'wb') as fh:
fh.write(updated_data)
send_response_header({})
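# Illustrative sketch of the diff format consumed above (values hypothetical):
# 'diff' is a flat list of (action, action_data) pairs, e.g.
#     [<copy action>,        120,          # copy 120 bytes from the original
#      DiffAction.REMOVED,   15,           # skip 15 bytes of the original
#      DiffAction.INSERTED,  'new text']   # splice in replacement text
# Any action other than INSERTED/REMOVED is treated as "copy N bytes from the
# original", so the exact name of the copy member of DiffAction is not assumed
# here.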
def handle_file_write(args):
path = os.path.expanduser(args['path'])
alreadyExists = os.path.exists(path)
if alreadyExists and not args['overwrite']:
raise OSError(Error.EEXIST, 'File already exists')
elif not alreadyExists and not args['create']:
raise OSError(Error.ENOENT, 'File not found')
fh = open(path, 'wb')
fh.write(args['data'])
fh.close()
send_response_header({})
def handle_mkdir(args):
path = os.path.expanduser(args['path'])
os.mkdir(path)
send_response_header({})
def handle_delete(args):
path = os.path.expanduser(args['path'])
if os.path.isdir(path) and not os.path.islink(path):
shutil.rmtree(path)
else:
os.remove(path)
send_response_header({})
def handle_rename(args):
fromPath = os.path.expanduser(args['from'])
toPath = os.path.expanduser(args['to'])
if os.path.exists(toPath):
if args['overwrite']:
os.unlink(toPath)
else:
raise OSError(Error.EEXIST, 'File already exists')
os.rename(fromPath, toPath)
send_response_header({})
message_handlers = {
Opcode.LS: handle_ls,
Opcode.GET_SERVER_INFO: handle_get_server_info,
Opcode.FILE_READ: handle_file_read,
Opcode.FILE_WRITE: handle_file_write,
Opcode.MKDIR: handle_mkdir,
Opcode.DELETE: handle_delete,
Opcode.RENAME: handle_rename,
Opcode.EXPAND_PATH: handle_expand_path,
Opcode.FILE_WRITE_DIFF: handle_file_write_diff,
}
| python | 6,929 |
#Script:Temperatura
'''
Program that reads a temperature in degrees Celsius (°C) from the console,
converts it to Kelvin (°K), and prints the result.
Formula used: °K = °C + 273.15.
'''
#INPUTS
print("Programa para convertir grados Centigrados °C a Kelvin °K")
print("Digite el valor en grados °C")
gradosC=float(input())
#PROCESS
conversor = gradosC + 273.15
#OUTPUTS
print(gradosC, "°C equivale a", conversor, "°K")
| python | 491 |
# -*- coding: utf-8 -*-
# These classes are a simplified version of Disease and Diseases classes
# originally developed by Orion Buske in patient-similarity (https://github.com/buske/patient-similarity)
import logging
import re
from collections import defaultdict
from patientMatcher.resources import path_to_phenotype_annotations
LOG = logging.getLogger(__name__)
db_re = re.compile(r"([A-Z]+:\d+)")
# Assumed helper (used by _parse_disease_frequency below): splits fractional
# frequencies such as "7/13" or "7 of 13" into numerator and denominator.
fraction_frequency_re = re.compile(r"of|/")
FREQUENCY_TERMS = {
"HP:0040280": 1.0, # Obligate
"HP:0040281": (0.99 + 0.80) / 2.0, # Very frequent
"HP:0040282": (0.79 + 0.30) / 2.0, # Frequent
"HP:0040283": (0.05 + 0.29) / 2.0, # Occasional
"HP:0040284": (0.01 + 0.04) / 2.0, # Very rare
"HP:0040285": 0.0, # Excluded
}
FREQUENCIES = {
"very rare": 0.01,
"rare": 0.05,
"occasional": 0.075,
"frequent": 0.33,
"typical": 0.5,
"variable": 0.5,
"common": 0.75,
"hallmark": 0.9,
"obligate": 1.0,
}
class Disease:
"""An object representing a single disease"""
def __init__(self, db, db_id, phenotypes):
self.db = db
self.id = db_id
self.phenotype_freqs = phenotypes
def __str__(self):
return f"{self.db}:{self.id}"
class Diseases:
"""Create an object containing all diseases from the phenotype_annotations.tav.txt file
Resources included in this file: DECIPHER, OMIM, ORPHANET
"""
def init_app(self, app):
"""Initialize the diseases object when the app is launched"""
self.databases = ["DECIPHER", "OMIM", "ORPHA"]
self.diseases = {}
self._parse_diseases()
LOG.info(f"Parsed {len(self.diseases.keys())} disease/phenotypes from resource file")
def _parse_disease_frequency(self, field):
"""Parse disease frequency (col 8 in phenotype anno file)"""
if not field:
return None
if field.upper() in FREQUENCY_TERMS:
            return FREQUENCY_TERMS[field.upper()]
field = field.lower()
if field in FREQUENCIES:
return FREQUENCIES[field]
if field.endswith("%"):
field = field.replace("%", "")
if "-" in field:
# Average any frequency ranges
low, high = field.split("-")
freq = (float(low) + float(high)) / 2 / 100
else:
freq = float(field) / 100
else:
try:
num, denom = fraction_frequency_re.split(field)
except Exception as ex:
LOG.warning(f"Error parsing frequency: {field} -> {ex}")
                freq = None
else:
freq = float(num) / float(denom)
return freq
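    # Illustrative inputs/outputs for _parse_disease_frequency (approximate,
    # derived from the tables above; example strings are hypothetical):
    #     "HP:0040282" -> 0.545    (mid-point of the "Frequent" HPO range)
    #     "hallmark"   -> 0.9
    #     "12%"        -> 0.12
    #     "30%-50%"    -> 0.4      (percentage ranges are averaged)
    #     "7/13"       -> ~0.538   (split with fraction_frequency_re)
    #     ""           -> None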
def _parse_diseases(self):
"""Parse diseases file, that is available under patientMatcher/resources"""
disease_phenotypes = defaultdict(dict)
with open(path_to_phenotype_annotations, encoding="utf-8") as disease_file:
for line in disease_file:
diseases = []
line = line.strip()
items = line.split("\t")
db = items[0]
if db not in self.databases:
continue
diseases.append((items[0].strip(), items[1].strip())) # diseases: [(OMIM, 102400)]
# Add alternative terms to list of diseases
alt_diseases = db_re.findall(items[5].strip()) # example: ['OMIM:147791']
for alt_disease in alt_diseases:
alt_db = alt_disease.split(":")[0].strip()
alt_db_id = int(alt_disease.split(":")[1].strip())
if alt_db in ["MIM", "IM"]:
alt_db = "OMIM"
if alt_db not in self.databases:
continue
diseases.append((alt_db, alt_db_id))
# Add HPO terms and frequencies to disease terms
hpo_term = items[4].strip()
freq = self._parse_disease_frequency(items[8])
for disease in diseases:
phenotypes = disease_phenotypes[disease]
# Collect max annotated frequency
if freq is not None and hpo_term in phenotypes:
old_freq = phenotypes[hpo_term]
                        if old_freq is None or old_freq < freq:
                            phenotypes[hpo_term] = freq
else:
phenotypes[hpo_term] = freq
# phenotypes[hpo_term] = None # Do not populate disease frequencies
for (db, db_id), phenotypes in disease_phenotypes.items():
disease = Disease(db, db_id, phenotypes)
self.diseases[(db, db_id)] = disease
| python | 4,813 |
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'GeographicReport'
db.create_table('geographic_report_geographicreport', (
('max_scale', self.gf('django.db.models.fields.IntegerField')()),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=50)),
))
db.send_create_signal('geographic_report', ['GeographicReport'])
# Adding model 'Annotation'
db.create_table('geographic_report_annotation', (
('min', self.gf('django.db.models.fields.FloatField')(blank=True)),
('color', self.gf('django.db.models.fields.CharField')(default='000000', max_length=6)),
('max', self.gf('django.db.models.fields.FloatField')()),
('label', self.gf('django.db.models.fields.CharField')(max_length=30)),
('report', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['geographic_report.GeographicReport'])),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
))
db.send_create_signal('geographic_report', ['Annotation'])
def backwards(self, orm):
# Deleting model 'GeographicReport'
db.delete_table('geographic_report_geographicreport')
# Deleting model 'Annotation'
db.delete_table('geographic_report_annotation')
models = {
'geographic_report.annotation': {
'Meta': {'object_name': 'Annotation'},
'color': ('django.db.models.fields.CharField', [], {'default': "'000000'", 'max_length': '6'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'max': ('django.db.models.fields.FloatField', [], {}),
'min': ('django.db.models.fields.FloatField', [], {'blank': 'True'}),
'report': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['geographic_report.GeographicReport']"})
},
'geographic_report.geographicreport': {
'Meta': {'object_name': 'GeographicReport'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'max_scale': ('django.db.models.fields.IntegerField', [], {}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['geographic_report']
| python | 2,701 |
"""
ArchiveBox config definitions (including defaults and dynamic config options).
Config Usage Example:
archivebox config --set MEDIA_TIMEOUT=600
env MEDIA_TIMEOUT=600 USE_COLOR=False ... archivebox [subcommand] ...
Config Precedence Order:
1. cli args (--update-all / --index-only / etc.)
2. shell environment vars (env USE_COLOR=False archivebox add '...')
3. config file (echo "SAVE_FAVICON=False" >> ArchiveBox.conf)
4. defaults (defined below in Python)
Documentation:
https://github.com/ArchiveBox/ArchiveBox/wiki/Configuration
"""
__package__ = 'archivebox'
import os
import io
import re
import sys
import json
import getpass
import platform
import shutil
import sqlite3
import django
from hashlib import md5
from pathlib import Path
from datetime import datetime, timezone
from typing import Optional, Type, Tuple, Dict, Union, List
from subprocess import run, PIPE, DEVNULL
from configparser import ConfigParser
from collections import defaultdict
from .config_stubs import (
SimpleConfigValueDict,
ConfigValue,
ConfigDict,
ConfigDefaultValue,
ConfigDefaultDict,
)
############################### Config Schema ##################################
CONFIG_SCHEMA: Dict[str, ConfigDefaultDict] = {
'SHELL_CONFIG': {
'IS_TTY': {'type': bool, 'default': lambda _: sys.stdout.isatty()},
'USE_COLOR': {'type': bool, 'default': lambda c: c['IS_TTY']},
'SHOW_PROGRESS': {'type': bool, 'default': lambda c: (c['IS_TTY'] and platform.system() != 'Darwin')}, # progress bars are buggy on mac, disable for now
'IN_DOCKER': {'type': bool, 'default': False},
# TODO: 'SHOW_HINTS': {'type: bool, 'default': True},
},
'GENERAL_CONFIG': {
'OUTPUT_DIR': {'type': str, 'default': None},
'CONFIG_FILE': {'type': str, 'default': None},
'ONLY_NEW': {'type': bool, 'default': True},
'TIMEOUT': {'type': int, 'default': 60},
'MEDIA_TIMEOUT': {'type': int, 'default': 3600},
'OUTPUT_PERMISSIONS': {'type': str, 'default': '755'},
'RESTRICT_FILE_NAMES': {'type': str, 'default': 'windows'},
'URL_BLACKLIST': {'type': str, 'default': r'\.(css|js|otf|ttf|woff|woff2|gstatic\.com|googleapis\.com/css)(\?.*)?$'}, # to avoid downloading code assets as their own pages
},
'SERVER_CONFIG': {
'SECRET_KEY': {'type': str, 'default': None},
'BIND_ADDR': {'type': str, 'default': lambda c: ['127.0.0.1:8000', '0.0.0.0:8000'][c['IN_DOCKER']]},
'ALLOWED_HOSTS': {'type': str, 'default': '*'},
'DEBUG': {'type': bool, 'default': False},
'PUBLIC_INDEX': {'type': bool, 'default': True},
'PUBLIC_SNAPSHOTS': {'type': bool, 'default': True},
'PUBLIC_ADD_VIEW': {'type': bool, 'default': False},
'FOOTER_INFO': {'type': str, 'default': 'Content is hosted for personal archiving purposes only. Contact server owner for any takedown requests.'},
'SNAPSHOTS_PER_PAGE': {'type': int, 'default': 40},
'CUSTOM_TEMPLATES_DIR': {'type': str, 'default': None},
'TIME_ZONE': {'type': str, 'default': 'UTC'},
},
'ARCHIVE_METHOD_TOGGLES': {
'SAVE_TITLE': {'type': bool, 'default': True, 'aliases': ('FETCH_TITLE',)},
'SAVE_FAVICON': {'type': bool, 'default': True, 'aliases': ('FETCH_FAVICON',)},
'SAVE_WGET': {'type': bool, 'default': True, 'aliases': ('FETCH_WGET',)},
'SAVE_WGET_REQUISITES': {'type': bool, 'default': True, 'aliases': ('FETCH_WGET_REQUISITES',)},
'SAVE_SINGLEFILE': {'type': bool, 'default': True, 'aliases': ('FETCH_SINGLEFILE',)},
'SAVE_READABILITY': {'type': bool, 'default': True, 'aliases': ('FETCH_READABILITY',)},
'SAVE_MERCURY': {'type': bool, 'default': True, 'aliases': ('FETCH_MERCURY',)},
'SAVE_PDF': {'type': bool, 'default': True, 'aliases': ('FETCH_PDF',)},
'SAVE_SCREENSHOT': {'type': bool, 'default': True, 'aliases': ('FETCH_SCREENSHOT',)},
'SAVE_DOM': {'type': bool, 'default': True, 'aliases': ('FETCH_DOM',)},
'SAVE_HEADERS': {'type': bool, 'default': True, 'aliases': ('FETCH_HEADERS',)},
'SAVE_WARC': {'type': bool, 'default': True, 'aliases': ('FETCH_WARC',)},
'SAVE_GIT': {'type': bool, 'default': True, 'aliases': ('FETCH_GIT',)},
'SAVE_MEDIA': {'type': bool, 'default': True, 'aliases': ('FETCH_MEDIA',)},
'SAVE_ARCHIVE_DOT_ORG': {'type': bool, 'default': True, 'aliases': ('SUBMIT_ARCHIVE_DOT_ORG',)},
},
'ARCHIVE_METHOD_OPTIONS': {
'RESOLUTION': {'type': str, 'default': '1440,2000', 'aliases': ('SCREENSHOT_RESOLUTION',)},
'GIT_DOMAINS': {'type': str, 'default': 'github.com,bitbucket.org,gitlab.com,gist.github.com'},
'CHECK_SSL_VALIDITY': {'type': bool, 'default': True},
'MEDIA_MAX_SIZE': {'type': str, 'default': '750m'},
'CURL_USER_AGENT': {'type': str, 'default': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.61 Safari/537.36 ArchiveBox/{VERSION} (+https://github.com/ArchiveBox/ArchiveBox/) curl/{CURL_VERSION}'},
'WGET_USER_AGENT': {'type': str, 'default': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.61 Safari/537.36 ArchiveBox/{VERSION} (+https://github.com/ArchiveBox/ArchiveBox/) wget/{WGET_VERSION}'},
'CHROME_USER_AGENT': {'type': str, 'default': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.61 Safari/537.36 ArchiveBox/{VERSION} (+https://github.com/ArchiveBox/ArchiveBox/)'},
'COOKIES_FILE': {'type': str, 'default': None},
'CHROME_USER_DATA_DIR': {'type': str, 'default': None},
'CHROME_HEADLESS': {'type': bool, 'default': True},
'CHROME_SANDBOX': {'type': bool, 'default': lambda c: not c['IN_DOCKER']},
'YOUTUBEDL_ARGS': {'type': list, 'default': lambda c: [
'--write-description',
'--write-info-json',
'--write-annotations',
'--write-thumbnail',
'--no-call-home',
'--write-sub',
'--all-subs',
'--write-auto-sub',
'--convert-subs=srt',
'--yes-playlist',
'--continue',
'--ignore-errors',
'--geo-bypass',
'--add-metadata',
'--max-filesize={}'.format(c['MEDIA_MAX_SIZE']),
]},
'WGET_ARGS': {'type': list, 'default': ['--no-verbose',
'--adjust-extension',
'--convert-links',
'--force-directories',
'--backup-converted',
'--span-hosts',
'--no-parent',
'-e', 'robots=off',
]},
'CURL_ARGS': {'type': list, 'default': ['--silent',
'--location',
'--compressed'
]},
'GIT_ARGS': {'type': list, 'default': ['--recursive']},
},
'SEARCH_BACKEND_CONFIG' : {
'USE_INDEXING_BACKEND': {'type': bool, 'default': True},
'USE_SEARCHING_BACKEND': {'type': bool, 'default': True},
'SEARCH_BACKEND_ENGINE': {'type': str, 'default': 'ripgrep'},
'SEARCH_BACKEND_HOST_NAME': {'type': str, 'default': 'localhost'},
'SEARCH_BACKEND_PORT': {'type': int, 'default': 1491},
'SEARCH_BACKEND_PASSWORD': {'type': str, 'default': 'SecretPassword'},
# SONIC
'SONIC_COLLECTION': {'type': str, 'default': 'archivebox'},
'SONIC_BUCKET': {'type': str, 'default': 'snapshots'},
'SEARCH_BACKEND_TIMEOUT': {'type': int, 'default': 90},
},
'DEPENDENCY_CONFIG': {
'USE_CURL': {'type': bool, 'default': True},
'USE_WGET': {'type': bool, 'default': True},
'USE_SINGLEFILE': {'type': bool, 'default': True},
'USE_READABILITY': {'type': bool, 'default': True},
'USE_MERCURY': {'type': bool, 'default': True},
'USE_GIT': {'type': bool, 'default': True},
'USE_CHROME': {'type': bool, 'default': True},
'USE_NODE': {'type': bool, 'default': True},
'USE_YOUTUBEDL': {'type': bool, 'default': True},
'USE_RIPGREP': {'type': bool, 'default': True},
'CURL_BINARY': {'type': str, 'default': 'curl'},
'GIT_BINARY': {'type': str, 'default': 'git'},
'WGET_BINARY': {'type': str, 'default': 'wget'},
'SINGLEFILE_BINARY': {'type': str, 'default': lambda c: bin_path('single-file')},
'READABILITY_BINARY': {'type': str, 'default': lambda c: bin_path('readability-extractor')},
'MERCURY_BINARY': {'type': str, 'default': lambda c: bin_path('mercury-parser')},
'YOUTUBEDL_BINARY': {'type': str, 'default': 'youtube-dl'},
'NODE_BINARY': {'type': str, 'default': 'node'},
'RIPGREP_BINARY': {'type': str, 'default': 'rg'},
'CHROME_BINARY': {'type': str, 'default': None},
'POCKET_CONSUMER_KEY': {'type': str, 'default': None},
'POCKET_ACCESS_TOKENS': {'type': dict, 'default': {}},
},
}
########################## Backwards-Compatibility #############################
# for backwards compatibility with old config files, check old/deprecated names for each key
CONFIG_ALIASES = {
alias: key
for section in CONFIG_SCHEMA.values()
for key, default in section.items()
for alias in default.get('aliases', ())
}
USER_CONFIG = {key for section in CONFIG_SCHEMA.values() for key in section.keys()}
def get_real_name(key: str) -> str:
"""get the current canonical name for a given deprecated config key"""
return CONFIG_ALIASES.get(key.upper().strip(), key.upper().strip())
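# Illustrative examples (the FETCH_WGET alias is declared in CONFIG_SCHEMA above):
#     get_real_name('FETCH_WGET')  -> 'SAVE_WGET'
#     get_real_name(' save_wget ') -> 'SAVE_WGET'   (case/whitespace normalised)
#     get_real_name('UNKNOWN_KEY') -> 'UNKNOWN_KEY' (returned unchanged)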
################################ Constants #####################################
PACKAGE_DIR_NAME = 'archivebox'
TEMPLATES_DIR_NAME = 'templates'
ARCHIVE_DIR_NAME = 'archive'
SOURCES_DIR_NAME = 'sources'
LOGS_DIR_NAME = 'logs'
SQL_INDEX_FILENAME = 'index.sqlite3'
JSON_INDEX_FILENAME = 'index.json'
HTML_INDEX_FILENAME = 'index.html'
ROBOTS_TXT_FILENAME = 'robots.txt'
FAVICON_FILENAME = 'favicon.ico'
CONFIG_FILENAME = 'ArchiveBox.conf'
DEFAULT_CLI_COLORS = {
'reset': '\033[00;00m',
'lightblue': '\033[01;30m',
'lightyellow': '\033[01;33m',
'lightred': '\033[01;35m',
'red': '\033[01;31m',
'green': '\033[01;32m',
'blue': '\033[01;34m',
'white': '\033[01;37m',
'black': '\033[01;30m',
}
ANSI = {k: '' for k in DEFAULT_CLI_COLORS.keys()}
COLOR_DICT = defaultdict(lambda: [(0, 0, 0), (0, 0, 0)], {
'00': [(0, 0, 0), (0, 0, 0)],
'30': [(0, 0, 0), (0, 0, 0)],
'31': [(255, 0, 0), (128, 0, 0)],
'32': [(0, 200, 0), (0, 128, 0)],
'33': [(255, 255, 0), (128, 128, 0)],
'34': [(0, 0, 255), (0, 0, 128)],
'35': [(255, 0, 255), (128, 0, 128)],
'36': [(0, 255, 255), (0, 128, 128)],
'37': [(255, 255, 255), (255, 255, 255)],
})
STATICFILE_EXTENSIONS = {
# 99.999% of the time, URLs ending in these extensions are static files
# that can be downloaded as-is, not html pages that need to be rendered
'gif', 'jpeg', 'jpg', 'png', 'tif', 'tiff', 'wbmp', 'ico', 'jng', 'bmp',
'svg', 'svgz', 'webp', 'ps', 'eps', 'ai',
'mp3', 'mp4', 'm4a', 'mpeg', 'mpg', 'mkv', 'mov', 'webm', 'm4v',
'flv', 'wmv', 'avi', 'ogg', 'ts', 'm3u8',
'pdf', 'txt', 'rtf', 'rtfd', 'doc', 'docx', 'ppt', 'pptx', 'xls', 'xlsx',
'atom', 'rss', 'css', 'js', 'json',
'dmg', 'iso', 'img',
'rar', 'war', 'hqx', 'zip', 'gz', 'bz2', '7z',
# Less common extensions to consider adding later
# jar, swf, bin, com, exe, dll, deb
# ear, hqx, eot, wmlc, kml, kmz, cco, jardiff, jnlp, run, msi, msp, msm,
# pl pm, prc pdb, rar, rpm, sea, sit, tcl tk, der, pem, crt, xpi, xspf,
# ra, mng, asx, asf, 3gpp, 3gp, mid, midi, kar, jad, wml, htc, mml
# These are always treated as pages, not as static files, never add them:
# html, htm, shtml, xhtml, xml, aspx, php, cgi
}
# When initializing archivebox in a new directory, we check to make sure the dir is
# actually empty so that we don't clobber someone's home directory or desktop by accident.
# These files are exceptions to the is_empty check when we're trying to init a new dir,
# as they could be from a previous archivebox version, system artifacts, dependencies, etc.
ALLOWED_IN_OUTPUT_DIR = {
'.gitignore',
'lost+found',
'.DS_Store',
'.venv',
'venv',
'virtualenv',
'.virtualenv',
'node_modules',
'package.json',
'package-lock.json',
'yarn.lock',
'static',
'sonic',
ARCHIVE_DIR_NAME,
SOURCES_DIR_NAME,
LOGS_DIR_NAME,
SQL_INDEX_FILENAME,
f'{SQL_INDEX_FILENAME}-wal',
f'{SQL_INDEX_FILENAME}-shm',
JSON_INDEX_FILENAME,
HTML_INDEX_FILENAME,
ROBOTS_TXT_FILENAME,
FAVICON_FILENAME,
CONFIG_FILENAME,
f'{CONFIG_FILENAME}.bak',
'static_index.json',
}
############################## Derived Config ##################################
DYNAMIC_CONFIG_SCHEMA: ConfigDefaultDict = {
'TERM_WIDTH': {'default': lambda c: lambda: shutil.get_terminal_size((100, 10)).columns},
'USER': {'default': lambda c: getpass.getuser() or os.getlogin()},
'ANSI': {'default': lambda c: DEFAULT_CLI_COLORS if c['USE_COLOR'] else {k: '' for k in DEFAULT_CLI_COLORS.keys()}},
'PACKAGE_DIR': {'default': lambda c: Path(__file__).resolve().parent},
'TEMPLATES_DIR': {'default': lambda c: c['PACKAGE_DIR'] / TEMPLATES_DIR_NAME},
'CUSTOM_TEMPLATES_DIR': {'default': lambda c: c['CUSTOM_TEMPLATES_DIR'] and Path(c['CUSTOM_TEMPLATES_DIR'])},
'OUTPUT_DIR': {'default': lambda c: Path(c['OUTPUT_DIR']).resolve() if c['OUTPUT_DIR'] else Path(os.curdir).resolve()},
'ARCHIVE_DIR': {'default': lambda c: c['OUTPUT_DIR'] / ARCHIVE_DIR_NAME},
'SOURCES_DIR': {'default': lambda c: c['OUTPUT_DIR'] / SOURCES_DIR_NAME},
'LOGS_DIR': {'default': lambda c: c['OUTPUT_DIR'] / LOGS_DIR_NAME},
'CONFIG_FILE': {'default': lambda c: Path(c['CONFIG_FILE']).resolve() if c['CONFIG_FILE'] else c['OUTPUT_DIR'] / CONFIG_FILENAME},
'COOKIES_FILE': {'default': lambda c: c['COOKIES_FILE'] and Path(c['COOKIES_FILE']).resolve()},
'CHROME_USER_DATA_DIR': {'default': lambda c: find_chrome_data_dir() if c['CHROME_USER_DATA_DIR'] is None else (Path(c['CHROME_USER_DATA_DIR']).resolve() if c['CHROME_USER_DATA_DIR'] else None)}, # None means unset, so we autodetect it with find_chrome_Data_dir(), but emptystring '' means user manually set it to '', and we should store it as None
'URL_BLACKLIST_PTN': {'default': lambda c: c['URL_BLACKLIST'] and re.compile(c['URL_BLACKLIST'] or '', re.IGNORECASE | re.UNICODE | re.MULTILINE)},
'ARCHIVEBOX_BINARY': {'default': lambda c: sys.argv[0] or bin_path('archivebox')},
'VERSION': {'default': lambda c: json.loads((Path(c['PACKAGE_DIR']) / 'package.json').read_text(encoding='utf-8').strip())['version']},
'PYTHON_BINARY': {'default': lambda c: sys.executable},
'PYTHON_ENCODING': {'default': lambda c: sys.stdout.encoding.upper()},
'PYTHON_VERSION': {'default': lambda c: '{}.{}.{}'.format(*sys.version_info[:3])},
'DJANGO_BINARY': {'default': lambda c: django.__file__.replace('__init__.py', 'bin/django-admin.py')},
'DJANGO_VERSION': {'default': lambda c: '{}.{}.{} {} ({})'.format(*django.VERSION)},
'USE_CURL': {'default': lambda c: c['USE_CURL'] and (c['SAVE_FAVICON'] or c['SAVE_TITLE'] or c['SAVE_ARCHIVE_DOT_ORG'])},
'CURL_VERSION': {'default': lambda c: bin_version(c['CURL_BINARY']) if c['USE_CURL'] else None},
'CURL_USER_AGENT': {'default': lambda c: c['CURL_USER_AGENT'].format(**c)},
'CURL_ARGS': {'default': lambda c: c['CURL_ARGS'] or []},
'SAVE_FAVICON': {'default': lambda c: c['USE_CURL'] and c['SAVE_FAVICON']},
'SAVE_ARCHIVE_DOT_ORG': {'default': lambda c: c['USE_CURL'] and c['SAVE_ARCHIVE_DOT_ORG']},
'USE_WGET': {'default': lambda c: c['USE_WGET'] and (c['SAVE_WGET'] or c['SAVE_WARC'])},
'WGET_VERSION': {'default': lambda c: bin_version(c['WGET_BINARY']) if c['USE_WGET'] else None},
'WGET_AUTO_COMPRESSION': {'default': lambda c: wget_supports_compression(c) if c['USE_WGET'] else False},
'WGET_USER_AGENT': {'default': lambda c: c['WGET_USER_AGENT'].format(**c)},
'SAVE_WGET': {'default': lambda c: c['USE_WGET'] and c['SAVE_WGET']},
'SAVE_WARC': {'default': lambda c: c['USE_WGET'] and c['SAVE_WARC']},
'WGET_ARGS': {'default': lambda c: c['WGET_ARGS'] or []},
'RIPGREP_VERSION': {'default': lambda c: bin_version(c['RIPGREP_BINARY']) if c['USE_RIPGREP'] else None},
'USE_SINGLEFILE': {'default': lambda c: c['USE_SINGLEFILE'] and c['SAVE_SINGLEFILE']},
'SINGLEFILE_VERSION': {'default': lambda c: bin_version(c['SINGLEFILE_BINARY']) if c['USE_SINGLEFILE'] else None},
'USE_READABILITY': {'default': lambda c: c['USE_READABILITY'] and c['SAVE_READABILITY']},
'READABILITY_VERSION': {'default': lambda c: bin_version(c['READABILITY_BINARY']) if c['USE_READABILITY'] else None},
'USE_MERCURY': {'default': lambda c: c['USE_MERCURY'] and c['SAVE_MERCURY']},
'MERCURY_VERSION': {'default': lambda c: '1.0.0' if shutil.which(str(bin_path(c['MERCURY_BINARY']))) else None}, # mercury is unversioned
'USE_GIT': {'default': lambda c: c['USE_GIT'] and c['SAVE_GIT']},
'GIT_VERSION': {'default': lambda c: bin_version(c['GIT_BINARY']) if c['USE_GIT'] else None},
'SAVE_GIT': {'default': lambda c: c['USE_GIT'] and c['SAVE_GIT']},
'USE_YOUTUBEDL': {'default': lambda c: c['USE_YOUTUBEDL'] and c['SAVE_MEDIA']},
'YOUTUBEDL_VERSION': {'default': lambda c: bin_version(c['YOUTUBEDL_BINARY']) if c['USE_YOUTUBEDL'] else None},
'SAVE_MEDIA': {'default': lambda c: c['USE_YOUTUBEDL'] and c['SAVE_MEDIA']},
'YOUTUBEDL_ARGS': {'default': lambda c: c['YOUTUBEDL_ARGS'] or []},
'CHROME_BINARY': {'default': lambda c: c['CHROME_BINARY'] or find_chrome_binary()},
'USE_CHROME': {'default': lambda c: c['USE_CHROME'] and c['CHROME_BINARY'] and (c['SAVE_PDF'] or c['SAVE_SCREENSHOT'] or c['SAVE_DOM'] or c['SAVE_SINGLEFILE'])},
'CHROME_VERSION': {'default': lambda c: bin_version(c['CHROME_BINARY']) if c['USE_CHROME'] else None},
'SAVE_PDF': {'default': lambda c: c['USE_CHROME'] and c['SAVE_PDF']},
'SAVE_SCREENSHOT': {'default': lambda c: c['USE_CHROME'] and c['SAVE_SCREENSHOT']},
'SAVE_DOM': {'default': lambda c: c['USE_CHROME'] and c['SAVE_DOM']},
'SAVE_SINGLEFILE': {'default': lambda c: c['USE_CHROME'] and c['SAVE_SINGLEFILE'] and c['USE_NODE']},
'SAVE_READABILITY': {'default': lambda c: c['USE_READABILITY'] and c['USE_NODE']},
'SAVE_MERCURY': {'default': lambda c: c['USE_MERCURY'] and c['USE_NODE']},
'USE_NODE': {'default': lambda c: c['USE_NODE'] and (c['SAVE_READABILITY'] or c['SAVE_SINGLEFILE'] or c['SAVE_MERCURY'])},
'NODE_VERSION': {'default': lambda c: bin_version(c['NODE_BINARY']) if c['USE_NODE'] else None},
'DEPENDENCIES': {'default': lambda c: get_dependency_info(c)},
'CODE_LOCATIONS': {'default': lambda c: get_code_locations(c)},
'EXTERNAL_LOCATIONS': {'default': lambda c: get_external_locations(c)},
'DATA_LOCATIONS': {'default': lambda c: get_data_locations(c)},
'CHROME_OPTIONS': {'default': lambda c: get_chrome_info(c)},
}
################################### Helpers ####################################
def load_config_val(key: str,
default: ConfigDefaultValue=None,
type: Optional[Type]=None,
aliases: Optional[Tuple[str, ...]]=None,
config: Optional[ConfigDict]=None,
env_vars: Optional[os._Environ]=None,
config_file_vars: Optional[Dict[str, str]]=None) -> ConfigValue:
"""parse bool, int, and str key=value pairs from env"""
config_keys_to_check = (key, *(aliases or ()))
for key in config_keys_to_check:
if env_vars:
val = env_vars.get(key)
if val:
break
if config_file_vars:
val = config_file_vars.get(key)
if val:
break
if type is None or val is None:
if callable(default):
assert isinstance(config, dict)
return default(config)
return default
elif type is bool:
if val.lower() in ('true', 'yes', '1'):
return True
elif val.lower() in ('false', 'no', '0'):
return False
else:
raise ValueError(f'Invalid configuration option {key}={val} (expected a boolean: True/False)')
elif type is str:
if val.lower() in ('true', 'false', 'yes', 'no', '1', '0'):
raise ValueError(f'Invalid configuration option {key}={val} (expected a string)')
return val.strip()
elif type is int:
if not val.isdigit():
raise ValueError(f'Invalid configuration option {key}={val} (expected an integer)')
return int(val)
elif type is list or type is dict:
return json.loads(val)
raise Exception('Config values can only be str, bool, int or json')
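# Illustrative example (hypothetical values): with TIMEOUT declared in
# CONFIG_SCHEMA as {'type': int, 'default': 60},
#     load_config_val('TIMEOUT', default=60, type=int,
#                     env_vars={'TIMEOUT': '120'}, config_file_vars=None)
# returns the integer 120 (environment variables take precedence over the
# config file), while an unset or empty value falls back to the default of 60.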
def load_config_file(out_dir: str=None) -> Optional[Dict[str, str]]:
"""load the ini-formatted config file from OUTPUT_DIR/Archivebox.conf"""
out_dir = out_dir or Path(os.getenv('OUTPUT_DIR', '.')).resolve()
config_path = Path(out_dir) / CONFIG_FILENAME
if config_path.exists():
config_file = ConfigParser()
config_file.optionxform = str
config_file.read(config_path)
# flatten into one namespace
config_file_vars = {
key.upper(): val
for section, options in config_file.items()
for key, val in options.items()
}
# print('[i] Loaded config file', os.path.abspath(config_path))
# print(config_file_vars)
return config_file_vars
return None
def write_config_file(config: Dict[str, str], out_dir: str=None) -> ConfigDict:
"""load the ini-formatted config file from OUTPUT_DIR/Archivebox.conf"""
from .system import atomic_write
CONFIG_HEADER = (
"""# This is the config file for your ArchiveBox collection.
#
# You can add options here manually in INI format, or automatically by running:
# archivebox config --set KEY=VALUE
#
# If you modify this file manually, make sure to update your archive after by running:
# archivebox init
#
# A list of all possible config with documentation and examples can be found here:
# https://github.com/ArchiveBox/ArchiveBox/wiki/Configuration
""")
out_dir = out_dir or Path(os.getenv('OUTPUT_DIR', '.')).resolve()
config_path = Path(out_dir) / CONFIG_FILENAME
if not config_path.exists():
atomic_write(config_path, CONFIG_HEADER)
config_file = ConfigParser()
config_file.optionxform = str
config_file.read(config_path)
with open(config_path, 'r', encoding='utf-8') as old:
atomic_write(f'{config_path}.bak', old.read())
find_section = lambda key: [name for name, opts in CONFIG_SCHEMA.items() if key in opts][0]
# Set up sections in empty config file
for key, val in config.items():
section = find_section(key)
if section in config_file:
existing_config = dict(config_file[section])
else:
existing_config = {}
config_file[section] = {**existing_config, key: val}
# always make sure there's a SECRET_KEY defined for Django
existing_secret_key = None
if 'SERVER_CONFIG' in config_file and 'SECRET_KEY' in config_file['SERVER_CONFIG']:
existing_secret_key = config_file['SERVER_CONFIG']['SECRET_KEY']
if (not existing_secret_key) or ('not a valid secret' in existing_secret_key):
from django.utils.crypto import get_random_string
chars = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_'
random_secret_key = get_random_string(50, chars)
if 'SERVER_CONFIG' in config_file:
config_file['SERVER_CONFIG']['SECRET_KEY'] = random_secret_key
else:
config_file['SERVER_CONFIG'] = {'SECRET_KEY': random_secret_key}
with open(config_path, 'w+', encoding='utf-8') as new:
config_file.write(new)
try:
# validate the config by attempting to re-parse it
CONFIG = load_all_config()
except BaseException: # lgtm [py/catch-base-exception]
        # something went horribly wrong, revert to the previous version
with open(f'{config_path}.bak', 'r', encoding='utf-8') as old:
atomic_write(config_path, old.read())
raise
if Path(f'{config_path}.bak').exists():
os.remove(f'{config_path}.bak')
return {
key.upper(): CONFIG.get(key.upper())
for key in config.keys()
}
def load_config(defaults: ConfigDefaultDict,
config: Optional[ConfigDict]=None,
out_dir: Optional[str]=None,
env_vars: Optional[os._Environ]=None,
config_file_vars: Optional[Dict[str, str]]=None) -> ConfigDict:
env_vars = env_vars or os.environ
config_file_vars = config_file_vars or load_config_file(out_dir=out_dir)
extended_config: ConfigDict = config.copy() if config else {}
for key, default in defaults.items():
try:
extended_config[key] = load_config_val(
key,
default=default['default'],
type=default.get('type'),
aliases=default.get('aliases'),
config=extended_config,
env_vars=env_vars,
config_file_vars=config_file_vars,
)
except KeyboardInterrupt:
raise SystemExit(0)
except Exception as e:
stderr()
stderr(f'[X] Error while loading configuration value: {key}', color='red', config=extended_config)
stderr(' {}: {}'.format(e.__class__.__name__, e))
stderr()
stderr(' Check your config for mistakes and try again (your archive data is unaffected).')
stderr()
stderr(' For config documentation and examples see:')
stderr(' https://github.com/ArchiveBox/ArchiveBox/wiki/Configuration')
stderr()
# raise
raise SystemExit(2)
return extended_config
# def write_config(config: ConfigDict):
# with open(os.path.join(config['OUTPUT_DIR'], CONFIG_FILENAME), 'w+') as f:
# Logging Helpers
def stdout(*args, color: Optional[str]=None, prefix: str='', config: Optional[ConfigDict]=None) -> None:
ansi = DEFAULT_CLI_COLORS if (config or {}).get('USE_COLOR') else ANSI
if color:
strs = [ansi[color], ' '.join(str(a) for a in args), ansi['reset'], '\n']
else:
strs = [' '.join(str(a) for a in args), '\n']
sys.stdout.write(prefix + ''.join(strs))
def stderr(*args, color: Optional[str]=None, prefix: str='', config: Optional[ConfigDict]=None) -> None:
ansi = DEFAULT_CLI_COLORS if (config or {}).get('USE_COLOR') else ANSI
if color:
strs = [ansi[color], ' '.join(str(a) for a in args), ansi['reset'], '\n']
else:
strs = [' '.join(str(a) for a in args), '\n']
sys.stderr.write(prefix + ''.join(strs))
def hint(text: Union[Tuple[str, ...], List[str], str], prefix=' ', config: Optional[ConfigDict]=None) -> None:
ansi = DEFAULT_CLI_COLORS if (config or {}).get('USE_COLOR') else ANSI
if isinstance(text, str):
stderr('{}{lightred}Hint:{reset} {}'.format(prefix, text, **ansi))
else:
stderr('{}{lightred}Hint:{reset} {}'.format(prefix, text[0], **ansi))
for line in text[1:]:
stderr('{} {}'.format(prefix, line))
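# Example calls for the logging helpers above (the color names are assumed to
# be keys of the ANSI/DEFAULT_CLI_COLORS dicts defined earlier in this file):
#
#     stdout('Archiving complete.', color='lightyellow')
#     stderr('[X] Failed to save snapshot', color='red')
#     hint(('Check your network connection,',
#           'or re-run the command with a higher TIMEOUT.'))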
# Dependency Metadata Helpers
def bin_version(binary: Optional[str]) -> Optional[str]:
"""check the presence and return valid version line of a specified binary"""
abspath = bin_path(binary)
if not binary or not abspath:
return None
try:
version_str = run([abspath, "--version"], stdout=PIPE).stdout.strip().decode()
# take first 3 columns of first line of version info
return ' '.join(version_str.split('\n')[0].strip().split()[:3])
except OSError:
pass
# stderr(f'[X] Unable to find working version of dependency: {binary}', color='red')
# stderr(' Make sure it\'s installed, then confirm it\'s working by running:')
# stderr(f' {binary} --version')
# stderr()
# stderr(' If you don\'t want to install it, you can disable it via config. See here for more info:')
# stderr(' https://github.com/ArchiveBox/ArchiveBox/wiki/Install')
return None
def bin_path(binary: Optional[str]) -> Optional[str]:
if binary is None:
return None
node_modules_bin = Path('.') / 'node_modules' / '.bin' / binary
if node_modules_bin.exists():
return str(node_modules_bin.resolve())
return shutil.which(str(Path(binary).expanduser())) or shutil.which(str(binary)) or binary
def bin_hash(binary: Optional[str]) -> Optional[str]:
if binary is None:
return None
abs_path = bin_path(binary)
if abs_path is None or not Path(abs_path).exists():
return None
file_hash = md5()
with io.open(abs_path, mode='rb') as f:
for chunk in iter(lambda: f.read(io.DEFAULT_BUFFER_SIZE), b''):
file_hash.update(chunk)
return f'md5:{file_hash.hexdigest()}'
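# Example of how the three helpers above fit together when probing a dependency
# ('wget' is just an illustrative binary name, and the outputs are hypothetical):
#
#     abspath = bin_path('wget')        # e.g. '/usr/bin/wget', or the bare name if not found
#     version = bin_version('wget')     # e.g. 'GNU Wget 1.21' or None
#     fingerprint = bin_hash('wget')    # e.g. 'md5:6c0e0c9b...' or None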
def find_chrome_binary() -> Optional[str]:
"""find any installed chrome binaries in the default locations"""
# Precedence: Chromium, Chrome, Beta, Canary, Unstable, Dev
# make sure data dir finding precedence order always matches binary finding order
default_executable_paths = (
'chromium-browser',
'chromium',
'/Applications/Chromium.app/Contents/MacOS/Chromium',
'chrome',
'google-chrome',
'/Applications/Google Chrome.app/Contents/MacOS/Google Chrome',
'google-chrome-stable',
'google-chrome-beta',
'google-chrome-canary',
'/Applications/Google Chrome Canary.app/Contents/MacOS/Google Chrome Canary',
'google-chrome-unstable',
'google-chrome-dev',
)
for name in default_executable_paths:
full_path_exists = shutil.which(name)
if full_path_exists:
return name
return None
def find_chrome_data_dir() -> Optional[str]:
"""find any installed chrome user data directories in the default locations"""
# Precedence: Chromium, Chrome, Beta, Canary, Unstable, Dev
# make sure data dir finding precedence order always matches binary finding order
default_profile_paths = (
'~/.config/chromium',
'~/Library/Application Support/Chromium',
'~/AppData/Local/Chromium/User Data',
'~/.config/chrome',
'~/.config/google-chrome',
'~/Library/Application Support/Google/Chrome',
'~/AppData/Local/Google/Chrome/User Data',
'~/.config/google-chrome-stable',
'~/.config/google-chrome-beta',
'~/Library/Application Support/Google/Chrome Canary',
'~/AppData/Local/Google/Chrome SxS/User Data',
'~/.config/google-chrome-unstable',
'~/.config/google-chrome-dev',
)
for path in default_profile_paths:
        full_path = Path(path).expanduser().resolve()
if full_path.exists():
return full_path
return None
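# A quick sketch of calling the two Chrome discovery helpers above
# (return values are illustrative):
#
#     chrome_binary = find_chrome_binary()       # e.g. 'chromium-browser' or None
#     chrome_profile = find_chrome_data_dir()    # a resolved Path or None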
def wget_supports_compression(config):
try:
cmd = [
config['WGET_BINARY'],
"--compression=auto",
"--help",
]
return not run(cmd, stdout=DEVNULL, stderr=DEVNULL).returncode
except (FileNotFoundError, OSError):
return False
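# Example check (assumes CONFIG['WGET_BINARY'] has already been resolved):
#
#     if wget_supports_compression(CONFIG):
#         # safe to pass --compression=auto to wget
#         ...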
def get_code_locations(config: ConfigDict) -> SimpleConfigValueDict:
return {
'PACKAGE_DIR': {
'path': (config['PACKAGE_DIR']).resolve(),
'enabled': True,
'is_valid': (config['PACKAGE_DIR'] / '__main__.py').exists(),
},
'TEMPLATES_DIR': {
'path': (config['TEMPLATES_DIR']).resolve(),
'enabled': True,
'is_valid': (config['TEMPLATES_DIR'] / 'static').exists(),
},
'CUSTOM_TEMPLATES_DIR': {
'path': config['CUSTOM_TEMPLATES_DIR'] and Path(config['CUSTOM_TEMPLATES_DIR']).resolve(),
'enabled': bool(config['CUSTOM_TEMPLATES_DIR']),
'is_valid': config['CUSTOM_TEMPLATES_DIR'] and Path(config['CUSTOM_TEMPLATES_DIR']).exists(),
},
# 'NODE_MODULES_DIR': {
# 'path': ,
# 'enabled': ,
# 'is_valid': (...).exists(),
# },
}
def get_external_locations(config: ConfigDict) -> ConfigValue:
abspath = lambda path: None if path is None else Path(path).resolve()
return {
'CHROME_USER_DATA_DIR': {
'path': abspath(config['CHROME_USER_DATA_DIR']),
'enabled': config['USE_CHROME'] and config['CHROME_USER_DATA_DIR'],
'is_valid': False if config['CHROME_USER_DATA_DIR'] is None else (Path(config['CHROME_USER_DATA_DIR']) / 'Default').exists(),
},
'COOKIES_FILE': {
'path': abspath(config['COOKIES_FILE']),
'enabled': config['USE_WGET'] and config['COOKIES_FILE'],
'is_valid': False if config['COOKIES_FILE'] is None else Path(config['COOKIES_FILE']).exists(),
},
}
def get_data_locations(config: ConfigDict) -> ConfigValue:
return {
'OUTPUT_DIR': {
'path': config['OUTPUT_DIR'].resolve(),
'enabled': True,
'is_valid': (config['OUTPUT_DIR'] / SQL_INDEX_FILENAME).exists(),
},
'SOURCES_DIR': {
'path': config['SOURCES_DIR'].resolve(),
'enabled': True,
'is_valid': config['SOURCES_DIR'].exists(),
},
'LOGS_DIR': {
'path': config['LOGS_DIR'].resolve(),
'enabled': True,
'is_valid': config['LOGS_DIR'].exists(),
},
'ARCHIVE_DIR': {
'path': config['ARCHIVE_DIR'].resolve(),
'enabled': True,
'is_valid': config['ARCHIVE_DIR'].exists(),
},
'CONFIG_FILE': {
'path': config['CONFIG_FILE'].resolve(),
'enabled': True,
'is_valid': config['CONFIG_FILE'].exists(),
},
'SQL_INDEX': {
'path': (config['OUTPUT_DIR'] / SQL_INDEX_FILENAME).resolve(),
'enabled': True,
'is_valid': (config['OUTPUT_DIR'] / SQL_INDEX_FILENAME).exists(),
},
}
def get_dependency_info(config: ConfigDict) -> ConfigValue:
return {
'ARCHIVEBOX_BINARY': {
'path': bin_path(config['ARCHIVEBOX_BINARY']),
'version': config['VERSION'],
'hash': bin_hash(config['ARCHIVEBOX_BINARY']),
'enabled': True,
'is_valid': True,
},
'PYTHON_BINARY': {
'path': bin_path(config['PYTHON_BINARY']),
'version': config['PYTHON_VERSION'],
'hash': bin_hash(config['PYTHON_BINARY']),
'enabled': True,
'is_valid': bool(config['PYTHON_VERSION']),
},
'DJANGO_BINARY': {
'path': bin_path(config['DJANGO_BINARY']),
'version': config['DJANGO_VERSION'],
'hash': bin_hash(config['DJANGO_BINARY']),
'enabled': True,
'is_valid': bool(config['DJANGO_VERSION']),
},
'CURL_BINARY': {
'path': bin_path(config['CURL_BINARY']),
'version': config['CURL_VERSION'],
'hash': bin_hash(config['CURL_BINARY']),
'enabled': config['USE_CURL'],
'is_valid': bool(config['CURL_VERSION']),
},
'WGET_BINARY': {
'path': bin_path(config['WGET_BINARY']),
'version': config['WGET_VERSION'],
'hash': bin_hash(config['WGET_BINARY']),
'enabled': config['USE_WGET'],
'is_valid': bool(config['WGET_VERSION']),
},
'NODE_BINARY': {
'path': bin_path(config['NODE_BINARY']),
'version': config['NODE_VERSION'],
'hash': bin_hash(config['NODE_BINARY']),
'enabled': config['USE_NODE'],
'is_valid': bool(config['NODE_VERSION']),
},
'SINGLEFILE_BINARY': {
'path': bin_path(config['SINGLEFILE_BINARY']),
'version': config['SINGLEFILE_VERSION'],
'hash': bin_hash(config['SINGLEFILE_BINARY']),
'enabled': config['USE_SINGLEFILE'],
'is_valid': bool(config['SINGLEFILE_VERSION']),
},
'READABILITY_BINARY': {
'path': bin_path(config['READABILITY_BINARY']),
'version': config['READABILITY_VERSION'],
'hash': bin_hash(config['READABILITY_BINARY']),
'enabled': config['USE_READABILITY'],
'is_valid': bool(config['READABILITY_VERSION']),
},
'MERCURY_BINARY': {
'path': bin_path(config['MERCURY_BINARY']),
'version': config['MERCURY_VERSION'],
'hash': bin_hash(config['MERCURY_BINARY']),
'enabled': config['USE_MERCURY'],
'is_valid': bool(config['MERCURY_VERSION']),
},
'GIT_BINARY': {
'path': bin_path(config['GIT_BINARY']),
'version': config['GIT_VERSION'],
'hash': bin_hash(config['GIT_BINARY']),
'enabled': config['USE_GIT'],
'is_valid': bool(config['GIT_VERSION']),
},
'YOUTUBEDL_BINARY': {
'path': bin_path(config['YOUTUBEDL_BINARY']),
'version': config['YOUTUBEDL_VERSION'],
'hash': bin_hash(config['YOUTUBEDL_BINARY']),
'enabled': config['USE_YOUTUBEDL'],
'is_valid': bool(config['YOUTUBEDL_VERSION']),
},
'CHROME_BINARY': {
'path': bin_path(config['CHROME_BINARY']),
'version': config['CHROME_VERSION'],
'hash': bin_hash(config['CHROME_BINARY']),
'enabled': config['USE_CHROME'],
'is_valid': bool(config['CHROME_VERSION']),
},
'RIPGREP_BINARY': {
'path': bin_path(config['RIPGREP_BINARY']),
'version': config['RIPGREP_VERSION'],
'hash': bin_hash(config['RIPGREP_BINARY']),
'enabled': config['USE_RIPGREP'],
'is_valid': bool(config['RIPGREP_VERSION']),
},
# TODO: add an entry for the sonic search backend?
# 'SONIC_BINARY': {
# 'path': bin_path(config['SONIC_BINARY']),
# 'version': config['SONIC_VERSION'],
# 'hash': bin_hash(config['SONIC_BINARY']),
# 'enabled': config['USE_SONIC'],
# 'is_valid': bool(config['SONIC_VERSION']),
# },
}
def get_chrome_info(config: ConfigDict) -> ConfigValue:
return {
'TIMEOUT': config['TIMEOUT'],
'RESOLUTION': config['RESOLUTION'],
'CHECK_SSL_VALIDITY': config['CHECK_SSL_VALIDITY'],
'CHROME_BINARY': config['CHROME_BINARY'],
'CHROME_HEADLESS': config['CHROME_HEADLESS'],
'CHROME_SANDBOX': config['CHROME_SANDBOX'],
'CHROME_USER_AGENT': config['CHROME_USER_AGENT'],
'CHROME_USER_DATA_DIR': config['CHROME_USER_DATA_DIR'],
}
# ******************************************************************************
# ******************************************************************************
# ******************************** Load Config *********************************
# ******* (compile the defaults, configs, and metadata all into CONFIG) ********
# ******************************************************************************
# ******************************************************************************
def load_all_config():
CONFIG: ConfigDict = {}
for section_name, section_config in CONFIG_SCHEMA.items():
CONFIG = load_config(section_config, CONFIG)
return load_config(DYNAMIC_CONFIG_SCHEMA, CONFIG)
# add all final config values in CONFIG to globals in this file
CONFIG = load_all_config()
globals().update(CONFIG)
# this lets us do: from .config import DEBUG, MEDIA_TIMEOUT, ...
# ******************************************************************************
# ******************************************************************************
# ******************************************************************************
# ******************************************************************************
# ******************************************************************************
########################### System Environment Setup ###########################
# Set timezone to UTC and umask to OUTPUT_PERMISSIONS
os.environ["TZ"] = 'UTC'
os.umask(0o777 - int(OUTPUT_PERMISSIONS, base=8)) # noqa: F821
# add ./node_modules/.bin to $PATH so we can use node scripts in extractors
NODE_BIN_PATH = str((Path(CONFIG["OUTPUT_DIR"]).absolute() / 'node_modules' / '.bin'))
sys.path.append(NODE_BIN_PATH)
# disable stderr "you really shouldn't disable ssl" warnings with library config
if not CONFIG['CHECK_SSL_VALIDITY']:
import urllib3
import requests
requests.packages.urllib3.disable_warnings(requests.packages.urllib3.exceptions.InsecureRequestWarning)
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
########################### Config Validity Checkers ###########################
def check_system_config(config: ConfigDict=CONFIG) -> None:
### Check system environment
if config['USER'] == 'root':
stderr('[!] ArchiveBox should never be run as root!', color='red')
stderr(' For more information, see the security overview documentation:')
stderr(' https://github.com/ArchiveBox/ArchiveBox/wiki/Security-Overview#do-not-run-as-root')
raise SystemExit(2)
### Check Python environment
if sys.version_info[:3] < (3, 6, 0):
        stderr(f'[X] Python version is not new enough: {config["PYTHON_VERSION"]} (>=3.6 is required)', color='red')
stderr(' See https://github.com/ArchiveBox/ArchiveBox/wiki/Troubleshooting#python for help upgrading your Python installation.')
raise SystemExit(2)
if config['PYTHON_ENCODING'] not in ('UTF-8', 'UTF8'):
stderr(f'[X] Your system is running python3 scripts with a bad locale setting: {config["PYTHON_ENCODING"]} (it should be UTF-8).', color='red')
stderr(' To fix it, add the line "export PYTHONIOENCODING=UTF-8" to your ~/.bashrc file (without quotes)')
stderr(' Or if you\'re using ubuntu/debian, run "dpkg-reconfigure locales"')
stderr('')
stderr(' Confirm that it\'s fixed by opening a new shell and running:')
stderr(' python3 -c "import sys; print(sys.stdout.encoding)" # should output UTF-8')
raise SystemExit(2)
# stderr('[i] Using Chrome binary: {}'.format(shutil.which(CHROME_BINARY) or CHROME_BINARY))
# stderr('[i] Using Chrome data dir: {}'.format(os.path.abspath(CHROME_USER_DATA_DIR)))
if config['CHROME_USER_DATA_DIR'] is not None:
if not (Path(config['CHROME_USER_DATA_DIR']) / 'Default').exists():
stderr('[X] Could not find profile "Default" in CHROME_USER_DATA_DIR.', color='red')
stderr(f' {config["CHROME_USER_DATA_DIR"]}')
stderr(' Make sure you set it to a Chrome user data directory containing a Default profile folder.')
stderr(' For more info see:')
stderr(' https://github.com/ArchiveBox/ArchiveBox/wiki/Configuration#CHROME_USER_DATA_DIR')
if '/Default' in str(config['CHROME_USER_DATA_DIR']):
stderr()
stderr(' Try removing /Default from the end e.g.:')
stderr(' CHROME_USER_DATA_DIR="{}"'.format(config['CHROME_USER_DATA_DIR'].split('/Default')[0]))
raise SystemExit(2)
def check_dependencies(config: ConfigDict=CONFIG, show_help: bool=True) -> None:
invalid_dependencies = [
(name, info) for name, info in config['DEPENDENCIES'].items()
if info['enabled'] and not info['is_valid']
]
if invalid_dependencies and show_help:
stderr(f'[!] Warning: Missing {len(invalid_dependencies)} recommended dependencies', color='lightyellow')
for dependency, info in invalid_dependencies:
stderr(
' ! {}: {} ({})'.format(
dependency,
info['path'] or 'unable to find binary',
info['version'] or 'unable to detect version',
)
)
if dependency in ('YOUTUBEDL_BINARY', 'CHROME_BINARY', 'SINGLEFILE_BINARY', 'READABILITY_BINARY', 'MERCURY_BINARY'):
hint(('To install all packages automatically run: archivebox setup',
f'or to disable it and silence this warning: archivebox config --set SAVE_{dependency.rsplit("_", 1)[0]}=False',
''), prefix=' ')
stderr('')
if config['TIMEOUT'] < 5:
stderr(f'[!] Warning: TIMEOUT is set too low! (currently set to TIMEOUT={config["TIMEOUT"]} seconds)', color='red')
        stderr('    You must allow *at least* 5 seconds for indexing and archive methods to run successfully.')
stderr(' (Setting it to somewhere between 30 and 3000 seconds is recommended)')
stderr()
stderr(' If you want to make ArchiveBox run faster, disable specific archive methods instead:')
stderr(' https://github.com/ArchiveBox/ArchiveBox/wiki/Configuration#archive-method-toggles')
stderr()
elif config['USE_CHROME'] and config['TIMEOUT'] < 15:
stderr(f'[!] Warning: TIMEOUT is set too low! (currently set to TIMEOUT={config["TIMEOUT"]} seconds)', color='red')
stderr(' Chrome will fail to archive all sites if set to less than ~15 seconds.')
stderr(' (Setting it to somewhere between 30 and 300 seconds is recommended)')
stderr()
stderr(' If you want to make ArchiveBox run faster, disable specific archive methods instead:')
stderr(' https://github.com/ArchiveBox/ArchiveBox/wiki/Configuration#archive-method-toggles')
stderr()
if config['USE_YOUTUBEDL'] and config['MEDIA_TIMEOUT'] < 20:
stderr(f'[!] Warning: MEDIA_TIMEOUT is set too low! (currently set to MEDIA_TIMEOUT={config["MEDIA_TIMEOUT"]} seconds)', color='red')
stderr(' Youtube-dl will fail to archive all media if set to less than ~20 seconds.')
stderr(' (Setting it somewhere over 60 seconds is recommended)')
stderr()
stderr(' If you want to disable media archiving entirely, set SAVE_MEDIA=False instead:')
stderr(' https://github.com/ArchiveBox/ArchiveBox/wiki/Configuration#save_media')
stderr()
def check_data_folder(out_dir: Union[str, Path, None]=None, config: ConfigDict=CONFIG) -> None:
output_dir = out_dir or config['OUTPUT_DIR']
assert isinstance(output_dir, (str, Path))
archive_dir_exists = (Path(output_dir) / ARCHIVE_DIR_NAME).exists()
if not archive_dir_exists:
stderr('[X] No archivebox index found in the current directory.', color='red')
stderr(f' {output_dir}', color='lightyellow')
stderr()
stderr(' {lightred}Hint{reset}: Are you running archivebox in the right folder?'.format(**config['ANSI']))
stderr(' cd path/to/your/archive/folder')
stderr(' archivebox [command]')
stderr()
stderr(' {lightred}Hint{reset}: To create a new archive collection or import existing data in this folder, run:'.format(**config['ANSI']))
stderr(' archivebox init')
raise SystemExit(2)
def check_migrations(out_dir: Union[str, Path, None]=None, config: ConfigDict=CONFIG):
output_dir = out_dir or config['OUTPUT_DIR']
from .index.sql import list_migrations
pending_migrations = [name for status, name in list_migrations() if not status]
if pending_migrations:
stderr('[X] This collection was created with an older version of ArchiveBox and must be upgraded first.', color='lightyellow')
stderr(f' {output_dir}')
stderr()
stderr(f' To upgrade it to the latest version and apply the {len(pending_migrations)} pending migrations, run:')
stderr(' archivebox init')
raise SystemExit(3)
(Path(output_dir) / SOURCES_DIR_NAME).mkdir(exist_ok=True)
(Path(output_dir) / LOGS_DIR_NAME).mkdir(exist_ok=True)
def setup_django(out_dir: Path=None, check_db=False, config: ConfigDict=CONFIG, in_memory_db=False) -> None:
check_system_config()
output_dir = out_dir or Path(config['OUTPUT_DIR'])
assert isinstance(output_dir, Path) and isinstance(config['PACKAGE_DIR'], Path)
try:
from django.core.management import call_command
sys.path.append(str(config['PACKAGE_DIR']))
os.environ.setdefault('OUTPUT_DIR', str(output_dir))
assert (config['PACKAGE_DIR'] / 'core' / 'settings.py').exists(), 'settings.py was not found at archivebox/core/settings.py'
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'core.settings')
# Check to make sure JSON extension is available in our Sqlite3 instance
try:
cursor = sqlite3.connect(':memory:').cursor()
cursor.execute('SELECT JSON(\'{"a": "b"}\')')
except sqlite3.OperationalError as exc:
stderr(f'[X] Your SQLite3 version is missing the required JSON1 extension: {exc}', color='red')
hint([
'Upgrade your Python version or install the extension manually:',
'https://code.djangoproject.com/wiki/JSON1Extension'
])
if in_memory_db:
            # some commands (e.g. oneshot) don't store a long-lived sqlite3 db file on disk.
# in those cases we create a temporary in-memory db and run the migrations
# immediately to get a usable in-memory-database at startup
os.environ.setdefault("ARCHIVEBOX_DATABASE_NAME", ":memory:")
django.setup()
call_command("migrate", interactive=False, verbosity=0)
else:
# Otherwise use default sqlite3 file-based database and initialize django
# without running migrations automatically (user runs them manually by calling init)
django.setup()
from django.conf import settings
# log startup message to the error log
with open(settings.ERROR_LOG, "a+", encoding='utf-8') as f:
command = ' '.join(sys.argv)
ts = datetime.now(timezone.utc).strftime('%Y-%m-%d__%H:%M:%S')
f.write(f"\n> {command}; ts={ts} version={config['VERSION']} docker={config['IN_DOCKER']} is_tty={config['IS_TTY']}\n")
if check_db:
# Enable WAL mode in sqlite3
from django.db import connection
with connection.cursor() as cursor:
                cursor.execute("PRAGMA journal_mode")
                current_mode = cursor.fetchone()[0]
if current_mode != 'wal':
cursor.execute("PRAGMA journal_mode=wal;")
# Create cache table in DB if needed
try:
from django.core.cache import cache
cache.get('test', None)
except django.db.utils.OperationalError:
call_command("createcachetable", verbosity=0)
# if archivebox gets imported multiple times, we have to close
# the sqlite3 whenever we init from scratch to avoid multiple threads
# sharing the same connection by accident
from django.db import connections
for conn in connections.all():
conn.close_if_unusable_or_obsolete()
sql_index_path = Path(output_dir) / SQL_INDEX_FILENAME
assert sql_index_path.exists(), (
f'No database file {SQL_INDEX_FILENAME} found in: {config["OUTPUT_DIR"]} (Are you in an ArchiveBox collection directory?)')
except KeyboardInterrupt:
raise SystemExit(2)
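# A minimal sketch of calling setup_django() directly:
#
#     setup_django(out_dir=Path('.'), check_db=True)
#     # or, for commands that don't keep a sqlite3 index on disk:
#     setup_django(in_memory_db=True)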
| python | 55,688 |
# -*- coding: utf-8 -*-
"""Example of use
This module shows how to use a theory that has already been implemented. The
implementation of the theory, as well as the data, can be found in the dedicated file.
"""
| python | 197 |
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Context of auto parallel"""
import threading
import mindspore.context as context
from mindspore.parallel._dp_allreduce_fusion import _set_fusion_strategy_by_idx, _set_fusion_strategy_by_size
from mindspore.parallel._ps_context import _is_role_pserver
from mindspore._c_expression import AutoParallelContext
from mindspore._checkparam import args_type_check
_MAX_GROUP_NAME_LEN = 127
_DEFAULT_HCCL_FUSION_GROUP_NAME = "hccl_world_groupsum1"
_DEFAULT_NCCL_FUSION_GROUP_NAME = "nccl_world_groupsum1"
class _AutoParallelContext:
"""
_AutoParallelContext is the environment in which operations are executed
Note:
        Creating a context by instantiating the Context object is not recommended.
        Use auto_parallel_context() to get the context instead, since Context is a singleton.
"""
_instance = None
_instance_lock = threading.Lock()
def __init__(self):
self._context_handle = AutoParallelContext.get_instance()
def __new__(cls):
if cls._instance is None:
cls._instance_lock.acquire()
cls._instance = object.__new__(cls)
cls._instance_lock.release()
return cls._instance
def check_context_handle(self):
"""
Check context handle.
Raises:
ValueError: If the context handle is none.
"""
if self._context_handle is None:
raise ValueError("Context handle is none in context!!!")
def set_device_num(self, device_num):
"""
Set device num for auto parallel.
Args:
device_num (int): The device number.
Raises:
ValueError: If the device num is not in [1, 4096].
"""
self.check_context_handle()
if device_num < 1 or device_num > 4096:
raise ValueError("Device num must be in [1, 4096], but got {}".format(device_num))
self._context_handle.set_device_num(device_num)
def get_device_num(self):
"""Get device num."""
self.check_context_handle()
return self._context_handle.get_device_num()
def set_global_rank(self, global_rank):
"""
Set global rank for auto parallel.
Args:
global_rank (int): The rank id of current rank.
Raises:
            ValueError: If the global rank is not in [0, 4095].
"""
self.check_context_handle()
if global_rank < 0 or global_rank > 4095:
raise ValueError("Global rank must be in [0, 4095], but got {}".format(global_rank))
self._context_handle.set_global_rank(global_rank)
def get_global_rank(self):
"""Get current rank id."""
self.check_context_handle()
return self._context_handle.get_global_rank()
def set_pipeline_stages(self, stages):
"""Set the stages of the pipeline"""
self.check_context_handle()
self._context_handle.set_pipeline_stage_split_num(stages)
def get_pipeline_stages(self):
"""Get the stages of the pipeline"""
self.check_context_handle()
return self._context_handle.get_pipeline_stage_split_num()
def set_gradients_mean(self, gradients_mean):
"""
Set gradients_mean flag.
Note:
If gradients_mean is true, it will insert a div operator after parameter gradients allreduce.
Args:
gradients_mean (bool): The gradients_mean flag.
"""
self.check_context_handle()
self._context_handle.set_gradients_mean(gradients_mean)
def get_gradients_mean(self):
"""Get gradients_mean flag."""
self.check_context_handle()
return self._context_handle.get_gradients_mean()
def set_gradient_fp32_sync(self, gradient_fp32_sync):
"""
Set gradient_fp32_sync.
Note:
If gradient_fp32_sync is true,
it will convert tensor type from fp16 to fp32 before parameter gradients allreduce.
Args:
gradient_fp32_sync (bool): The gradient_fp32_sync flag.
"""
self.check_context_handle()
self._context_handle.set_gradient_fp32_sync(gradient_fp32_sync)
def get_gradient_fp32_sync(self):
"""Get gradient_fp32_sync flag."""
self.check_context_handle()
return self._context_handle.get_gradient_fp32_sync()
def set_loss_repeated_mean(self, loss_repeated_mean):
"""
Set loss_repeated_mean flag.
Note:
If loss_repeated_mean is true,
Distributed automatic differentiation will perform a mean operator
in backward in the case of repeated calculations.
Args:
loss_repeated_mean (bool): The loss_repeated_mean flag.
"""
self.check_context_handle()
self._context_handle.set_loss_repeated_mean(loss_repeated_mean)
def get_loss_repeated_mean(self):
"""Get loss_repeated_mean flag."""
self.check_context_handle()
return self._context_handle.get_loss_repeated_mean()
def set_parallel_mode(self, parallel_mode):
"""
Set parallel mode for auto parallel.
Args:
parallel_mode (str): The parallel mode of auto parallel.
Raises:
ValueError: If parallel mode is not supported.
"""
self.check_context_handle()
ret = self._context_handle.set_parallel_mode(parallel_mode)
if ret is False:
raise ValueError("Parallel mode does not support {}".format(parallel_mode))
def get_parallel_mode(self):
"""Get parallel mode."""
self.check_context_handle()
if _is_role_pserver():
return context.ParallelMode.STAND_ALONE
return self._context_handle.get_parallel_mode()
def set_strategy_search_mode(self, auto_parallel_search_mode):
"""
Set search mode of strategy.
Args:
auto_parallel_search_mode (str): The search mode of strategy.
"""
self.check_context_handle()
ret = self._context_handle.set_strategy_search_mode(auto_parallel_search_mode)
if ret is False:
raise ValueError("Strategy search mode does not support {}".format(auto_parallel_search_mode))
def get_strategy_search_mode(self):
"""Get search mode of strategy."""
self.check_context_handle()
return self._context_handle.get_strategy_search_mode()
def set_parameter_broadcast(self, parameter_broadcast):
"""
Set parameter broadcast.
Args:
parameter_broadcast (bool): Parameter broadcast or not.
"""
self.check_context_handle()
self._context_handle.set_parameter_broadcast(parameter_broadcast)
def get_parameter_broadcast(self):
"""Get parameter broadcast flag."""
self.check_context_handle()
return self._context_handle.get_parameter_broadcast()
def set_strategy_ckpt_load_file(self, strategy_ckpt_load_file):
"""
Set strategy checkpoint load path.
Args:
            strategy_ckpt_load_file (str): Path to load parallel strategy checkpoint.
"""
self.check_context_handle()
self._context_handle.set_strategy_ckpt_load_file(strategy_ckpt_load_file)
def get_strategy_ckpt_load_file(self):
"""Get strategy checkpoint load path."""
self.check_context_handle()
return self._context_handle.get_strategy_ckpt_load_file()
def set_full_batch(self, full_batch):
"""
Set whether load full batch on each device.
Args:
full_batch (bool): True if load full batch on each device.
"""
self.check_context_handle()
self._context_handle.set_full_batch(full_batch)
def get_full_batch(self):
"""Get whether load full batch on each device."""
self.check_context_handle()
if _is_role_pserver():
return False
return self._context_handle.get_full_batch()
def set_grad_accumulation_step(self, grad_accumulation_step):
"""
Set grad accumulation step.
Args:
grad_accumulation_step (int): The grad accumulation step.
"""
self.check_context_handle()
self._context_handle.set_grad_accumulation_step(grad_accumulation_step)
def get_grad_accumulation_step(self):
"""Get grad accumulation step."""
self.check_context_handle()
return self._context_handle.get_grad_accumulation_step()
def set_strategy_ckpt_save_file(self, strategy_ckpt_save_file):
"""
Set strategy checkpoint save path.
Args:
            strategy_ckpt_save_file (str): Path to save parallel strategy checkpoint.
"""
self.check_context_handle()
import os
dir_path = os.path.dirname(strategy_ckpt_save_file)
if dir_path and not os.path.exists(dir_path):
os.makedirs(dir_path)
self._context_handle.set_strategy_ckpt_save_file(strategy_ckpt_save_file)
def get_strategy_ckpt_save_file(self):
"""Get strategy checkpoint save path."""
self.check_context_handle()
return self._context_handle.get_strategy_ckpt_save_file()
def set_group_ckpt_save_file(self, group_ckpt_save_file):
"""Set group checkpoint save path."""
self.check_context_handle()
import os
dir_path = os.path.dirname(group_ckpt_save_file)
if dir_path and not os.path.exists(dir_path):
os.makedirs(dir_path)
self._context_handle.set_group_ckpt_save_file(group_ckpt_save_file)
def get_parameter_broadcast_is_set(self):
"""Get parameter broadcast is set or not."""
self.check_context_handle()
return self._context_handle.get_parameter_broadcast_is_set()
def set_all_reduce_fusion_split_indices(self, indices, group=""):
"""
Set allreduce fusion strategy by parameters indices.
Args:
indices (list): Indices list.
group (str): The communication group of hccl/nccl.
Raises:
TypeError: If type of indices item is not int.
TypeError: If group is not a python str.
"""
self.check_context_handle()
if not indices:
raise ValueError('indices can not be empty')
if isinstance(indices, (list)):
for index in indices:
if not isinstance(index, int):
raise TypeError('indices has invalid value')
else:
raise TypeError('indices must be a python list')
if len(set(indices)) != len(indices):
raise ValueError('indices has duplicate elements')
if sorted(indices) != indices:
raise ValueError('elements in indices must be sorted in ascending order')
if isinstance(group, (str)):
group_len = len(group)
if group_len > _MAX_GROUP_NAME_LEN:
                raise ValueError(f'Group name len is out of range {_MAX_GROUP_NAME_LEN}')
else:
raise TypeError('Group must be a python str')
if group == "":
if context.get_context("device_target") == "Ascend":
group = _DEFAULT_HCCL_FUSION_GROUP_NAME
else:
group = _DEFAULT_NCCL_FUSION_GROUP_NAME
self._context_handle.set_all_reduce_fusion_split_indices(indices, group)
if context.get_context("device_target") == "Ascend" and context.get_context("enable_ge"):
_set_fusion_strategy_by_idx(indices)
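    # Example (a sketch of the validation rules enforced above: indices must be
    # unique ints sorted in ascending order):
    #
    #     auto_parallel_context().set_all_reduce_fusion_split_indices([20, 40, 60])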
def get_all_reduce_fusion_split_indices(self, group=""):
"""
Get allreduce fusion split indices.
Args:
group (str): The communication group of hccl/nccl.
Returns:
Return split sizes list according to the group.
Raises:
TypeError: If group is not a python str.
"""
self.check_context_handle()
if isinstance(group, (str)):
group_len = len(group)
if group_len > _MAX_GROUP_NAME_LEN:
                raise ValueError(f'Group name len is out of range {_MAX_GROUP_NAME_LEN}')
else:
raise TypeError('Group must be a python str')
if group == "":
if context.get_context("device_target") == "Ascend":
group = _DEFAULT_HCCL_FUSION_GROUP_NAME
else:
group = _DEFAULT_NCCL_FUSION_GROUP_NAME
return self._context_handle.get_all_reduce_fusion_split_indices(group)
def set_all_reduce_fusion_split_sizes(self, sizes, group=""):
"""
Set allreduce fusion strategy by parameters data sizes.
Args:
sizes (list): Sizes list.
group (str): The communication group of hccl/nccl.
Raises:
TypeError: If type of sizes item is not int.
TypeError: If group is not a python str.
"""
self.check_context_handle()
if isinstance(sizes, (list)):
for size in sizes:
if not isinstance(size, int):
raise TypeError('sizes has invalid value')
else:
raise TypeError('sizes must be a python list')
if isinstance(group, (str)):
group_len = len(group)
if group_len > _MAX_GROUP_NAME_LEN:
                raise ValueError(f'Group name len is out of range {_MAX_GROUP_NAME_LEN}')
else:
raise TypeError('Group must be a python str')
if group == "":
if context.get_context("device_target") == "Ascend":
group = _DEFAULT_HCCL_FUSION_GROUP_NAME
else:
group = _DEFAULT_NCCL_FUSION_GROUP_NAME
self._context_handle.set_all_reduce_fusion_split_sizes(sizes, group)
if context.get_context("device_target") == "Ascend":
_set_fusion_strategy_by_size(sizes)
def get_all_reduce_fusion_split_sizes(self, group=""):
"""
Get allreduce fusion split sizes.
Args:
group (str): The communication group of hccl/nccl.
Returns:
Return split sizes list according to the group.
Raises:
TypeError: If group is not a python str.
"""
self.check_context_handle()
if isinstance(group, (str)):
group_len = len(group)
if group_len > _MAX_GROUP_NAME_LEN:
                raise ValueError(f'Group name len is out of range {_MAX_GROUP_NAME_LEN}')
else:
raise TypeError('Group must be a python str')
if group == "":
if context.get_context("device_target") == "Ascend":
group = _DEFAULT_HCCL_FUSION_GROUP_NAME
else:
group = _DEFAULT_NCCL_FUSION_GROUP_NAME
return self._context_handle.get_all_reduce_fusion_split_sizes(group)
def set_enable_all_reduce_fusion(self, enable_all_reduce_fusion):
"""
Set enable/disable all reduce fusion.
Args:
enable_all_reduce_fusion (bool): Enable/disable all reduce fusion.
"""
self.check_context_handle()
if not isinstance(enable_all_reduce_fusion, bool):
raise TypeError('enable_all_reduce_fusion is invalid type')
self._context_handle.set_enable_all_reduce_fusion(enable_all_reduce_fusion)
def get_enable_all_reduce_fusion(self):
"""Get all reduce fusion flag."""
self.check_context_handle()
return self._context_handle.get_enable_all_reduce_fusion()
def get_device_num_is_set(self):
"""Get device number is set or not."""
self.check_context_handle()
return self._context_handle.get_device_num_is_set()
def get_global_rank_is_set(self):
"""Get global rank is set or not."""
self.check_context_handle()
return self._context_handle.get_global_rank_is_set()
def set_enable_parallel_optimizer(self, enable_parallel_optimizer):
"""
Set enable/disable parallel optimizer.
Args:
            enable_parallel_optimizer (bool): Enable/disable parallel optimizer.
"""
self.check_context_handle()
if not isinstance(enable_parallel_optimizer, bool):
raise TypeError('enable_parallel_optimizer is invalid type')
self._context_handle.set_enable_parallel_optimizer(enable_parallel_optimizer)
def get_enable_parallel_optimizer(self):
"""Get parallel optimizer flag."""
self.check_context_handle()
return self._context_handle.get_enable_parallel_optimizer()
def set_communi_parallel_mode(self, communi_parallel_mode):
"""
Set communication parallel mode.
Args:
communi_parallel_mode (str): The communication parallel mode.
Raises:
ValueError: If parallel mode is not supported.
"""
self.check_context_handle()
ret = self._context_handle.set_communi_parallel_mode(communi_parallel_mode)
if ret is False:
raise ValueError("Communication parallel mode does not support {}".format(communi_parallel_mode))
def get_communi_parallel_mode(self):
"""Get communication parallel mode."""
self.check_context_handle()
return self._context_handle.get_communi_parallel_mode()
def reset(self):
"""Reset all settings."""
self.check_context_handle()
self._context_handle.reset()
_auto_parallel_context = None
def auto_parallel_context():
"""
Get the global _auto_parallel_context, if it is not created, create a new one.
Returns:
_AutoParallelContext, the global auto parallel context.
"""
global _auto_parallel_context
if _auto_parallel_context is None:
_auto_parallel_context = _AutoParallelContext()
return _auto_parallel_context
_set_auto_parallel_context_func_map = {
"device_num": auto_parallel_context().set_device_num,
"global_rank": auto_parallel_context().set_global_rank,
"gradients_mean": auto_parallel_context().set_gradients_mean,
"gradient_fp32_sync": auto_parallel_context().set_gradient_fp32_sync,
"loss_repeated_mean": auto_parallel_context().set_loss_repeated_mean,
"pipeline_stages": auto_parallel_context().set_pipeline_stages,
"parallel_mode": auto_parallel_context().set_parallel_mode,
"auto_parallel_search_mode": auto_parallel_context().set_strategy_search_mode,
"parameter_broadcast": auto_parallel_context().set_parameter_broadcast,
"strategy_ckpt_load_file": auto_parallel_context().set_strategy_ckpt_load_file,
"strategy_ckpt_save_file": auto_parallel_context().set_strategy_ckpt_save_file,
"group_ckpt_save_file": auto_parallel_context().set_group_ckpt_save_file,
"full_batch": auto_parallel_context().set_full_batch,
"enable_parallel_optimizer": auto_parallel_context().set_enable_parallel_optimizer,
"grad_accumulation_step": auto_parallel_context().set_grad_accumulation_step,
"all_reduce_fusion_config": auto_parallel_context().set_all_reduce_fusion_split_indices,
"communi_parallel_mode": auto_parallel_context().set_communi_parallel_mode}
_get_auto_parallel_context_func_map = {
"device_num": auto_parallel_context().get_device_num,
"global_rank": auto_parallel_context().get_global_rank,
"gradients_mean": auto_parallel_context().get_gradients_mean,
"gradient_fp32_sync": auto_parallel_context().get_gradient_fp32_sync,
"loss_repeated_mean": auto_parallel_context().get_loss_repeated_mean,
"pipeline_stages": auto_parallel_context().get_pipeline_stages,
"parallel_mode": auto_parallel_context().get_parallel_mode,
"auto_parallel_search_mode": auto_parallel_context().get_strategy_search_mode,
"parameter_broadcast": auto_parallel_context().get_parameter_broadcast,
"strategy_ckpt_load_file": auto_parallel_context().get_strategy_ckpt_load_file,
"strategy_ckpt_save_file": auto_parallel_context().get_strategy_ckpt_save_file,
"full_batch": auto_parallel_context().get_full_batch,
"enable_parallel_optimizer": auto_parallel_context().get_enable_parallel_optimizer,
"grad_accumulation_step": auto_parallel_context().get_grad_accumulation_step,
"all_reduce_fusion_config": auto_parallel_context().get_all_reduce_fusion_split_indices,
"communi_parallel_mode": auto_parallel_context().get_communi_parallel_mode}
@args_type_check(device_num=int, global_rank=int, gradients_mean=bool, gradient_fp32_sync=bool,
loss_repeated_mean=bool, parallel_mode=str, auto_parallel_search_mode=str,
parameter_broadcast=bool, strategy_ckpt_load_file=str,
strategy_ckpt_save_file=str, full_batch=bool, enable_parallel_optimizer=bool,
grad_accumulation_step=int, all_reduce_fusion_config=list, group_ckpt_save_file=str,
communi_parallel_mode=str)
def _set_auto_parallel_context(**kwargs):
"""
Set auto parallel context.
Note:
Attribute name is required for setting attributes.
Args:
device_num (int): Available device number, the value must be in [1, 4096]. Default: 1.
global_rank (int): Global rank id, the value must be in [0, 4095]. Default: 0.
gradients_mean (bool): Whether to perform mean operator after all-reduce of mirror. Default: False.
loss_repeated_mean (bool): Whether to perform mean operator in backward in the case of repeated
calculations. Default: True.
gradient_fp32_sync (bool): Gradients allreduce by fp32 even though gradients is fp16 if this flag is True.
Default: True.
parallel_mode (str): There are five kinds of parallel modes, "stand_alone", "data_parallel",
"hybrid_parallel", "semi_auto_parallel" and "auto_parallel". Default: "stand_alone".
- stand_alone: Only one processor working.
- data_parallel: Distributing the data across different processors.
- hybrid_parallel: Achieving data parallelism and model parallelism manually.
- semi_auto_parallel: Achieving data parallelism and model parallelism by
setting parallel strategies.
- auto_parallel: Achieving parallelism automatically.
auto_parallel_search_mode (str): There are two kinds of search modes, "recursive_programming"
and "dynamic_programming". Default: "dynamic_programming".
- recursive_programming: Recursive programming search mode.
- dynamic_programming: Dynamic programming search mode.
parameter_broadcast (bool): Indicating whether to broadcast parameters before training.
"stand_alone", "semi_auto_parallel" and "auto_parallel" do not support parameter
broadcast. Default: False.
strategy_ckpt_load_file (str): The path to load parallel strategy checkpoint. Default: ''
strategy_ckpt_save_file (str): The path to save parallel strategy checkpoint. Default: ''
group_ckpt_save_file (str): The path to save parallel group checkpoint. Default: ''
full_batch (bool): Whether to load the whole batch on each device. Default: False.
enable_parallel_optimizer (bool): Enable using optimizer segmentation or not. Default: False.
all_reduce_fusion_config (list): Set allreduce fusion strategy by parameters indices.
        pipeline_stages (int): Set the stage information for pipeline parallel. This indicates how
            the devices are distributed along the pipeline. The total devices will be divided into
            'pipeline_stages' stages. This currently can only be used when the
            parallel mode semi_auto_parallel is enabled. Default: 0
        communi_parallel_mode (str): There are three kinds of communication parallel modes, "all_group_parallel",
"same_server_group_parallel" and "no_group_parallel". Default: "all_group_parallel".
- all_group_parallel: All communication groups are in parallel.
- same_server_group_parallel: Only the communication groups within the same server are parallel.
- no_group_parallel: All communication groups are not parallel.
Raises:
ValueError: If input key is not attribute in auto parallel context.
"""
for key, value in kwargs.items():
if key not in _set_auto_parallel_context_func_map:
raise ValueError("Set context keyword %s is not recognized!" % key)
set_func = _set_auto_parallel_context_func_map[key]
set_func(value)
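# Example (a sketch; these private helpers are presumably wrapped by the public
# mindspore.context.set_auto_parallel_context API):
#
#     _set_auto_parallel_context(device_num=8, global_rank=0,
#                                parallel_mode="data_parallel",
#                                gradients_mean=True)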
def _get_auto_parallel_context(attr_key):
"""
Get auto parallel context attribute value according to the key.
Args:
attr_key (str): The key of the attribute.
Returns:
Return attribute value according to the key.
Raises:
ValueError: If input key is not attribute in auto parallel context.
"""
if attr_key not in _get_auto_parallel_context_func_map:
raise ValueError("Get context keyword %s is not recognized!" % attr_key)
get_func = _get_auto_parallel_context_func_map[attr_key]
return get_func()
def _reset_auto_parallel_context():
"""
Reset auto parallel context attributes to the default values:
- device_num: 1.
- global_rank: 0.
- gradients_mean: False.
- gradient_fp32_sync: True.
- parallel_mode: "stand_alone".
- parameter_broadcast: False.
- strategy_ckpt_load_file: ""
- strategy_ckpt_save_file: ""
- enable_parallel_optimizer: False
- auto_parallel_search_mode: dynamic_programming
- pipeline_stages: 0
"""
auto_parallel_context().reset()
| python | 26,537 |
#!/usr/bin/env python
"""Tests for `ldsnotes` package."""
import pytest
import os
from ldsnotes import Notes, Bookmark, Journal, Highlight, Reference
@pytest.fixture(scope="session")
def notes():
return Notes(os.environ['USERNAME'], os.environ['PASSWORD'])
def test_login(notes):
    pass
""" TEST CONSTRUCTOR FOR ALL TYPES OF ANNOTATIONS """
def test_bookmark(notes):
n = notes.search(annot_type="bookmark", start=1, stop=3)
assert len(n) == 2
assert isinstance(n[0], Bookmark)
def test_journal(notes):
n = notes.search(annot_type="journal", start=1, stop=3)
assert len(n) == 2
assert isinstance(n[0], Journal)
def test_highlight(notes):
n = notes.search(annot_type="highlight", start=1, stop=3)
assert len(n) == 2
assert isinstance(n[0], Highlight)
def test_reference(notes):
n = notes.search(annot_type="reference", start=1, stop=3)
assert len(n) == 2
    assert isinstance(n[0], Reference)
""" TEST SEARCH FUNCTION """
def test_tag(notes):
n = notes.search(tag="Faith", start=1, stop=3)
for i in n:
assert "Faith" in i.tags
def test_folder(notes):
n = notes.search(folder="Journal", annot_type="highlight", start=1, stop=3)
j_id = [i.id for i in notes.folders if i.name == "Journal"][0]
for i in n:
        assert j_id in i.folders_id
""" TEST INDEXING """
def test_index(notes):
assert not isinstance(notes[1], list)
assert len(notes[:10]) == 10
assert len(notes[1:11]) == 10
| python | 1,581 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `ethiopian_date_converter` package."""
import unittest
from ethiopian_date_converter import converter
class TestEthiopian_date_converter(unittest.TestCase):
"""Tests for `ethiopian_date_converter` package."""
def test_ethiopic_to_gregorian(self):
pairs = [
[('5492-05-07', 'AA'), '0000-01-01'],
[('5493-05-08', 'AA'), '0001-01-01'],
[('5499-13-06', 'AA'), '0007-08-27'],
[('5500-01-01', 'AA'), '0007-08-28'],
[('5500-01-02', 'AA'), '0007-08-29'],
[('5500-13-05', 'AA'), '0008-08-26'],
[('1855-02-20', ), '1862-10-29'],
[('1857-10-29', ), '1865-07-05'],
[('1858-05-22', ), '1866-01-29'],
[('1858-08-10', ), '1866-04-17'],
[('1859-04-28', ), '1867-01-05'],
[('1860-05-05', ), '1868-01-13'],
[('0001-01-01', ), '0008-08-27'],
[('0002-01-01', ), '0009-08-27'],
[('0003-01-01', ), '0010-08-27'],
[('0004-01-01', ), '0011-08-28'],
[('0001-13-05', ), '0009-08-26'],
[('0002-13-05', ), '0010-08-26'],
[('0003-13-05', ), '0011-08-26'],
[('0003-13-06', ), '0011-08-27'],
[('0004-13-05', ), '0012-08-26'],
[('1575-02-06', ), '1582-10-13'],
[('1575-02-07', ), '1582-10-14'],
[('1575-02-08', ), '1582-10-15'],
[('1575-02-09', ), '1582-10-16'],
[('1892-04-23', ), '1900-01-01'],
[('1997-04-23', ), '2005-01-01'],
[('2000-13-05', ), '2008-09-10'],
[('1893-04-22', ), '1900-12-31'],
[('1985-04-22', ), '1992-12-31'],
[('1989-04-22', ), '1996-12-31'],
[('1993-04-22', ), '2000-12-31'],
[('1997-04-22', ), '2004-12-31'],
[('2001-04-22', ), '2008-12-31'],
[('2993-04-14', ), '3000-12-31'],
[('3993-04-07', ), '4000-12-31'],
[('5993-03-22', ), '6000-12-31']]
for ethiopic, gregorian in pairs:
assert converter(*ethiopic, to='gregorian') == gregorian, gregorian
def test_gregorian_to_ethiopic(self):
pairs = [[('1862-10-29', ), '1855-02-20'],
[('1865-07-05', ), '1857-10-29'],
[('1866-01-29', ), '1858-05-22'],
[('1866-04-17', ), '1858-08-10'],
[('1867-01-05', ), '1859-04-28'],
[('1868-01-13', ), '1860-05-05'],
[('0001-01-01', ), '5493-05-08'],
[('0007-08-27', ), '5499-13-06'],
[('0007-08-28', ), '5500-01-01'],
[('0007-08-29', ), '5500-01-02'],
[('0008-08-27', ), '0001-01-01'],
[('0009-08-27', ), '0002-01-01'],
[('0010-08-27', ), '0003-01-01'],
[('0011-08-28', ), '0004-01-01'],
[('0008-08-26', ), '5500-13-05'],
[('0009-08-26', ), '0001-13-05'],
[('0010-08-26', ), '0002-13-05'],
[('0011-08-26', ), '0003-13-05'],
[('0011-08-27', ), '0003-13-06'],
[('0012-08-26', ), '0004-13-05'],
[('1582-10-13', ), '1575-02-06'],
[('1582-10-14', ), '1575-02-07'],
[('1582-10-15', ), '1575-02-08'],
[('1582-10-16', ), '1575-02-09'],
[('1900-01-01', ), '1892-04-23'],
[('2005-01-01', ), '1997-04-23'],
[('2008-09-10', ), '2000-13-05'],
[('1900-12-31', ), '1893-04-22'],
[('1992-12-31', ), '1985-04-22'],
[('1996-12-31', ), '1989-04-22'],
[('2000-12-31', ), '1993-04-22'],
[('2004-12-31', ), '1997-04-22'],
[('2008-12-31', ), '2001-04-22'],
[('3000-12-31', ), '2993-04-14'],
[('4000-12-31', ), '3993-04-07'],
[('6000-12-31', ), '5993-03-22']]
for gregorian, ethiopic in pairs:
assert converter(*gregorian, to='ethiopian') == ethiopic, ethiopic
if __name__ == '__main__':
unittest.main()
| python | 4,237 |
"""
Unit test for `start-api` CLI
"""
from unittest import TestCase
from mock import patch, Mock
from parameterized import parameterized
from samcli.commands.local.start_api.cli import do_cli as start_api_cli
from samcli.commands.local.lib.exceptions import NoApisDefined, InvalidLayerReference
from samcli.commands.exceptions import UserException
from samcli.commands.validate.lib.exceptions import InvalidSamDocumentException
from samcli.commands.local.lib.exceptions import OverridesNotWellDefinedError
from samcli.local.docker.lambda_container import DebuggingNotSupported
class TestCli(TestCase):
def setUp(self):
self.template = "template"
self.env_vars = "env-vars"
self.debug_port = 123
self.debug_args = "args"
self.debugger_path = "/test/path"
self.docker_volume_basedir = "basedir"
self.docker_network = "network"
self.log_file = "logfile"
self.skip_pull_image = True
self.parameter_overrides = {}
self.container_name = "container-name"
self.layer_cache_basedir = "/some/layers/path"
self.force_image_build = True
self.region_name = "region"
self.ctx_mock = Mock()
self.ctx_mock.region = self.region_name
self.host = "host"
self.port = 123
self.static_dir = "staticdir"
@patch("samcli.commands.local.start_api.cli.InvokeContext")
@patch("samcli.commands.local.start_api.cli.LocalApiService")
def test_cli_must_setup_context_and_start_service(self, local_api_service_mock,
invoke_context_mock):
# Mock the __enter__ method to return a object inside a context manager
context_mock = Mock()
invoke_context_mock.return_value.__enter__.return_value = context_mock
service_mock = Mock()
local_api_service_mock.return_value = service_mock
self.call_cli()
invoke_context_mock.assert_called_with(template_file=self.template,
function_identifier=None,
env_vars_file=self.env_vars,
docker_volume_basedir=self.docker_volume_basedir,
docker_network=self.docker_network,
log_file=self.log_file,
skip_pull_image=self.skip_pull_image,
debug_port=self.debug_port,
debug_args=self.debug_args,
debugger_path=self.debugger_path,
parameter_overrides=self.parameter_overrides,
container_name=self.container_name,
layer_cache_basedir=self.layer_cache_basedir,
force_image_build=self.force_image_build,
aws_region=self.region_name)
local_api_service_mock.assert_called_with(lambda_invoke_context=context_mock,
port=self.port,
host=self.host,
static_dir=self.static_dir)
service_mock.start.assert_called_with()
@patch("samcli.commands.local.start_api.cli.InvokeContext")
@patch("samcli.commands.local.start_api.cli.LocalApiService")
def test_must_raise_if_no_api_defined(self, local_api_service_mock, invoke_context_mock):
# Mock the __enter__ method to return a object inside a context manager
context_mock = Mock()
invoke_context_mock.return_value.__enter__.return_value = context_mock
service_mock = Mock()
local_api_service_mock.return_value = service_mock
service_mock.start.side_effect = NoApisDefined("no apis")
with self.assertRaises(UserException) as context:
self.call_cli()
msg = str(context.exception)
expected = "Template does not have any APIs connected to Lambda functions"
self.assertEquals(msg, expected)
@parameterized.expand([(InvalidSamDocumentException("bad template"), "bad template"),
(InvalidLayerReference(), "Layer References need to be of type "
"'AWS::Serverless::LayerVersion' or 'AWS::Lambda::LayerVersion'"),
(DebuggingNotSupported("Debugging not supported"), "Debugging not supported")
])
@patch("samcli.commands.local.start_api.cli.InvokeContext")
def test_must_raise_user_exception_on_invalid_sam_template(self,
                                                               exception_to_raise,
                                                               exception_message,
invoke_context_mock):
        invoke_context_mock.side_effect = exception_to_raise
with self.assertRaises(UserException) as context:
self.call_cli()
msg = str(context.exception)
        expected = exception_message
self.assertEquals(msg, expected)
@patch("samcli.commands.local.start_api.cli.InvokeContext")
def test_must_raise_user_exception_on_invalid_env_vars(self, invoke_context_mock):
invoke_context_mock.side_effect = OverridesNotWellDefinedError("bad env vars")
with self.assertRaises(UserException) as context:
self.call_cli()
msg = str(context.exception)
expected = "bad env vars"
self.assertEquals(msg, expected)
def call_cli(self):
start_api_cli(ctx=self.ctx_mock,
host=self.host,
port=self.port,
static_dir=self.static_dir,
template=self.template,
env_vars=self.env_vars,
debug_port=self.debug_port,
debug_args=self.debug_args,
debugger_path=self.debugger_path,
docker_volume_basedir=self.docker_volume_basedir,
docker_network=self.docker_network,
log_file=self.log_file,
skip_pull_image=self.skip_pull_image,
parameter_overrides=self.parameter_overrides,
container_name=self.container_name,
layer_cache_basedir=self.layer_cache_basedir,
force_image_build=self.force_image_build)
| python | 6,794 |
from polar.exceptions import OsoError
class AuthorizationError(OsoError):
pass
class NotFoundError(AuthorizationError):
"""
Thrown by the ``authorize`` method of an ``Oso`` instance. This error
indicates that the actor is not only not allowed to perform the given
action but also is not allowed to ``"read"`` the given resource.
Most of the time, your app should handle this error by returning a 404 HTTP
error to the client.
To control which action is used for the distinction between
``NotFoundError`` and ``ForbiddenError``, you can customize the
``read_action`` on your ``Oso`` instance.
"""
def __init__(self):
super().__init__(
"Oso NotFoundError -- The current user does not have permission to read "
"the given resource. You should handle this error by returning a 404 "
"error to the client."
)
class ForbiddenError(AuthorizationError):
"""
Thrown by the ``authorize``, ``authorize_field``, and ``authorize_request``
methods when the action is not allowed.
Most of the time, your app should handle this error by returning a 403 HTTP
error to the client.
"""
def __init__(self):
super().__init__(
"Oso ForbiddenError -- The requested action was not allowed for the "
"given resource. You should handle this error by returning a 403 error "
"to the client."
)
| python | 1,458 |
"""MIT License
Copyright (c) 2019-2021 PythonistaGuild
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import datetime
import logging
from typing import Any, Dict, Union, Optional
import discord
from discord.channel import VoiceChannel
from . import abc
from .pool import Node, NodePool
from .queue import WaitQueue
from .tracks import PartialTrack
from .utils import MISSING
__all__ = ("Player",)
logger: logging.Logger = logging.getLogger(__name__)
VoiceChannel = Union[
discord.VoiceChannel, discord.StageChannel
] # todo: VocalGuildChannel?
class Player(discord.VoiceProtocol):
"""WaveLink Player object.
    This class subclasses :class:`discord.VoiceProtocol` and as such should be treated as one, with additions.
Examples
--------
.. code::
@commands.command()
async def connect(self, channel: discord.VoiceChannel):
voice_client = await channel.connect(cls=wavelink.Player)
.. warning::
This class should not be created manually but can be subclassed to add additional functionality.
You should instead use :meth:`discord.VoiceChannel.connect()` and pass the player object to the cls kwarg.
"""
def __call__(self, client: discord.Client, channel: VoiceChannel):
self.client: discord.Client = client
self.channel: VoiceChannel = channel
return self
def __init__(
self,
client: discord.Client = MISSING,
channel: VoiceChannel = MISSING,
*,
node: Node = MISSING,
):
self.client: discord.Client = client
self.channel: VoiceChannel = channel
if node is MISSING:
node = NodePool.get_node()
self.node: Node = node
self.node._players.append(self)
self._voice_state: Dict[str, Any] = {}
self.last_update: datetime.datetime = MISSING
self.last_position: float = MISSING
self.volume: float = 100
        self._paused: bool = False
        # initialise the connection flag here so is_connected() can be called
        # safely before connect() has ever run
        self._connected: bool = False
self._source: Optional[abc.Playable] = None
# self._equalizer = Equalizer.flat()
self.queue = WaitQueue()
@property
def guild(self) -> discord.Guild:
"""The :class:`discord.Guild` this :class:`Player` is in."""
return self.channel.guild
@property
def user(self) -> discord.ClientUser:
"""The :class:`discord.ClientUser` of the :class:`discord.Client`"""
return self.client.user # type: ignore
@property
def source(self) -> Optional[abc.Playable]:
"""The currently playing audio source."""
return self._source
track = source
@property
def position(self) -> float:
"""The current seek position of the playing source in seconds. If nothing is playing this defaults to ``0``."""
if not self.is_playing():
return 0
if self.is_paused():
return min(self.last_position, self.source.duration) # type: ignore
delta = (
datetime.datetime.now(datetime.timezone.utc) - self.last_update
).total_seconds()
position = round(self.last_position + delta, 1)
return min(position, self.source.duration) # type: ignore
async def update_state(self, state: Dict[str, Any]) -> None:
state = state["state"]
self.last_update = datetime.datetime.fromtimestamp(
state.get("time", 0) / 1000, datetime.timezone.utc
)
self.last_position = round(state.get("position", 0) / 1000, 1)
async def on_voice_server_update(self, data: Dict[str, Any]) -> None:
self._voice_state.update({"event": data})
await self._dispatch_voice_update(self._voice_state)
async def on_voice_state_update(self, data: Dict[str, Any]) -> None:
self._voice_state.update({"sessionId": data["session_id"]})
channel_id = data["channel_id"]
if not channel_id: # We're disconnecting
self._voice_state.clear()
return
self.channel = self.guild.get_channel(int(channel_id)) # type: ignore
await self._dispatch_voice_update({**self._voice_state, "event": data})
async def _dispatch_voice_update(self, voice_state: Dict[str, Any]) -> None:
logger.debug(f"Dispatching voice update:: {self.channel.id}")
if {"sessionId", "event"} == self._voice_state.keys():
await self.node._websocket.send(
op="voiceUpdate", guildId=str(self.guild.id), **voice_state
)
async def connect(self, *, timeout: float, reconnect: bool) -> None:
await self.guild.change_voice_state(channel=self.channel)
self._connected = True
logger.info(f"Connected to voice channel:: {self.channel.id}")
async def disconnect(self, *, force: bool) -> None:
try:
logger.info(f"Disconnected from voice channel:: {self.channel.id}")
await self.guild.change_voice_state(channel=None)
self._connected = False
finally:
self.node.players.remove(self)
self.cleanup()
async def move_to(self, channel: discord.VoiceChannel) -> None:
"""|coro|
Moves the player to a different voice channel.
Parameters
-----------
channel: :class:`discord.VoiceChannel`
The channel to move to. Must be a voice channel.
"""
await self.guild.change_voice_state(channel=channel)
logger.info(f"Moving to voice channel:: {channel.id}")
async def play(
self, source: abc.Playable, replace: bool = True, start: int = 0, end: int = 0
):
"""|coro|
Play a WaveLink Track.
Parameters
----------
source: :class:`abc.Playable`
The :class:`abc.Playable` to initiate playing.
replace: bool
            Whether the current track, if there is one, should be replaced. Defaults to ``True``.
start: int
The position to start the player from in milliseconds. Defaults to ``0``.
end: int
            The position to end the track at, in milliseconds.
            By default (``0``), the track is allowed to play to its end.
Returns
-------
:class:`wavelink.abc.Playable`
The track that is now playing.
"""
if replace or not self.is_playing():
await self.update_state({"state": {}})
self._paused = False
else:
return
if isinstance(source, PartialTrack):
source = await source._search()
self._source = source
payload = {
"op": "play",
"guildId": str(self.guild.id),
"track": source.id,
"noReplace": not replace,
"startTime": str(start),
}
if end > 0:
payload["endTime"] = str(end)
await self.node._websocket.send(**payload)
logger.debug(f"Started playing track:: {str(source)} ({self.channel.id})")
return source
def is_connected(self) -> bool:
"""Indicates whether the player is connected to voice."""
return self._connected
def is_playing(self) -> bool:
"""Indicates wether a track is currently being played."""
return self.is_connected() and self._source is not None
def is_paused(self) -> bool:
"""Indicates wether the currently playing track is paused."""
return self._paused
async def stop(self) -> None:
"""|coro|
Stop the Player's currently playing song.
"""
await self.node._websocket.send(op="stop", guildId=str(self.guild.id))
logger.debug(f"Current track stopped:: {str(self.source)} ({self.channel.id})")
self._source = None
async def set_pause(self, pause: bool) -> None:
"""|coro|
        Set the player's paused state.
Parameters
----------
pause: bool
            A bool indicating whether the player should be paused (``True``) or resumed (``False``).
"""
await self.node._websocket.send(
op="pause", guildId=str(self.guild.id), pause=pause
)
self._paused = pause
logger.info(f"Set pause:: {self._paused} ({self.channel.id})")
async def pause(self) -> None:
"""|coro|
Pauses the player if it was playing.
"""
await self.set_pause(True)
async def resume(self) -> None:
"""|coro|
Resumes the player if it was paused.
"""
await self.set_pause(False)
async def set_volume(self, volume: int) -> None:
"""|coro|
        Set the player's volume. The value is clamped to the range 0-1000.
Parameters
----------
volume: int
The volume to set the player to.
"""
self.volume = max(min(volume, 1000), 0)
await self.node._websocket.send(
op="volume", guildId=str(self.guild.id), volume=self.volume
)
logger.debug(f"Set volume:: {self.volume} ({self.channel.id})")
async def seek(self, position: int = 0) -> None:
"""|coro|
Seek to the given position in the song.
Parameters
----------
position: int
            The position to seek to, as an int in milliseconds. Defaults to ``0``, the start of the track.
"""
await self.node._websocket.send(
op="seek", guildId=str(self.guild.id), position=position
)
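# A minimal usage sketch of the Player defined above; it only calls methods from
# this module, and obtaining a ``Playable`` source is assumed to happen elsewhere
# (for example via a node search) before it is passed in here.
async def _example_player_usage(channel: discord.VoiceChannel, source: abc.Playable) -> None:
    # discord.py builds this Player for us when the class is passed as ``cls``.
    player: Player = await channel.connect(cls=Player)
    await player.play(source)            # start playback
    await player.set_volume(50)          # half volume
    await player.pause()                 # pause ...
    await player.resume()                # ... and resume
    await player.seek(30_000)            # jump to 30 seconds (milliseconds)
    await player.stop()                  # stop the current track
    await player.disconnect(force=True)  # leave voice and clean up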
| python | 10,441 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import sys
import warnings
import mxnet as mx
import pytest
from mxnet import gluon
import gluonnlp as nlp
from gluonnlp.base import get_home_dir
def eprint(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
# disabled since it takes a long time to download the model
@pytest.mark.serial
def _test_pretrained_big_text_models():
text_models = ['big_rnn_lm_2048_512']
pretrained_to_test = {'big_rnn_lm_2048_512': 'gbw'}
for model_name in text_models:
eprint('testing forward for %s' % model_name)
pretrained_dataset = pretrained_to_test.get(model_name)
model, _ = nlp.model.get_model(model_name, dataset_name=pretrained_dataset,
pretrained=True)
print(model)
batch_size = 10
hidden = model.begin_state(batch_size=batch_size, func=mx.nd.zeros)
output, state = model(mx.nd.arange(330).reshape((33, 10)), hidden)
output.wait_to_read()
@pytest.mark.serial
@pytest.mark.remote_required
def test_big_text_models(wikitext2_val_and_counter):
# use a small vocabulary for testing
val, val_freq = wikitext2_val_and_counter
vocab = nlp.Vocab(val_freq)
text_models = ['big_rnn_lm_2048_512']
for model_name in text_models:
eprint('testing forward for %s' % model_name)
model, _ = nlp.model.get_model(model_name, vocab=vocab)
print(model)
model.collect_params().initialize()
batch_size = 10
hidden = model.begin_state(batch_size=batch_size, func=mx.nd.zeros)
output, state = model(mx.nd.arange(330).reshape((33, 10)), hidden)
output.wait_to_read()
@pytest.mark.serial
@pytest.mark.remote_required
@pytest.mark.parametrize('dropout_rate', [0.1, 0.0])
@pytest.mark.parametrize('model_dataset', [('transformer_en_de_512', 'WMT2014')])
def test_transformer_models(dropout_rate, model_dataset):
model_name, pretrained_dataset = model_dataset
src = mx.nd.ones((2, 10))
tgt = mx.nd.ones((2, 8))
valid_len = mx.nd.ones((2,))
eprint('testing forward for %s, dropout rate %f' % (model_name, dropout_rate))
with warnings.catch_warnings(): # TODO https://github.com/dmlc/gluon-nlp/issues/978
warnings.simplefilter("ignore")
model, _, _ = nlp.model.get_model(model_name, dataset_name=pretrained_dataset,
pretrained=pretrained_dataset is not None,
dropout=dropout_rate)
print(model)
if not pretrained_dataset:
model.initialize()
output, state = model(src, tgt, src_valid_length=valid_len, tgt_valid_length=valid_len)
output.wait_to_read()
del model
mx.nd.waitall()
@pytest.mark.serial
@pytest.mark.remote_required
@pytest.mark.parametrize('wo_valid_len', [False, True])
def test_pretrained_roberta_models(wo_valid_len):
models = ['roberta_12_768_12', 'roberta_24_1024_16']
pretrained_datasets = ['openwebtext_ccnews_stories_books_cased']
vocab_size = {'openwebtext_ccnews_stories_books_cased': 50265}
special_tokens = ['<unk>', '<pad>', '<s>', '</s>', '<mask>']
ones = mx.nd.ones((2, 10))
valid_length = mx.nd.ones((2,))
positions = mx.nd.zeros((2, 3))
for model_name in models:
for dataset in pretrained_datasets:
eprint('testing forward for %s on %s' % (model_name, dataset))
model, vocab = nlp.model.get_model(model_name, dataset_name=dataset,
pretrained=True)
assert len(vocab) == vocab_size[dataset]
for token in special_tokens:
assert token in vocab, "Token %s not found in the vocab" % token
assert vocab['RandomWordByHaibin'] == vocab[vocab.unknown_token]
assert vocab.padding_token == '<pad>'
assert vocab.unknown_token == '<unk>'
assert vocab.bos_token == '<s>'
assert vocab.eos_token == '</s>'
model.hybridize()
if wo_valid_len:
output = model(ones, masked_positions=positions)
else:
output = model(ones, valid_length, positions)
output[0].wait_to_read()
del model
mx.nd.waitall()
@pytest.mark.serial
@pytest.mark.remote_required
@pytest.mark.parametrize('wo_valid_len', [False, True])
def test_pretrained_distilbert_models(wo_valid_len):
models = ['distilbert_6_768_12']
pretrained_datasets = ['distilbert_book_corpus_wiki_en_uncased']
vocab_size = {'distilbert_book_corpus_wiki_en_uncased': 30522}
special_tokens = ['[UNK]', '[PAD]', '[SEP]', '[CLS]', '[MASK]']
ones = mx.nd.ones((2, 10))
valid_length = mx.nd.ones((2,))
for model_name in models:
for dataset in pretrained_datasets:
eprint('testing forward for %s on %s' % (model_name, dataset))
model, vocab = nlp.model.get_model(model_name, dataset_name=dataset,
pretrained=True,
root='tests/data/model/')
assert len(vocab) == vocab_size[dataset]
for token in special_tokens:
assert token in vocab, "Token %s not found in the vocab" % token
assert vocab['RandomWordByHaibin'] == vocab[vocab.unknown_token]
assert vocab.padding_token == '[PAD]'
assert vocab.unknown_token == '[UNK]'
model.hybridize()
if wo_valid_len:
output = model(ones)
else:
output = model(ones, valid_length)
output[0].wait_to_read()
del model
mx.nd.waitall()
@pytest.mark.serial
@pytest.mark.remote_required
@pytest.mark.parametrize('disable_missing_parameters', [False, True])
def test_pretrained_bert_models(disable_missing_parameters):
models = ['bert_12_768_12', 'bert_24_1024_16']
pretrained = {
'bert_12_768_12': [
'book_corpus_wiki_en_cased', 'book_corpus_wiki_en_uncased', 'wiki_multilingual_uncased',
'openwebtext_book_corpus_wiki_en_uncased', 'wiki_multilingual_cased', 'wiki_cn_cased', 'scibert_scivocab_uncased',
'scibert_scivocab_cased', 'scibert_basevocab_uncased', 'scibert_basevocab_cased',
'biobert_v1.0_pmc_cased', 'biobert_v1.0_pubmed_cased', 'biobert_v1.0_pubmed_pmc_cased',
'biobert_v1.1_pubmed_cased', 'clinicalbert_uncased', 'kobert_news_wiki_ko_cased'
],
'bert_24_1024_16': ['book_corpus_wiki_en_uncased', 'book_corpus_wiki_en_cased']
}
vocab_size = {'book_corpus_wiki_en_cased': 28996,
'book_corpus_wiki_en_uncased': 30522,
'openwebtext_book_corpus_wiki_en_uncased': 30522,
'wiki_multilingual_cased': 119547,
'wiki_cn_cased': 21128,
'wiki_multilingual_uncased': 105879,
'scibert_scivocab_uncased': 31090,
'scibert_scivocab_cased': 31116,
'scibert_basevocab_uncased': 30522,
'scibert_basevocab_cased': 28996,
'biobert_v1.0_pubmed_cased': 28996,
'biobert_v1.0_pmc_cased': 28996,
'biobert_v1.0_pubmed_pmc_cased': 28996,
'biobert_v1.1_pubmed_cased': 28996,
'clinicalbert_uncased': 30522,
'kobert_news_wiki_ko_cased': 8002}
special_tokens = ['[UNK]', '[PAD]', '[SEP]', '[CLS]', '[MASK]']
ones = mx.nd.ones((2, 10))
valid_length = mx.nd.ones((2,))
positions = mx.nd.zeros((2, 3))
for model_name in models:
pretrained_datasets = pretrained.get(model_name)
for dataset in pretrained_datasets:
has_missing_params = any(n in dataset for n in ('biobert', 'clinicalbert'))
if not has_missing_params and disable_missing_parameters:
# No parameters to disable for models pretrained on this dataset
continue
eprint('testing forward for %s on %s' % (model_name, dataset))
if not has_missing_params:
model, vocab = nlp.model.get_model(model_name, dataset_name=dataset,
pretrained=True)
else:
with pytest.raises(AssertionError):
model, vocab = nlp.model.get_model(model_name, dataset_name=dataset,
pretrained=True)
if not disable_missing_parameters:
model, vocab = nlp.model.get_model(model_name, dataset_name=dataset,
pretrained=True,
pretrained_allow_missing=True)
elif 'biobert' in dataset:
# Biobert specific test case
model, vocab = nlp.model.get_model(model_name, dataset_name=dataset,
pretrained=True,
pretrained_allow_missing=True,
use_decoder=False,
use_classifier=False)
elif 'clinicalbert' in dataset:
# Clinicalbert specific test case
model, vocab = nlp.model.get_model(model_name, dataset_name=dataset,
pretrained=True,
pretrained_allow_missing=True,
use_decoder=False)
else:
assert False, "Testcase needs to be adapted."
assert len(vocab) == vocab_size[dataset]
for token in special_tokens:
assert token in vocab, "Token %s not found in the vocab" % token
assert vocab['RandomWordByHaibin'] == vocab[vocab.unknown_token]
assert vocab.padding_token == '[PAD]'
assert vocab.unknown_token == '[UNK]'
assert vocab.bos_token is None
assert vocab.eos_token is None
if has_missing_params and not disable_missing_parameters:
with pytest.raises(RuntimeError):
output = model(ones, ones, valid_length, positions)
output[0].wait_to_read()
else:
output = model(ones, ones, valid_length, positions)
output[0].wait_to_read()
del model
mx.nd.waitall()
@pytest.mark.serial
@pytest.mark.remote_required
@pytest.mark.parametrize('hparam_allow_override', [False, True])
def test_pretrained_bert_models_override(hparam_allow_override):
models = ['bert_12_768_12', 'bert_24_1024_16',
'roberta_12_768_12', 'roberta_24_1024_16']
pretrained = {
'bert_12_768_12': ['book_corpus_wiki_en_uncased', 'book_corpus_wiki_en_cased'],
'bert_24_1024_16': ['book_corpus_wiki_en_uncased', 'book_corpus_wiki_en_cased'],
'roberta_12_768_12': ['openwebtext_ccnews_stories_books_cased'],
'roberta_24_1024_16': ['openwebtext_ccnews_stories_books_cased']
}
ones = mx.nd.ones((2, 10))
valid_length = mx.nd.ones((2,))
positions = mx.nd.zeros((2, 3))
for model_name in models:
pretrained_datasets = pretrained.get(model_name)
for dataset in pretrained_datasets:
eprint('testing forward for %s on %s' % (model_name, dataset))
if hparam_allow_override:
model, vocab = nlp.model.get_model(model_name, dataset_name=dataset,
pretrained=True,
root='tests/data/model/',
hparam_allow_override=hparam_allow_override,
ignore_extra=True,
num_layers=6)
else:
with pytest.raises(AssertionError):
model, vocab = nlp.model.get_model(model_name, dataset_name=dataset,
pretrained=True,
root='tests/data/model/',
num_layers=6)
continue
if 'roberta' in model_name:
output = model(ones, valid_length, positions)
else:
output = model(ones, ones, valid_length, positions)
output[0].wait_to_read()
del model
mx.nd.waitall()
@pytest.mark.serial
@pytest.mark.remote_required
@pytest.mark.parametrize('wo_valid_len', [False, True])
def test_bert_models(wo_valid_len):
models = ['bert_12_768_12', 'bert_24_1024_16']
layers = [12, 24]
attention_heads = [12, 16]
units = [768, 1024]
dataset = 'book_corpus_wiki_en_uncased'
vocab_size = 30522
batch_size = 2
seq_len = 3
num_masks = 2
ones = mx.nd.ones((batch_size, seq_len))
valid_length = mx.nd.ones((batch_size, ))
positions = mx.nd.ones((batch_size, num_masks))
kwargs = [{'use_pooler': False, 'use_decoder': False, 'use_classifier': False},
{'use_pooler': True, 'use_decoder': False, 'use_classifier': False},
{'use_pooler': True, 'use_decoder': True, 'use_classifier': False},
{'use_pooler': True, 'use_decoder': True, 'use_classifier': True},
{'use_pooler': False, 'use_decoder': False, 'use_classifier': False,
'output_attention': True},
{'use_pooler': False, 'use_decoder': False, 'use_classifier': False,
'output_attention': True, 'output_all_encodings': True},
{'use_pooler': True, 'use_decoder': True, 'use_classifier': True,
'output_attention': True, 'output_all_encodings': True}]
def infer_shape(shapes, unit):
inferred_shapes = []
for shape in shapes:
inferred_shape = list(shape)
if inferred_shape[-1] == -1:
inferred_shape[-1] = unit
inferred_shapes.append(tuple(inferred_shape))
return inferred_shapes
def get_shapes(output):
if not isinstance(output, (list, tuple)):
return [output.shape]
shapes = []
for out in output:
collect_shapes(out, shapes)
return shapes
def collect_shapes(item, shapes):
if not isinstance(item, (list, tuple)):
shapes.append(item.shape)
return
for child in item:
collect_shapes(child, shapes)
for model_name, layer, unit, head in zip(models, layers, units, attention_heads):
eprint('testing forward for %s' % model_name)
expected_shapes = [
[(batch_size, seq_len, -1)],
[(batch_size, seq_len, -1),
(batch_size, -1)],
[(batch_size, seq_len, -1),
(batch_size, -1),
(batch_size, num_masks, vocab_size)],
[(batch_size, seq_len, -1),
(batch_size, -1),
(batch_size, 2),
(batch_size, num_masks, vocab_size)],
[(batch_size, seq_len, -1)] + [(batch_size, head, seq_len, seq_len)] * layer,
[(batch_size, seq_len, -1)] * layer + [(batch_size, head, seq_len, seq_len)] * layer,
[(batch_size, seq_len, -1)] * layer + [(batch_size, head, seq_len, seq_len)] * layer +
[(batch_size, -1)] + [(batch_size, 2)] + [(batch_size, num_masks, vocab_size)],
]
for kwarg, expected_shape in zip(kwargs, expected_shapes):
eprint('testing forward for %s' % str(kwarg))
expected_shape = infer_shape(expected_shape, unit)
model, _ = nlp.model.get_model(model_name, dataset_name=dataset,
pretrained=False, **kwarg)
model.initialize()
model.hybridize()
if kwarg['use_decoder']:
# position tensor is required for decoding
if wo_valid_len:
output = model(ones, ones, masked_positions=positions)
else:
output = model(ones, ones, valid_length, positions)
else:
if wo_valid_len:
output = model(ones, ones)
else:
output = model(ones, ones, valid_length)
out_shapes = get_shapes(output)
assert out_shapes == expected_shape, (out_shapes, expected_shape)
sync_instance = output[0] if not isinstance(output[0], list) else output[0][0]
sync_instance.wait_to_read()
del model
mx.nd.waitall()
@pytest.mark.serial
@pytest.mark.remote_required
def test_language_models():
text_models = ['standard_lstm_lm_200', 'standard_lstm_lm_650',
'standard_lstm_lm_1500', 'awd_lstm_lm_1150', 'awd_lstm_lm_600']
pretrained_to_test = {'standard_lstm_lm_1500': 'wikitext-2',
'standard_lstm_lm_650': 'wikitext-2',
'standard_lstm_lm_200': 'wikitext-2',
'awd_lstm_lm_1150': 'wikitext-2',
'awd_lstm_lm_600': 'wikitext-2'}
for model_name in text_models:
eprint('testing forward for %s' % model_name)
pretrained_dataset = pretrained_to_test.get(model_name)
model, _ = nlp.model.get_model(model_name, dataset_name=pretrained_dataset,
pretrained=pretrained_dataset is not None)
print(model)
if not pretrained_dataset:
model.collect_params().initialize()
output, state = model(mx.nd.arange(330).reshape(33, 10))
output.wait_to_read()
del model
mx.nd.waitall()
@pytest.mark.serial
@pytest.mark.remote_required
def test_cache_models():
cache_language_models = ['awd_lstm_lm_1150', 'awd_lstm_lm_600', 'standard_lstm_lm_200',
'standard_lstm_lm_650', 'standard_lstm_lm_1500']
datasets = ['wikitext-2']
for name in cache_language_models:
for dataset_name in datasets:
cache_cell = nlp.model.train.get_cache_model(name, dataset_name, window=1, theta=0.6,
lambdas=0.2)
outs, word_history, cache_history, hidden = cache_cell(mx.nd.arange(
10).reshape(10, 1), mx.nd.arange(10).reshape(10, 1), None, None)
print(cache_cell)
print("outs:")
print(outs)
print("word_history:")
print(word_history)
print("cache_history:")
print(cache_history)
@pytest.mark.serial
@pytest.mark.remote_required
def test_get_cache_model_noncache_models():
language_models_params = {
'awd_lstm_lm_1150': 'awd_lstm_lm_1150_wikitext-2-f9562ed0.params',
'awd_lstm_lm_600': 'awd_lstm_lm_600_wikitext-2-e952becc.params',
'standard_lstm_lm_200': 'standard_lstm_lm_200_wikitext-2-b233c700.params',
'standard_lstm_lm_650': 'standard_lstm_lm_650_wikitext-2-631f3904.params',
'standard_lstm_lm_1500': 'standard_lstm_lm_1500_wikitext-2-a4163513.params'}
datasets = ['wikitext-2']
for name in language_models_params.keys():
for dataset_name in datasets:
_, vocab = nlp.model.get_model(name=name, dataset_name=dataset_name, pretrained=True)
ntokens = len(vocab)
cache_cell_0 = nlp.model.train.get_cache_model(name, dataset_name, window=1, theta=0.6,
lambdas=0.2)
print(cache_cell_0)
model, _ = nlp.model.get_model(name=name, dataset_name=dataset_name, pretrained=True)
cache_cell_1 = nlp.model.train.CacheCell(
model, ntokens, window=1, theta=0.6, lambdas=0.2)
cache_cell_1.load_parameters(
os.path.join(get_home_dir(), 'models', language_models_params.get(name)))
print(cache_cell_1)
outs0, word_history0, cache_history0, hidden0 = cache_cell_0(
mx.nd.arange(10).reshape(10, 1), mx.nd.arange(10).reshape(10, 1), None, None)
outs1, word_history1, cache_history1, hidden1 = cache_cell_1(
mx.nd.arange(10).reshape(10, 1), mx.nd.arange(10).reshape(10, 1), None, None)
assert outs0.shape == outs1.shape, outs0.shape
assert len(word_history0) == len(word_history1), len(word_history0)
assert len(cache_history0) == len(cache_history1), len(cache_history0)
assert len(hidden0) == len(hidden1), len(hidden0)
@pytest.mark.serial
@pytest.mark.remote_required
def test_save_load_cache_models():
cache_language_models = ['awd_lstm_lm_1150', 'awd_lstm_lm_600', 'standard_lstm_lm_200',
'standard_lstm_lm_650', 'standard_lstm_lm_1500']
datasets = ['wikitext-2']
for name in cache_language_models:
for dataset_name in datasets:
cache_cell = nlp.model.train.get_cache_model(name, dataset_name, window=1, theta=0.6,
lambdas=0.2)
print(cache_cell)
cache_cell.save_parameters(
os.path.join(get_home_dir(), 'models', name + '-' + dataset_name + '.params'))
cache_cell.load_parameters(
os.path.join(get_home_dir(), 'models', name + '-' + dataset_name + '.params'))
@pytest.mark.serial
def test_save_load_big_rnn_models(tmp_path):
ctx = mx.cpu()
seq_len = 1
batch_size = 1
num_sampled = 6
# network
eval_model = nlp.model.language_model.BigRNN(10, 2, 3, 4, 5, 0.1, prefix='bigrnn')
model = nlp.model.language_model.train.BigRNN(10, 2, 3, 4, 5, num_sampled, 0.1,
prefix='bigrnn')
loss = mx.gluon.loss.SoftmaxCrossEntropyLoss()
# verify param names
model_params = sorted(model.collect_params().keys())
eval_model_params = sorted(eval_model.collect_params().keys())
for p0, p1 in zip(model_params, eval_model_params):
assert p0 == p1, (p0, p1)
model.initialize(mx.init.Xavier(), ctx=ctx)
trainer = mx.gluon.Trainer(model.collect_params(), 'sgd')
# prepare data, label and samples
x = mx.nd.ones((seq_len, batch_size))
y = mx.nd.ones((seq_len, batch_size))
sampled_cls = mx.nd.ones((num_sampled,))
sampled_cls_cnt = mx.nd.ones((num_sampled,))
true_cls_cnt = mx.nd.ones((seq_len, batch_size))
samples = (sampled_cls, sampled_cls_cnt, true_cls_cnt)
hidden = model.begin_state(batch_size=batch_size, func=mx.nd.zeros, ctx=ctx)
# test forward
with mx.autograd.record():
pred, hidden, new_y = model(x, y, hidden, samples)
assert pred.shape == (seq_len, batch_size, 1 + num_sampled)
assert new_y.shape == (seq_len, batch_size)
pred = pred.reshape((-3, -1))
new_y = new_y.reshape((-1,))
l = loss(pred, new_y)
l.backward()
mx.nd.waitall()
path = os.path.join(str(tmp_path), 'test_save_load_big_rnn_models.params')
model.save_parameters(path)
eval_model.load_parameters(path)
def test_big_rnn_model_share_params():
ctx = mx.cpu()
seq_len = 2
batch_size = 1
num_sampled = 6
vocab_size = 10
shape = (seq_len, batch_size)
model = nlp.model.language_model.train.BigRNN(vocab_size, 2, 3, 4, 5, num_sampled, 0.1,
prefix='bigrnn', sparse_weight=False,
sparse_grad=False)
loss = mx.gluon.loss.SoftmaxCrossEntropyLoss()
model.hybridize()
model.initialize(mx.init.Xavier(), ctx=ctx)
trainer = mx.gluon.Trainer(model.collect_params(), 'sgd')
batch_size = 1
x = mx.nd.ones(shape)
y = mx.nd.ones(shape)
sampled_cls = mx.nd.ones((num_sampled,))
sampled_cls_cnt = mx.nd.ones((num_sampled,))
true_cls_cnt = mx.nd.ones(shape)
samples = (sampled_cls, sampled_cls_cnt, true_cls_cnt)
hidden = model.begin_state(batch_size=batch_size, func=mx.nd.zeros, ctx=ctx)
with mx.autograd.record():
pred, hidden, new_y = model(x, y, hidden, samples)
assert pred.shape == (seq_len, batch_size, 1 + num_sampled)
assert new_y.shape == (seq_len, batch_size)
pred = pred.reshape((-3, -1))
new_y = new_y.reshape((-1,))
l = loss(pred, new_y)
l.backward()
assert model.decoder.weight._grad_stype == 'default'
mx.nd.waitall()
eval_model = nlp.model.language_model.BigRNN(vocab_size, 2, 3, 4, 5, 0.1, prefix='bigrnn',
params=model.collect_params())
eval_model.hybridize()
pred, hidden = eval_model(x, hidden)
assert pred.shape == (seq_len, batch_size, vocab_size)
mx.nd.waitall()
def test_weight_drop():
class RefBiLSTM(gluon.Block):
def __init__(self, size, **kwargs):
super(RefBiLSTM, self).__init__(**kwargs)
with self.name_scope():
self._lstm_fwd = gluon.rnn.LSTM(size, bidirectional=False, prefix='l0')
self._lstm_bwd = gluon.rnn.LSTM(size, bidirectional=False, prefix='r0')
def forward(self, inpt):
fwd = self._lstm_fwd(inpt)
bwd_inpt = mx.nd.flip(inpt, 0)
bwd = self._lstm_bwd(bwd_inpt)
bwd = mx.nd.flip(bwd, 0)
return mx.nd.concat(fwd, bwd, dim=2)
net1 = RefBiLSTM(10)
shared_net1 = RefBiLSTM(10, params=net1.collect_params())
net2 = gluon.rnn.LSTM(10)
shared_net2 = gluon.rnn.LSTM(10, params=net2.collect_params())
net3 = gluon.nn.HybridSequential()
net3.add(gluon.rnn.LSTM(10))
shared_net3 = gluon.nn.HybridSequential(params=net3.collect_params())
shared_net3.add(gluon.rnn.LSTM(10, params=net3[0].collect_params()))
x = mx.random.uniform(shape=(3, 4, 5))
nets = [(net1, shared_net1),
(net2, shared_net2),
(net3, shared_net3)]
for net, shared_net in nets:
net.initialize('uniform')
mx.test_utils.assert_almost_equal(net(x).asnumpy(),
shared_net(x).asnumpy())
with mx.autograd.train_mode():
mx.test_utils.assert_almost_equal(net(x).asnumpy(),
shared_net(x).asnumpy())
grads = {}
with mx.autograd.record():
y = net(x)
y.backward()
for name, param in net.collect_params().items():
grads[name] = param.grad().copy()
with mx.autograd.record():
y = shared_net(x)
y.backward()
for name, param in shared_net.collect_params().items():
mx.test_utils.assert_almost_equal(grads[name].asnumpy(), param.grad().asnumpy())
drop_rate = 0.5
nlp.model.utils.apply_weight_drop(net, '.*h2h_weight', drop_rate)
with mx.autograd.predict_mode():
mx.test_utils.assert_almost_equal(net(x).asnumpy(),
shared_net(x).asnumpy())
with mx.autograd.train_mode():
assert not mx.test_utils.almost_equal(net(x).asnumpy(),
shared_net(x).asnumpy())
grads = {}
with mx.autograd.record():
y = net(x)
y.backward()
for name, param in net.collect_params().items():
grads[name] = param.grad().copy()
with mx.autograd.record():
y = shared_net(x)
y.backward()
for name, param in shared_net.collect_params().items():
assert not mx.test_utils.almost_equal(grads[name].asnumpy(), param.grad().asnumpy())
def test_gelu():
x = mx.random.uniform(shape=(3, 4, 5))
net = nlp.model.GELU()
y = net(x)
assert y.shape == x.shape
y.wait_to_read()
def test_transformer_encoder():
batch_size = 2
seq_length = 5
units = 768
inputs = mx.random.uniform(shape=(batch_size, seq_length, units))
mask = mx.nd.ones([batch_size, seq_length, seq_length])
cell = nlp.model.TransformerEncoderCell(units=768, hidden_size=3072, num_heads=12,
attention_cell='multi_head', dropout=0.0,
use_residual=True, scaled=True,
output_attention=False,
prefix='transformer_cell')
cell.collect_params().initialize()
cell.hybridize()
outputs, attention_weights = cell(inputs, mask)
outputs.wait_to_read()
mx.nd.waitall()
assert outputs.shape == (batch_size, seq_length, units)
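# A minimal sketch of how these tests are usually selected by their pytest
# markers; the test-file path here is an assumption and may differ per checkout:
#
#   pytest -v -m "serial and remote_required" -k "bert or roberta" tests/unittest/test_models.py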
| python | 29,849 |
#!/usr/bin/env python3
from pprint import pprint
import click
from mitmproxy.io import tnetstring
def read_tnetstring(input):
# tnetstring throw a ValueError on EOF, which is hard to catch
# because they raise ValueErrors for a couple of other reasons.
# Check for EOF to avoid this.
if not input.read(1):
return None
else:
input.seek(-1, 1)
return tnetstring.load(input)
@click.command()
@click.argument("input", type=click.File("rb"))
def inspect(input):
"""
pretty-print a dumpfile
"""
while True:
data = read_tnetstring(input)
if not data:
break
pprint(data)
if __name__ == "__main__":
inspect()
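# A small sketch of the EOF handling read_tnetstring() provides, assuming
# tnetstring.dump(value, fp) is available alongside load():
#
#   import io
#   buf = io.BytesIO()
#   tnetstring.dump({"flow": 1}, buf)
#   buf.seek(0)
#   first = read_tnetstring(buf)          # parses the single record
#   assert read_tnetstring(buf) is None   # clean EOF instead of a ValueError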
| python | 704 |
from __future__ import print_function
from PIL import Image
import numpy as np
import os
import torchvision
import math
def tensor2im(img, imtype=np.uint8, unnormalize=True, idx=0, nrows=None):
# select a sample or create grid if img is a batch
if len(img.shape) == 4:
nrows = nrows if nrows is not None else int(math.sqrt(img.size(0)))
img = img[idx] if idx >= 0 else torchvision.utils.make_grid(img, nrows)
img = img.cpu().float()
if unnormalize:
mean = [0.5, 0.5, 0.5]
std = [0.5, 0.5, 0.5]
for i, m, s in zip(img, mean, std):
i.mul_(s).add_(m)
image_numpy = img.numpy()
image_numpy_t = np.transpose(image_numpy, (1, 2, 0))
image_numpy_t = image_numpy_t*254.0
return image_numpy_t.astype(imtype)
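# A minimal usage sketch, assuming input tensors normalized with mean=std=0.5
# (i.e. values in [-1, 1]) as expected by the default unnormalize step above:
#
#   import torch
#   batch = torch.rand(4, 3, 64, 64) * 2 - 1   # fake normalized NCHW batch
#   single = tensor2im(batch, idx=0)            # first sample as HxWxC uint8
#   grid = tensor2im(batch, idx=-1)             # idx < 0 -> grid via make_grid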
def tensor2maskim(mask, imtype=np.uint8, idx=0, nrows=1):
im = tensor2im(mask, imtype=imtype, idx=idx, unnormalize=False, nrows=nrows)
if im.shape[2] == 1:
im = np.repeat(im, 3, axis=-1)
return im
def mkdirs(paths):
if isinstance(paths, list) and not isinstance(paths, str):
for path in paths:
mkdir(path)
else:
mkdir(paths)
def mkdir(path):
if not os.path.exists(path):
os.makedirs(path)
def save_image(image_numpy, image_path):
mkdir(os.path.dirname(image_path))
image_pil = Image.fromarray(image_numpy)
image_pil.save(image_path)
def save_str_data(data, path):
mkdir(os.path.dirname(path))
    np.savetxt(path, data, delimiter=",", fmt="%s")
| python | 1,522 |
"""Test ZHA registries."""
from unittest import mock
import pytest
import homeassistant.components.zha.core.registries as registries
MANUFACTURER = "mock manufacturer"
MODEL = "mock model"
@pytest.fixture
def zha_device():
"""Return a mock of ZHA device."""
dev = mock.MagicMock()
dev.manufacturer = MANUFACTURER
dev.model = MODEL
return dev
@pytest.fixture
def channels(channel):
"""Return a mock of channels."""
return [channel("level", 8), channel("on_off", 6)]
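# The `channel` factory fixture used above and below comes from the shared ZHA
# test conftest. A minimal stand-in, inferred from how it is used in this module
# (a sketch, not the actual Home Assistant fixture), might look like:
#
#   @pytest.fixture
#   def channel():
#       def _factory(name, cluster_id):
#           ch = mock.MagicMock()
#           ch.name = name
#           ch.generic_id = f"channel_0x{cluster_id:04x}"
#           return ch
#       return _factory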
@pytest.mark.parametrize(
"rule, matched",
[
(registries.MatchRule(), False),
(registries.MatchRule(channel_names={"level"}), True),
(registries.MatchRule(channel_names={"level", "no match"}), False),
(registries.MatchRule(channel_names={"on_off"}), True),
(registries.MatchRule(channel_names={"on_off", "no match"}), False),
(registries.MatchRule(channel_names={"on_off", "level"}), True),
(registries.MatchRule(channel_names={"on_off", "level", "no match"}), False),
# test generic_id matching
(registries.MatchRule(generic_ids={"channel_0x0006"}), True),
(registries.MatchRule(generic_ids={"channel_0x0008"}), True),
(registries.MatchRule(generic_ids={"channel_0x0006", "channel_0x0008"}), True),
(
registries.MatchRule(
generic_ids={"channel_0x0006", "channel_0x0008", "channel_0x0009"}
),
False,
),
(
registries.MatchRule(
generic_ids={"channel_0x0006", "channel_0x0008"},
channel_names={"on_off", "level"},
),
True,
),
# manufacturer matching
(registries.MatchRule(manufacturers="no match"), False),
(registries.MatchRule(manufacturers=MANUFACTURER), True),
(
registries.MatchRule(manufacturers="no match", aux_channels="aux_channel"),
False,
),
(
registries.MatchRule(
manufacturers=MANUFACTURER, aux_channels="aux_channel"
),
True,
),
(registries.MatchRule(models=MODEL), True),
(registries.MatchRule(models="no match"), False),
(registries.MatchRule(models=MODEL, aux_channels="aux_channel"), True),
(registries.MatchRule(models="no match", aux_channels="aux_channel"), False),
# match everything
(
registries.MatchRule(
generic_ids={"channel_0x0006", "channel_0x0008"},
channel_names={"on_off", "level"},
manufacturers=MANUFACTURER,
models=MODEL,
),
True,
),
(
registries.MatchRule(
channel_names="on_off", manufacturers={"random manuf", MANUFACTURER}
),
True,
),
(
registries.MatchRule(
channel_names="on_off", manufacturers={"random manuf", "Another manuf"}
),
False,
),
(
registries.MatchRule(
channel_names="on_off", manufacturers=lambda x: x == MANUFACTURER
),
True,
),
(
registries.MatchRule(
channel_names="on_off", manufacturers=lambda x: x != MANUFACTURER
),
False,
),
(
registries.MatchRule(
channel_names="on_off", models={"random model", MODEL}
),
True,
),
(
registries.MatchRule(
channel_names="on_off", models={"random model", "Another model"}
),
False,
),
(
registries.MatchRule(channel_names="on_off", models=lambda x: x == MODEL),
True,
),
(
registries.MatchRule(channel_names="on_off", models=lambda x: x != MODEL),
False,
),
],
)
def test_registry_matching(rule, matched, channels):
"""Test strict rule matching."""
assert rule.strict_matched(MANUFACTURER, MODEL, channels) is matched
@pytest.mark.parametrize(
"rule, matched",
[
(registries.MatchRule(), False),
(registries.MatchRule(channel_names={"level"}), True),
(registries.MatchRule(channel_names={"level", "no match"}), False),
(registries.MatchRule(channel_names={"on_off"}), True),
(registries.MatchRule(channel_names={"on_off", "no match"}), False),
(registries.MatchRule(channel_names={"on_off", "level"}), True),
(registries.MatchRule(channel_names={"on_off", "level", "no match"}), False),
(
registries.MatchRule(channel_names={"on_off", "level"}, models="no match"),
True,
),
(
registries.MatchRule(
channel_names={"on_off", "level"},
models="no match",
manufacturers="no match",
),
True,
),
(
registries.MatchRule(
channel_names={"on_off", "level"},
models="no match",
manufacturers=MANUFACTURER,
),
True,
),
# test generic_id matching
(registries.MatchRule(generic_ids={"channel_0x0006"}), True),
(registries.MatchRule(generic_ids={"channel_0x0008"}), True),
(registries.MatchRule(generic_ids={"channel_0x0006", "channel_0x0008"}), True),
(
registries.MatchRule(
generic_ids={"channel_0x0006", "channel_0x0008", "channel_0x0009"}
),
False,
),
(
registries.MatchRule(
generic_ids={"channel_0x0006", "channel_0x0008", "channel_0x0009"},
models="mo match",
),
False,
),
(
registries.MatchRule(
generic_ids={"channel_0x0006", "channel_0x0008", "channel_0x0009"},
models=MODEL,
),
True,
),
(
registries.MatchRule(
generic_ids={"channel_0x0006", "channel_0x0008"},
channel_names={"on_off", "level"},
),
True,
),
# manufacturer matching
(registries.MatchRule(manufacturers="no match"), False),
(registries.MatchRule(manufacturers=MANUFACTURER), True),
(registries.MatchRule(models=MODEL), True),
(registries.MatchRule(models="no match"), False),
# match everything
(
registries.MatchRule(
generic_ids={"channel_0x0006", "channel_0x0008"},
channel_names={"on_off", "level"},
manufacturers=MANUFACTURER,
models=MODEL,
),
True,
),
],
)
def test_registry_loose_matching(rule, matched, channels):
"""Test loose rule matching."""
assert rule.loose_matched(MANUFACTURER, MODEL, channels) is matched
def test_match_rule_claim_channels_color(channel):
"""Test channel claiming."""
ch_color = channel("color", 0x300)
ch_level = channel("level", 8)
ch_onoff = channel("on_off", 6)
rule = registries.MatchRule(channel_names="on_off", aux_channels={"color", "level"})
claimed = rule.claim_channels([ch_color, ch_level, ch_onoff])
assert {"color", "level", "on_off"} == {ch.name for ch in claimed}
@pytest.mark.parametrize(
"rule, match",
[
(registries.MatchRule(channel_names={"level"}), {"level"}),
(registries.MatchRule(channel_names={"level", "no match"}), {"level"}),
(registries.MatchRule(channel_names={"on_off"}), {"on_off"}),
(registries.MatchRule(generic_ids="channel_0x0000"), {"basic"}),
(
registries.MatchRule(channel_names="level", generic_ids="channel_0x0000"),
{"basic", "level"},
),
(registries.MatchRule(channel_names={"level", "power"}), {"level", "power"}),
(
registries.MatchRule(
channel_names={"level", "on_off"}, aux_channels={"basic", "power"}
),
{"basic", "level", "on_off", "power"},
),
(registries.MatchRule(channel_names={"color"}), set()),
],
)
def test_match_rule_claim_channels(rule, match, channel, channels):
"""Test channel claiming."""
ch_basic = channel("basic", 0)
channels.append(ch_basic)
ch_power = channel("power", 1)
channels.append(ch_power)
claimed = rule.claim_channels(channels)
assert match == {ch.name for ch in claimed}
@pytest.fixture
def entity_registry():
"""Registry fixture."""
return registries.ZHAEntityRegistry()
@pytest.mark.parametrize(
"manufacturer, model, match_name",
(
("random manufacturer", "random model", "OnOff"),
("random manufacturer", MODEL, "OnOffModel"),
(MANUFACTURER, "random model", "OnOffManufacturer"),
(MANUFACTURER, MODEL, "OnOffModelManufacturer"),
(MANUFACTURER, "some model", "OnOffMultimodel"),
),
)
def test_weighted_match(channel, entity_registry, manufacturer, model, match_name):
"""Test weightedd match."""
s = mock.sentinel
@entity_registry.strict_match(
s.component,
channel_names="on_off",
models={MODEL, "another model", "some model"},
)
class OnOffMultimodel:
pass
@entity_registry.strict_match(s.component, channel_names="on_off")
class OnOff:
pass
@entity_registry.strict_match(
s.component, channel_names="on_off", manufacturers=MANUFACTURER
)
class OnOffManufacturer:
pass
@entity_registry.strict_match(s.component, channel_names="on_off", models=MODEL)
class OnOffModel:
pass
@entity_registry.strict_match(
s.component, channel_names="on_off", models=MODEL, manufacturers=MANUFACTURER
)
class OnOffModelManufacturer:
pass
ch_on_off = channel("on_off", 6)
ch_level = channel("level", 8)
match, claimed = entity_registry.get_entity(
s.component, manufacturer, model, [ch_on_off, ch_level]
)
assert match.__name__ == match_name
assert claimed == [ch_on_off]
def test_multi_sensor_match(channel, entity_registry):
"""Test multi-entity match."""
s = mock.sentinel
@entity_registry.multipass_match(
s.binary_sensor,
channel_names="smartenergy_metering",
)
class SmartEnergySensor2:
pass
ch_se = channel("smartenergy_metering", 0x0702)
ch_illuminati = channel("illuminance", 0x0401)
match, claimed = entity_registry.get_multi_entity(
"manufacturer", "model", channels=[ch_se, ch_illuminati]
)
assert s.binary_sensor in match
assert s.component not in match
assert set(claimed) == {ch_se}
assert {cls.entity_class.__name__ for cls in match[s.binary_sensor]} == {
SmartEnergySensor2.__name__
}
@entity_registry.multipass_match(
s.component, channel_names="smartenergy_metering", aux_channels="illuminance"
)
class SmartEnergySensor1:
pass
@entity_registry.multipass_match(
s.binary_sensor,
channel_names="smartenergy_metering",
aux_channels="illuminance",
)
class SmartEnergySensor3:
pass
match, claimed = entity_registry.get_multi_entity(
"manufacturer", "model", channels={ch_se, ch_illuminati}
)
assert s.binary_sensor in match
assert s.component in match
assert set(claimed) == {ch_se, ch_illuminati}
assert {cls.entity_class.__name__ for cls in match[s.binary_sensor]} == {
SmartEnergySensor2.__name__,
SmartEnergySensor3.__name__,
}
assert {cls.entity_class.__name__ for cls in match[s.component]} == {
SmartEnergySensor1.__name__
}
| python | 11,901 |
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Produce interpolation in the joint model trained by `train_joint.py`.
This script produces an interpolation on one side of the joint model as a series
of images, together with the parallel, image-by-image transformation of that
series on the other side of the model.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import importlib
import os
from magenta.models.latent_transfer import common
from magenta.models.latent_transfer import common_joint
from magenta.models.latent_transfer import model_joint
import numpy as np
import tensorflow as tf
FLAGS = tf.flags.FLAGS
tf.flags.DEFINE_string('config', 'transfer_A_unconditional_mnist_to_mnist',
'The name of the model config to use.')
tf.flags.DEFINE_string('exp_uid_A', '_exp_0', 'exp_uid for data_A')
tf.flags.DEFINE_string('exp_uid_B', '_exp_1', 'exp_uid for data_B')
tf.flags.DEFINE_string('exp_uid', '_exp_0',
'String to append to config for filenames/directories.')
tf.flags.DEFINE_integer('n_iters', 100000, 'Number of iterations.')
tf.flags.DEFINE_integer('n_iters_per_save', 5000, 'Iterations per a save.')
tf.flags.DEFINE_integer('n_iters_per_eval', 5000,
'Iterations per a evaluation.')
tf.flags.DEFINE_integer('random_seed', 19260817, 'Random seed')
tf.flags.DEFINE_string('exp_uid_classifier', '_exp_0', 'exp_uid for classifier')
# For Overriding configs
tf.flags.DEFINE_integer('n_latent', 64, '')
tf.flags.DEFINE_integer('n_latent_shared', 2, '')
tf.flags.DEFINE_float('prior_loss_beta_A', 0.01, '')
tf.flags.DEFINE_float('prior_loss_beta_B', 0.01, '')
tf.flags.DEFINE_float('prior_loss_align_beta', 0.0, '')
tf.flags.DEFINE_float('mean_recons_A_align_beta', 0.0, '')
tf.flags.DEFINE_float('mean_recons_B_align_beta', 0.0, '')
tf.flags.DEFINE_float('mean_recons_A_to_B_align_beta', 0.0, '')
tf.flags.DEFINE_float('mean_recons_B_to_A_align_beta', 0.0, '')
tf.flags.DEFINE_integer('pairing_number', 1024, '')
# For controling interpolation
tf.flags.DEFINE_integer('load_ckpt_iter', 0, '')
tf.flags.DEFINE_string('interpolate_labels', '',
'a `,` separated list of 0-indexed labels.')
tf.flags.DEFINE_integer('nb_images_between_labels', 1, '')
def load_config(config_name):
return importlib.import_module('configs.%s' % config_name).config
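# A small sketch (not used by this script) of the linear walk main() performs
# between consecutive latent anchors: each pair of anchor codes is connected by
# `steps_between` evenly spaced points.
def _linear_interpolation_sketch(anchors, steps_between):
  out = [anchors[0]]
  for a, b in zip(anchors, anchors[1:]):
    for j in range(1, steps_between + 1):
      out.append(a + (b - a) * (float(j) / steps_between))
  return out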
def main(unused_argv):
# pylint:disable=unused-variable
# Reason:
  # This script relies on many programmatic calls to functions and accesses to
  # variables. Pylint cannot infer this, so it emits false unused-variable
  # alarms unless the warning is disabled.
# pylint:disable=invalid-name
# Reason:
  # The following variables have names that pylint considers invalid, so we
  # disable the warning:
  # - variables whose names contain A or B, indicating which side of the data
  #   they belong to.
del unused_argv
# Load main config
config_name = FLAGS.config
config = load_config(config_name)
config_name_A = config['config_A']
config_name_B = config['config_B']
config_name_classifier_A = config['config_classifier_A']
config_name_classifier_B = config['config_classifier_B']
# Load dataset
dataset_A = common_joint.load_dataset(config_name_A, FLAGS.exp_uid_A)
(dataset_blob_A, train_data_A, train_label_A, train_mu_A, train_sigma_A,
index_grouped_by_label_A) = dataset_A
dataset_B = common_joint.load_dataset(config_name_B, FLAGS.exp_uid_B)
(dataset_blob_B, train_data_B, train_label_B, train_mu_B, train_sigma_B,
index_grouped_by_label_B) = dataset_B
# Prepare directories
dirs = common_joint.prepare_dirs('joint', config_name, FLAGS.exp_uid)
save_dir, sample_dir = dirs
# Set random seed
np.random.seed(FLAGS.random_seed)
tf.set_random_seed(FLAGS.random_seed)
  # Build the graph and session (no training happens in this script).
tf.reset_default_graph()
sess = tf.Session()
# Load model's architecture (= build)
one_side_helper_A = common_joint.OneSideHelper(config_name_A, FLAGS.exp_uid_A,
config_name_classifier_A,
FLAGS.exp_uid_classifier)
one_side_helper_B = common_joint.OneSideHelper(config_name_B, FLAGS.exp_uid_B,
config_name_classifier_B,
FLAGS.exp_uid_classifier)
m = common_joint.load_model(model_joint.Model, config_name, FLAGS.exp_uid)
# Initialize and restore
sess.run(tf.global_variables_initializer())
one_side_helper_A.restore(dataset_blob_A)
one_side_helper_B.restore(dataset_blob_B)
# Restore from ckpt
config_name = FLAGS.config
model_uid = common.get_model_uid(config_name, FLAGS.exp_uid)
save_name = os.path.join(
save_dir, 'transfer_%s_%d.ckpt' % (model_uid, FLAGS.load_ckpt_iter))
m.vae_saver.restore(sess, save_name)
  # Prepare the interpolation output directory.
intepolate_dir = os.path.join(
sample_dir, 'interpolate_sample', '%010d' % FLAGS.load_ckpt_iter)
tf.gfile.MakeDirs(intepolate_dir)
  # Build the lists of anchor indices for the labels to interpolate between.
interpolate_labels = [int(_) for _ in FLAGS.interpolate_labels.split(',')]
nb_images_between_labels = FLAGS.nb_images_between_labels
index_list_A = []
last_pos = [0] * 10
for label in interpolate_labels:
index_list_A.append(index_grouped_by_label_A[label][last_pos[label]])
last_pos[label] += 1
index_list_B = []
last_pos = [-1] * 10
for label in interpolate_labels:
index_list_B.append(index_grouped_by_label_B[label][last_pos[label]])
last_pos[label] -= 1
z_A = []
z_A.append(train_mu_A[index_list_A[0]])
for i_label in range(1, len(interpolate_labels)):
last_z_A = z_A[-1]
this_z_A = train_mu_A[index_list_A[i_label]]
for j in range(1, nb_images_between_labels + 1):
z_A.append(last_z_A +
(this_z_A - last_z_A) * (float(j) / nb_images_between_labels))
z_B = []
z_B.append(train_mu_B[index_list_B[0]])
for i_label in range(1, len(interpolate_labels)):
last_z_B = z_B[-1]
this_z_B = train_mu_B[index_list_B[i_label]]
for j in range(1, nb_images_between_labels + 1):
z_B.append(last_z_B +
(this_z_B - last_z_B) * (float(j) / nb_images_between_labels))
z_B_tr = []
for this_z_A in z_A:
this_z_B_tr = sess.run(m.x_A_to_B_direct, {m.x_A: np.array([this_z_A])})
z_B_tr.append(this_z_B_tr[0])
# Generate data domain instances and save.
z_A = np.array(z_A)
x_A = one_side_helper_A.m_helper.decode(z_A)
x_A = common.post_proc(x_A, one_side_helper_A.m_helper.config)
batched_x_A = common.batch_image(
x_A,
max_images=len(x_A),
rows=len(x_A),
cols=1,
)
common.save_image(batched_x_A, os.path.join(intepolate_dir, 'x_A.png'))
z_B = np.array(z_B)
x_B = one_side_helper_B.m_helper.decode(z_B)
x_B = common.post_proc(x_B, one_side_helper_B.m_helper.config)
batched_x_B = common.batch_image(
x_B,
max_images=len(x_B),
rows=len(x_B),
cols=1,
)
common.save_image(batched_x_B, os.path.join(intepolate_dir, 'x_B.png'))
z_B_tr = np.array(z_B_tr)
x_B_tr = one_side_helper_B.m_helper.decode(z_B_tr)
x_B_tr = common.post_proc(x_B_tr, one_side_helper_B.m_helper.config)
batched_x_B_tr = common.batch_image(
x_B_tr,
max_images=len(x_B_tr),
rows=len(x_B_tr),
cols=1,
)
common.save_image(batched_x_B_tr, os.path.join(intepolate_dir, 'x_B_tr.png'))
if __name__ == '__main__':
tf.app.run(main)
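# A sketch of a typical invocation; the script's filename is an assumption and
# the flag values are examples, but the flags themselves are defined above:
#
#   python interpolate_joint.py \
#     --config=transfer_A_unconditional_mnist_to_mnist \
#     --load_ckpt_iter=100000 \
#     --interpolate_labels=0,1,2,3 \
#     --nb_images_between_labels=8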
| python | 8,074 |
"""Unit test package for medstat."""
| python | 37 |
"""Presence Detection adapter for Mozilla WebThings Gateway."""
from datetime import datetime, timedelta
from gateway_addon import Adapter, Database
import json
import os
import re
import threading
import ipaddress
from .presence_device import PresenceDevice
from .util import valid_ip, valid_mac, clamp, get_ip, ping, arping, arp, printDebug
OUI_FILE = 'oui.txt'
SEPARATORS = ('-', ':')
BUFFER_SIZE = 1024 * 8
__location__ = os.path.realpath(
os.path.join(os.getcwd(), os.path.dirname(__file__)))
_CONFIG_PATHS = [
os.path.join(os.path.expanduser('~'), '.mozilla-iot', 'config'),
]
if 'MOZIOT_HOME' in os.environ:
_CONFIG_PATHS.insert(0, os.path.join(os.environ['MOZIOT_HOME'], 'config'))
class PresenceAdapter(Adapter):
"""Adapter for network presence detection"""
def __init__(self, verbose=True):
"""
Initialize the object.
verbose -- whether or not to enable verbose logging
"""
print("Initialising adapter from class")
self.pairing = False
self.name = self.__class__.__name__
Adapter.__init__(self,
'network-presence-detection-adapter',
'network-presence-detection-adapter',
verbose=verbose)
print("Adapter ID = " + self.get_id())
self.DEBUG = False #True
self.memory_in_weeks = 10 # How many weeks a device will be remembered as a possible device.
        self.time_window = 60 # How many minutes should a device be away before we consider it away?
self.arping = False # Does the user also want to try using arping?
self.defaultIpAddress = ''
self.defaultSubnetMask = ''
self.add_from_config() # Here we get data from the settings in the Gateway interface.
        self.own_ip = 'unknown' # We can only scan if the device itself has an IP address.
self.ip_range = {} # we remember which IP addresses had a device. This makes them extra interesting, and they will get extra attention during scans.
self.deep_scan_frequency = 10 # once every 10 scans we do a deep scan.
self.scan_count = 0 # Used by the deep scan system.
self.filename = None
for path in _CONFIG_PATHS:
if os.path.isdir(path):
self.filename = os.path.join(
path,
'network-presence-detection-adapter-devices.json'
)
self.should_save = False
# make sure the file exists:
if self.filename:
try:
with open(self.filename) as file_object:
printDebug("Loading json..", self.DEBUG)
try:
self.previously_found = json.load(file_object)
except:
printDebug("Empty json file", self.DEBUG)
self.previously_found = {}
printDebug("Previously found items: = " + str(self.previously_found), self.DEBUG)
except (IOError, ValueError):
self.previously_found = {}
print("Failed to load JSON file, generating new one.")
with open(self.filename, 'w') as f:
f.write('{}')
else:
self.previously_found = {}
# Remove devices that have not been seen in a long time
self.prune()
# Present all devices to the gateway
for key in self.previously_found:
self._add_device(str(key), str(self.previously_found[key]['name']), str('...')) # Adding the device
#_id = 'presence-{}'.format(key)
self.devices[key].add_boolean_child('recently1', "Recently spotted", False)
self.devices[key].add_integer_child('minutes_ago', "Minutes ago last seen", 99999)
# Start the thread that updates the 'minutes ago' countdown on all lastseen properties.
#t = threading.Thread(target=self.update_minutes_ago)
#t.daemon = True
#t.start()
# We continuously scan for new devices, in an endless loop. The 255 addresses are split into a few chunks, and each chunk is given to a separate thread.
self.own_ip = self.defaultIpAddress if self.defaultIpAddress != '' else get_ip()
if valid_ip(self.own_ip):
while True:
#def split_processing(items, num_splits=4):
old_previous_found_count = len(self.previously_found)
thread_count = 5
# if default ip and subnet are provided
if self.defaultIpAddress and self.defaultSubnetMask:
ip_addresses = [str(i) for i in ipaddress.ip_network(self.defaultIpAddress + '/' + self.defaultSubnetMask, False).hosts()]
else:
start = 0
end = 255
ip_addresses = [str(self.own_ip[:self.own_ip.rfind(".")]) + "." + str(ip_byte4) for ip_byte4 in range(start, end)]
split_size = round(len(ip_addresses) / thread_count + 0.5)
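                # e.g. with 255 candidate addresses and 5 threads, split_size is 52,
                # so each thread below scans a contiguous block of at most 52 addresses.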
threads = []
for ips in [ip_addresses[i:i + split_size] for i in range(0, len(ip_addresses), split_size)]:
# determine the indices of the list this thread will handle
# Create the thread
threads.append(
threading.Thread(target=self.scan, args=(ips,)))
threads[-1].daemon = True
threads[-1].start() # start the thread we just created
# Wait for all threads to finish
for t in threads:
t.join()
printDebug("All threads are done", self.DEBUG)
# If new devices were found, save the JSON file.
if len(self.previously_found) > old_previous_found_count:
self.save_to_json()
self.update_the_rest()
#self.scan()
#time.sleep(60)
'''
def update_minutes_ago(self):
t = threading.Timer(60.0, self.update_minutes_ago)
t.daemon = True
t.start()
print("~~thread minutes ago updater")
#time.sleep(300)
for key in self.devices:
print("~~thread is checking out a device: " + str(key))
if 'minutes_ago' in self.devices[key].properties:
print("~~thread is updating minutes ago: " + str(key))
current_minutes_ago_value = self.devices[key].properties['minutes_ago'].value
self.devices[key].properties['minutes_ago'].update(current_minutes_ago_value + 1)
#time.sleep(10)
'''
def unload(self):
print("Presence detector is being unloaded")
self.save_to_json()
def remove_thing(self, device_id):
printDebug("-----REMOVING------", self.DEBUG)
try:
printDebug("THING TO REMOVE:" + str(self.devices[device_id]), self.DEBUG)
del self.previously_found[device_id]
#print("2")
obj = self.get_device(device_id)
#print("3")
self.handle_device_removed(obj)
print("Removed presence detection device")
except:
print("REMOVING PRESENCE DETECTION THING FAILED")
#del self.devices[device_id]
self.should_save = True # saving changes to the json persistence file
def scan(self, ip_addresses):
self.scan_count += 1
if self.scan_count == self.deep_scan_frequency:
self.scan_count = 0
self.should_save = False # We only save found_devices to a file if new devices have been found during this scan.
# # skip broadcast addresses
# if start == 0:
# start = 1
# if end == 255:
# end = 254
# when halfway through, start a new thread.
for ip_address in ip_addresses: # skip broadcast addresses
if ip_address.endswith('.255'):
continue
printDebug("", self.DEBUG)
printDebug(ip_address, self.DEBUG)
# Skip our own IP address.
if ip_address == self.own_ip:
continue
# IP Addresses that have been populated before get extra scrutiny.
if ip_address in self.ip_range:
ping_count = 4
else:
ping_count = 1
# Once in a while we do a deep scan of the entire network, and give each IP address a larger number of tries before moving on.
if self.scan_count == 0:
ping_count = 4
printDebug("-scan intensity: " + str(ping_count), self.DEBUG)
alive = False # holds whether we got any response.
if ping(ip_address, ping_count) == 0: # 0 means everything went ok, so a device was found.
alive = True
elif self.arping:
if arping(ip_address, ping_count) == 0: # 0 means everything went ok, so a device was found.
alive = True
# If either ping or arping found a device:
if alive:
self.ip_range[ip_address] = 1000 # This IP address is of high interest. For the next 1000 iterations is will get extra attention.
printDebug("-ALIVE", self.DEBUG)
output = arp(ip_address)
printDebug(str(output), self.DEBUG)
mac_addresses = re.findall(r'(([0-9a-fA-F]{1,2}:){5}[0-9a-fA-F]{1,2})', output)
now = datetime.timestamp(datetime.now())
if len(mac_addresses) > 0:
mac_address = mac_addresses[0][0]
mac_address = ':'.join([
# Left pad the MAC address parts with '0' in case of
# invalid output (as on macOS)
'0' * (2 - len(x)) + x for x in mac_address.split(':')
])
if not valid_mac(mac_address):
continue
mac_address = mac_address.replace(":", "")
_id = 'presence-{}'.format(mac_address)
printDebug("early mac = " + mac_address, self.DEBUG)
# Get the basic variables
found_device_name = output.split(' ')[0]
printDebug("early found device name = " + found_device_name, self.DEBUG)
if found_device_name == '?' or valid_ip(found_device_name):
vendor = 'unnamed'
try:
# Get the vendor name, and shorten it. It removes
# everything after the comma. Thus "Apple, inc"
# becomes "Apple"
vendor = get_vendor(mac_address)
if vendor is not None:
vendor = vendor.split(' ', 1)[0]
vendor = vendor.split(',', 1)[0]
else:
vendor = 'unnamed'
except ValueError:
pass
found_device_name = "Presence - " + vendor
else:
found_device_name = "Presence - " + found_device_name
printDebug("--mac: " + mac_address, self.DEBUG)
printDebug("--name: " + found_device_name, self.DEBUG)
printDebug("--_id: " + _id, self.DEBUG)
# Create or update items in the previously_found dictionary
try:
possibleName = ''
if _id not in self.previously_found:
self.should_save = True # We will be adding this new device to the list, and then save that updated list.
i = 2 # We skip "1" as a number. So we will get names like "Apple" and then "Apple 2", "Apple 3", and so on.
possibleName = found_device_name
could_be_same_same = True
while could_be_same_same is True: # We check if this name already exists in the list of previously found devices.
could_be_same_same = False
for item in self.previously_found.values():
if possibleName == item['name']: # The name already existed in the list, so we change it a little bit and compare again.
could_be_same_same = True
#print("names collided")
possibleName = found_device_name + " " + str(i)
i += 1
self.previously_found[str(_id)] = { # adding it to the internal object
'name': str(possibleName),
'lastseen': now,
}
else:
printDebug(" -mac address already known", self.DEBUG)
self.previously_found[_id]['lastseen'] = now
possibleName = self.previously_found[_id]['name']
except Exception as ex:
print("Error updating items in the previously_found dictionary: " + str(ex))
printDebug("--_id is now: " + _id, self.DEBUG)
# Present new device to the WebThings gateway, or update them.
#print("propos: " + str( self.get_devices() ))
try:
if _id not in self.devices: # Add device if it does not exist.
# Present new device to the WebThings gateway
printDebug("not _id in self.devices", self.DEBUG)
self._add_device(str(_id), str(possibleName), str(ip_address)) # The device did not exist yet, so we're creating it.
printDebug("Presented new device to gateway:" + str(possibleName), self.DEBUG)
else:
if 'details' in self.devices[_id].properties:
if ip_address != '':
printDebug("UPDATING DETAILS for " + _id, self.DEBUG)
self.devices[_id].properties['details'].update(str(ip_address))
else:
pass
printDebug("ip_address was empty, so not updating the details property.", self.DEBUG)
else:
pass
printDebug("The details property did not exist? Does the device even exist?", self.DEBUG)
except Exception as ex:
print("Error presenting new device to the WebThings gateway, or updating them.: " + str(ex))
# Present new device properties to the WebThings gateway, or update them.
try:
if 'recently1' not in self.devices[_id].properties:
# add the property
print()
print("While updating, noticed device did not yet have the recently spotted property. Adding now.")
self.devices[_id].add_boolean_child('recently1', "Recently spotted", True)
else:
self.devices[_id].properties['recently1'].update(True)
if 'minutes_ago' not in self.devices[_id].properties:
# add the property
print()
print("While updating, noticed device did not yet have the minutes ago property. Adding now.")
self.devices[_id].add_integer_child('minutes_ago', "Minutes ago last seen", 0)
else:
self.devices[_id].properties['minutes_ago'].update(0)
except Exception as ex:
print("Error presenting new device properties to the WebThings gateway, or updating them.: " + str(ex))
# If no device was found at this IP address:
else:
if ip_address in self.ip_range:
if self.ip_range[ip_address] == 0:
self.ip_range.pop(ip_address)
else:
self.ip_range[ip_address] = self.ip_range[ip_address] - 1
def update_the_rest(self):
# We go over the list of ALL previously found devices, including the ones not found in the scan, and update them.
try:
printDebug("", self.DEBUG)
#past = datetime.now() - timedelta(hours=1)
nowstamp = datetime.timestamp(datetime.now())
#past = datetime.now() - timedelta(minutes=self.time_window)
#paststamp = datetime.timestamp(past) # A moment in the past that we compare against.
for key in self.previously_found:
printDebug("", self.DEBUG)
#_id = 'presence-{}'.format(key)
#print("Updating: " + str(_id))
try:
# Make sure all devices and properties exist. Should be superfluous really.
if key not in self.devices:
self._add_device(key, self.previously_found[key]['name'], '...') # The device did not exist yet, so we're creating it.
if 'recently1' not in self.devices[key].properties:
self.devices[key].add_boolean_child('recently1', "Recently spotted", False)
if 'minutes_ago' not in self.devices[key].properties:
self.devices[key].add_integer_child('minutes_ago', "Minutes ago last seen", 99999)
# Update devices
self.previously_found[key]['lastseen']
minutes_ago = int((nowstamp - self.previously_found[key]['lastseen']) / 60)
printDebug("-Minutes ago: " + str(minutes_ago), self.DEBUG)
#minutes_ago = int( ( - paststamp) / 60 )
if minutes_ago > self.time_window:
printDebug("BYE! " + str(key) + " was last seen over " + str(self.time_window) + " ago", self.DEBUG)
self.devices[key].properties['recently1'].update(False)
self.devices[key].properties['minutes_ago'].update(99999) # It's not great, but what other options are there?
else:
printDebug("HI! " + str(key) + " was spotted less than " + str(self.time_window) + " minutes ago", self.DEBUG)
self.devices[key].properties['recently1'].update(True)
self.devices[key].properties['minutes_ago'].update(minutes_ago)
except Exception as ex:
print("Could not create or update property. Error: " + str(ex))
except Exception as ex:
print("Error while updating device: " + str(ex))
# Here we remove devices that haven't been spotted in a long time.
self.prune()
def _add_device(self, mac, name, details):
"""
Add the given device, if necessary.
"""
try:
printDebug("adapter._add_device: " + str(name), self.DEBUG)
device = PresenceDevice(self, mac, name, details)
self.handle_device_added(device)
printDebug("-Adapter has finished adding new device for mac " + str(mac), self.DEBUG)
except Exception as ex:
print("Error adding new device: " + str(ex))
return
def prune(self):
        # Remove devices that haven't been seen within the configured memory window (in weeks), so the list of previously found devices doesn't grow forever.
try:
print()
too_old = datetime.now() - timedelta(weeks=self.memory_in_weeks)
printDebug("Too old threshold: " + str(too_old), self.DEBUG)
too_old_timestamp = datetime.timestamp(too_old)
items_to_remove = []
for key in self.previously_found:
printDebug("Updating: " + str(key), self.DEBUG)# + "," + str(item)
item = self.previously_found[key]
#lastSpottedTime = datetime.strptime('Jun 1 2005 1:33PM', '%b %d %Y %I:%M%p')
if too_old_timestamp > item['lastseen']:
print(str(key) + " was pruned from the list of all found devices")
items_to_remove.append(key)
else:
printDebug(str(key) + " was not too old", self.DEBUG)
pass
if len(items_to_remove):
for remove_me in items_to_remove:
self.previously_found.pop(remove_me, None)
#del self.previously_found[remove_me]
self.save_to_json()
except Exception as ex:
print("Error pruning found devices list: " + str(ex))
def add_from_config(self):
"""Attempt to add all configured devices."""
try:
database = Database('network-presence-detection-adapter')
if not database.open():
return
config = database.load_config()
database.close()
if not config or 'Memory' not in config or 'Time window' not in config:
print("Required variables not found in config database?")
return
self.memory_in_weeks = clamp(int(config['Memory']), 1, 50) # The variable is clamped: it is forced to be between 1 and 50.
self.time_window = clamp(int(config['Time window']), 1, 1380) # 'Grace period' could also be a good name.
print("Memory value from settings page: " + str(self.memory_in_weeks))
print("Time window value from settings page: " + str(self.time_window))
if 'Arping' in config:
self.arping = config['Arping'] # boolean.
if 'Default IP address' in config:
self.defaultIpAddress = config['Default IP address'] #string
if 'Default subnet mask' in config:
self.defaultSubnetMask = config['Default subnet mask'] #string
if 'Debug messages' in config:
self.DEBUG = config['Debug messages'] # boolean
print("Config loaded ok")
except:
print("Error getting config data from database")
def save_to_json(self):
try:
print("Saving updated list of found devices to json file")
if self.previously_found and self.filename:
with open(self.filename, 'w') as fp:
json.dump(self.previously_found, fp)
except:
print("Saving to json file failed")
def cancel_pairing(self):
"""Cancel the pairing process."""
self.pairing = False
self.save_to_json()
# I couldn't get the import to work, so I just copied some of the code here:
# It was made by Victor Oliveira ([email protected])
def get_vendor(mac, oui_file=OUI_FILE):
mac_clean = mac
for separator in SEPARATORS:
mac_clean = ''.join(mac_clean.split(separator))
try:
int(mac_clean, 16)
except ValueError:
raise ValueError('Invalid MAC address.')
mac_size = len(mac_clean)
if mac_size > 12 or mac_size < 6:
raise ValueError('Invalid MAC address.')
with open(os.path.join(__location__, oui_file)) as file:
mac_half = mac_clean[0:6]
mac_half_upper = mac_half.upper()
while True:
line = file.readline()
if line:
if line.startswith(mac_half_upper):
vendor = line.strip().split('\t')[-1]
return vendor
else:
break
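# Illustrative only, not part of the original add-on: a rough sketch of how the
# get_vendor() helper above could be exercised on its own, assuming the bundled OUI
# file maps the uppercased first six hex digits of a MAC to a tab-separated vendor name.
#
#   vendor = get_vendor('B8:27:EB:01:02:03')                   # MAC is made up
#   short = vendor.split(',', 1)[0] if vendor else 'unnamed'   # None if prefix not listed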
| python | 24,458 |
import itertools
from datetime import timedelta
import pandas as pd
from pandas.tseries.frequencies import to_offset
from pandarallel.utils.tools import chunk, PROGRESSION
class RollingGroupBy:
@staticmethod
def reduce(results, _):
return pd.concat(results, copy=False)
@staticmethod
def get_chunks(nb_workers, rolling_groupby, *args, **kwargs):
chunks = chunk(len(rolling_groupby._groupby), nb_workers)
iterator = iter(rolling_groupby._groupby)
for chunk_ in chunks:
yield [next(iterator) for _ in range(chunk_.stop - chunk_.start)]
@staticmethod
def att2value(rolling):
attributes = {
attribute: getattr(rolling, attribute) for attribute in rolling._attributes
}
        # When win_type == "freq" the window was given by the user as a frequency string
        # (e.g. '1D') and pandas stores it internally in nanoseconds, so convert it back
        # to a time offset and drop win_type before the rolling object is re-created.
if "win_type" in attributes and attributes["win_type"] == "freq":
window = to_offset(timedelta(microseconds=int(attributes["window"] / 1000)))
attributes["window"] = window
attributes.pop("win_type")
return attributes
@staticmethod
def worker(
tuples, index, attribute2value, queue, progress_bar, func, *args, **kwargs
):
# TODO: See if this pd.concat is avoidable
results = []
for iteration, (name, df) in enumerate(tuples):
item = df.rolling(**attribute2value).apply(func, *args, **kwargs)
item.index = pd.MultiIndex.from_product([[name], item.index])
results.append(item)
if progress_bar:
queue.put_nowait((PROGRESSION, (index, iteration)))
return pd.concat(results)
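if __name__ == "__main__":
    # Small, single-process illustration (not part of pandarallel itself) of what
    # worker() computes for each chunk of groups: a per-group rolling apply whose
    # results are re-indexed under a (group name, original index) MultiIndex.
    df = pd.DataFrame({"g": ["a", "a", "a", "b", "b"], "x": [1.0, 2.0, 3.0, 4.0, 5.0]})
    parts = []
    for name, sub in df.groupby("g")["x"]:
        item = sub.rolling(2).apply(sum)
        item.index = pd.MultiIndex.from_product([[name], item.index])
        parts.append(item)
    print(pd.concat(parts))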
| python | 1,762 |
from django.shortcuts import render, redirect
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from .forms import UserRegisterForm, UserUpdateForm, ProfileUpdateForm
# SH
from django.contrib.auth.models import User
from django.contrib.auth.models import Group
from django.views.generic import TemplateView
from django.http import HttpResponse, HttpResponseRedirect, Http404
from django.urls import path
from django.conf.urls import url
from django.contrib.auth import logout
def register(request):
if request.method == 'POST':
        form = UserRegisterForm(request.POST)  # bind the submitted data to the custom UserRegisterForm
if form.is_valid():
            form.save()  # if the form is valid then save it to the database
username = form.cleaned_data.get('username')
messages.success(request, f'Your account has been created! You are now able to login')
return redirect('/login')
else:
form = UserRegisterForm()
    return render(request, 'users/register.html', {'form': form})  # render the page for GET requests and invalid submissions
@login_required
def profile(request):
if request.method == 'POST':
u_form = UserUpdateForm(request.POST, instance=request.user)
p_form = ProfileUpdateForm(request.POST,
request.FILES,
instance=request.user.profile)
if u_form.is_valid() and p_form.is_valid():
u_form.save()
p_form.save()
messages.success(request, f'Your account has been updated!')
return redirect('profile')
else:
u_form = UserUpdateForm(instance=request.user)
p_form = ProfileUpdateForm(instance=request.user.profile)
context = {
'u_form': u_form,
'p_form': p_form
}
return render(request, 'users/profile.html', context)
# SH
def group_check(request):
    group_name = Group.objects.all().filter(user=request.user)  # get the logged-in user's group(s)
    group_name = str(group_name[0])  # take the first group and convert it to a string
if "Student" == group_name:
return redirect('http://127.0.0.1:8000/student/')
elif "Teacher" == group_name:
return redirect('http://127.0.0.1:8000/teacher/')
def logout_view(request):
logout(request)
return redirect('http://127.0.0.1:8000/')
class register_teacher(TemplateView):
template_name = "register_teacher.html"
class register_student(TemplateView):
template_name = "register_student.html"
| python | 2,535 |
# -*- encoding: utf-8 -*-
"""
@Author : zYx.Tom
@Contact : [email protected]
@site : https://zhuyuanxiang.github.io
---------------------------
@Software : PyCharm
@Project : Dive-into-Deep-Learning
@File : sec0201.py
@Version : v0.1
@Time : 2020-12-27 11:30 AM
@License : (C)Copyright 2018-2020, zYx.Tom
@Reference : "Dive into Deep Learning" (《动手学深度学习》)
@Desc : 2.2.1 Creating NDArrays
@Understanding
"""
from mxnet import nd
from tools import beep_end
# ----------------------------------------------------------------------
def create_data():
x = nd.arange(12)
print("x=", x)
print("x.shape=", x.shape)
print("x.size=", x.size)
return x
def reshape_data(X):
x = X.reshape(3, 4)
print("x.reshape=", x)
return x
def zero_data():
x = nd.zeros((2, 3, 4))
print("zero x=", x)
return x
def one_data():
x = nd.ones((3, 4))
print("one x=", x)
return x
def init_data():
y = nd.array([[2, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]])
print("init y=", y)
return y
def random_data():
x = nd.random.normal(0, 1, shape=(3, 4))
print("random x=", x)
pass
def main():
X = create_data()
reshape_data(X)
zero_data()
one_data()
init_data()
random_data()
if __name__ == '__main__':
main()
beep_end()
| python | 1,327 |
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Module containing constants for all the statically named queue.
They are located inside this module so they can be referenced in multiple places without
encountering cyclic import issues.
"""
from __future__ import absolute_import
from kombu import Queue
from st2common.constants import action as action_constants
from st2common.transport import actionexecutionstate
from st2common.transport import announcement
from st2common.transport import execution
from st2common.transport import liveaction
from st2common.transport import publishers
from st2common.transport import reactor
from st2common.transport import workflow
__all__ = [
'ACTIONSCHEDULER_REQUEST_QUEUE',
'ACTIONRUNNER_WORK_QUEUE',
'ACTIONRUNNER_CANCEL_QUEUE',
'ACTIONRUNNER_PAUSE_QUEUE',
'ACTIONRUNNER_RESUME_QUEUE',
'EXPORTER_WORK_QUEUE',
'NOTIFIER_ACTIONUPDATE_WORK_QUEUE',
'RESULTSTRACKER_ACTIONSTATE_WORK_QUEUE',
'RULESENGINE_WORK_QUEUE',
'STREAM_ANNOUNCEMENT_WORK_QUEUE',
'STREAM_EXECUTION_ALL_WORK_QUEUE',
'STREAM_EXECUTION_UPDATE_WORK_QUEUE',
'STREAM_LIVEACTION_WORK_QUEUE',
'WORKFLOW_EXECUTION_WORK_QUEUE',
'WORKFLOW_EXECUTION_RESUME_QUEUE'
]
# Used by the action scheduler service
ACTIONSCHEDULER_REQUEST_QUEUE = liveaction.get_status_management_queue(
'st2.actionrunner.req',
routing_key=action_constants.LIVEACTION_STATUS_REQUESTED)
# Used by the action runner service
ACTIONRUNNER_WORK_QUEUE = liveaction.get_status_management_queue(
'st2.actionrunner.work',
routing_key=action_constants.LIVEACTION_STATUS_SCHEDULED)
ACTIONRUNNER_CANCEL_QUEUE = liveaction.get_status_management_queue(
'st2.actionrunner.cancel',
routing_key=action_constants.LIVEACTION_STATUS_CANCELING)
ACTIONRUNNER_PAUSE_QUEUE = liveaction.get_status_management_queue(
'st2.actionrunner.pause',
routing_key=action_constants.LIVEACTION_STATUS_PAUSING)
ACTIONRUNNER_RESUME_QUEUE = liveaction.get_status_management_queue(
'st2.actionrunner.resume',
routing_key=action_constants.LIVEACTION_STATUS_RESUMING)
# Used by the exporter service
EXPORTER_WORK_QUEUE = execution.get_queue(
'st2.exporter.work',
routing_key=publishers.UPDATE_RK)
# Used by the notifier service
NOTIFIER_ACTIONUPDATE_WORK_QUEUE = execution.get_queue(
'st2.notifiers.execution.work',
routing_key=publishers.UPDATE_RK)
# Used by the results tracker service
RESULTSTRACKER_ACTIONSTATE_WORK_QUEUE = actionexecutionstate.get_queue(
'st2.resultstracker.work',
routing_key=publishers.CREATE_RK)
# Used by the rules engine service
RULESENGINE_WORK_QUEUE = reactor.get_trigger_instances_queue(
name='st2.trigger_instances_dispatch.rules_engine',
routing_key='#')
# Used by the stream service
STREAM_ANNOUNCEMENT_WORK_QUEUE = announcement.get_queue(
routing_key=publishers.ANY_RK,
exclusive=True,
auto_delete=True)
STREAM_EXECUTION_ALL_WORK_QUEUE = execution.get_queue(
routing_key=publishers.ANY_RK,
exclusive=True,
auto_delete=True)
STREAM_EXECUTION_UPDATE_WORK_QUEUE = execution.get_queue(
routing_key=publishers.UPDATE_RK,
exclusive=True,
auto_delete=True)
STREAM_LIVEACTION_WORK_QUEUE = Queue(
None,
liveaction.LIVEACTION_XCHG,
routing_key=publishers.ANY_RK,
exclusive=True,
auto_delete=True)
# TODO: Perhaps we should use pack.action name as routing key
# so we can do more efficient filtering later, if needed
STREAM_EXECUTION_OUTPUT_QUEUE = execution.get_output_queue(
name=None,
routing_key=publishers.CREATE_RK,
exclusive=True,
auto_delete=True)
# Used by the workflow engine service
WORKFLOW_EXECUTION_WORK_QUEUE = workflow.get_status_management_queue(
name='st2.workflow.work',
routing_key=action_constants.LIVEACTION_STATUS_REQUESTED)
WORKFLOW_EXECUTION_RESUME_QUEUE = workflow.get_status_management_queue(
name='st2.workflow.resume',
routing_key=action_constants.LIVEACTION_STATUS_RESUMING)
WORKFLOW_ACTION_EXECUTION_UPDATE_QUEUE = execution.get_queue(
'st2.workflow.action.update',
routing_key=publishers.UPDATE_RK)
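# Illustrative only: one way to drain a few messages from one of the queues above
# with plain kombu (the broker URL is a placeholder; st2 services consume these
# queues through their own transport wrappers rather than like this):
#
#   from kombu import Connection
#
#   def on_message(body, message):
#       print(body)
#       message.ack()
#
#   with Connection('amqp://guest:guest@127.0.0.1:5672//') as conn:
#       with conn.Consumer([ACTIONRUNNER_WORK_QUEUE], callbacks=[on_message]):
#           conn.drain_events(timeout=5)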
| python | 4,883 |
# -*- coding: utf-8 -*-
# Copyright 2015 Pietro Brunetti <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Defining search dialogs """
__authors__ = "Pietro Brunetti"
import wx
import re
import DialogCommons
class Dialog(wx.Dialog):
""" Search Dialog class"""
def __init__( self, text,
parent, ID, title, size=wx.DefaultSize, pos=wx.DefaultPosition,
style=wx.DEFAULT_DIALOG_STYLE,
useMetal=False):
"""Initialize a search dialog"""
sizer = DialogCommons.createMainSizer(self, parent, ID, title, pos,
size, style)
self._createMask(sizer, text)
DialogCommons.createBtnSizer(self, sizer,
"The OK button to start the Search")
self.SetSizer(sizer)
sizer.Fit(self)
def _createMask(self, sizer, text):
""" Create search mask """
megabox = wx.FlexGridSizer(1, 2, 3, 3)
megabox.AddGrowableCol(1)
l_prot = wx.StaticText(self, -1, text)
megabox.Add(l_prot, 0, wx.ALIGN_CENTER_VERTICAL)
self.t_prot = wx.TextCtrl(self, -1)
megabox.Add(self.t_prot, 0, wx.EXPAND)
sizer.Add(megabox, 1, wx.EXPAND|wx.ALL, 5)
def GetValue(self):
return self.t_prot.GetValue()
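if __name__ == '__main__':
    # Minimal manual test, not part of the original module; it assumes DialogCommons
    # builds the usual OK/Cancel buttons and simply prints whatever the user typed.
    app = wx.App(False)
    dlg = Dialog('Search term:', None, -1, 'Search')
    if dlg.ShowModal() == wx.ID_OK:
        print(dlg.GetValue())
    dlg.Destroy()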
| python | 1,828 |
"""
antecedent_consequent.py : Contains Antecedent and Consequent classes.
"""
import numpy as np
import networkx as nx
from .state import StatefulProperty
from ..fuzzymath import interp_membership
from .fuzzyvariable import FuzzyVariable
from ..defuzzify import defuzz
try:
from collections import OrderedDict
except ImportError:
from .ordereddict import OrderedDict
def accu_max(*args):
return np.max(args)
class Antecedent(FuzzyVariable):
"""
Antecedent (input/sensor) variable for a fuzzy control system.
Parameters
----------
universe : array-like
Universe variable. Must be 1-dimensional and convertible to a NumPy
array.
label : string
Name of the universe variable.
"""
# Customized subclass of `FuzzyVariable`
input = StatefulProperty(None)
def __init__(self, universe, label):
"""""" + Antecedent.__doc__
super(Antecedent, self).__init__(universe, label)
self.__name__ = 'Antecedent'
@property
def graph(self):
g = nx.DiGraph()
for t in self.terms.values():
g.add_path([self, t])
return g
class Consequent(FuzzyVariable):
"""
Consequent (output/control) variable for a fuzzy control system.
Parameters
----------
universe : array-like
Universe variable. Must be 1-dimensional and convertible to a NumPy
array.
label : string
Name of the universe variable.
Notes
-----
The ``label`` string chosen must be unique among Antecedents and
Consequents in the ``ControlSystem``.
"""
# Customized subclass of `FuzzyVariable`
output = StatefulProperty(None)
def __init__(self, universe, label):
"""""" + Consequent.__doc__
super(Consequent, self).__init__(universe, label)
self.__name__ = 'Consequent'
# Default accumulation method is to take the max of any cut
self.accumulation_method = accu_max
@property
def graph(self):
g = nx.DiGraph()
for t in self.terms.values():
g.add_path([t, self])
return g
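# Quick illustrative construction (kept as a comment because this module uses
# relative imports and is meant to be imported from the package, not run directly):
#
#   import numpy as np
#   quality = Antecedent(np.arange(0, 11, 1), 'quality')
#   tip = Consequent(np.arange(0, 26, 1), 'tip')
#   # membership functions would then be attached before the variables are used
#   # in rules, e.g. via quality.automf(3) or explicit trimf() definitions.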
| python | 2,130 |
# -*- coding: UTF-8 -*-
from django.db import models
from django.contrib.auth.models import AbstractUser
from .aes_decryptor import Prpcrypt
# Create your models here.
# There are two kinds of roles:
# 1. Engineer: engineers who can submit SQL release tickets; the username field is the login name and the display field is the displayed (Chinese) name.
# 2. Reviewer: managers, senior engineers and system administrators who can review and execute SQL release tickets.
class users(AbstractUser):
display = models.CharField('显示的中文名', max_length=50)
role = models.CharField('角色', max_length=20, choices=(('工程师', '工程师'), ('审核人', '审核人'), ('DBA', 'DBA')), default='工程师')
is_ldapuser = models.BooleanField('ldap用戶', default=False)
def __str__(self):
return self.username
class Meta:
verbose_name = u'用户配置'
verbose_name_plural = u'用户配置'
# Addresses of the online master databases.
class master_config(models.Model):
cluster_name = models.CharField('集群名称', max_length=50, unique=True)
master_host = models.CharField('主库地址', max_length=200)
master_port = models.IntegerField('主库端口', default=3306)
master_user = models.CharField('登录主库的用户名', max_length=100)
master_password = models.CharField('登录主库的密码', max_length=300)
create_time = models.DateTimeField('创建时间', auto_now_add=True)
update_time = models.DateTimeField('更新时间', auto_now=True)
def __str__(self):
return self.cluster_name
class Meta:
verbose_name = u'主库地址配置'
verbose_name_plural = u'主库地址配置'
def save(self, *args, **kwargs):
        pc = Prpcrypt()  # initialize the encryptor
self.master_password = pc.encrypt(self.master_password)
super(master_config, self).save(*args, **kwargs)
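# Illustrative only (requires a configured Django environment; all values are made up):
#
#   cfg = master_config(cluster_name='demo', master_host='10.0.0.1',
#                       master_user='root', master_password='secret')
#   cfg.save()  # save() runs the password through Prpcrypt.encrypt() before storing it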
# Stores the full content of every SQL release ticket. Historical data can be archived or purged periodically, and the table can also be compressed via "alter table workflow row_format=compressed;".
class workflow(models.Model):
workflow_name = models.CharField('工单内容', max_length=50)
engineer = models.CharField('发起人', max_length=50)
review_man = models.CharField('审核人', max_length=50)
create_time = models.DateTimeField('创建时间', auto_now_add=True)
finish_time = models.DateTimeField('结束时间', null=True, blank=True)
status = models.CharField(max_length=50, choices=(
('已正常结束', '已正常结束'), ('人工终止流程', '人工终止流程'), ('自动审核中', '自动审核中'), ('等待审核人审核', '等待审核人审核'), ('审核通过', '审核通过'),
('定时执行', '定时执行'), ('执行中', '执行中'), ('自动审核不通过', '自动审核不通过'), ('执行有异常', '执行有异常')))
# is_backup = models.IntegerField('是否备份,0为否,1为是', choices=((0,0),(1,1)))
is_backup = models.CharField('是否备份', choices=(('否', '否'), ('是', '是')), max_length=20)
review_content = models.TextField('自动审核内容的JSON格式')
cluster_name = models.CharField('集群名称', max_length=50)
reviewok_time = models.DateTimeField('人工审核通过的时间', null=True, blank=True)
sql_content = models.TextField('具体sql内容')
execute_result = models.TextField('执行结果的JSON格式', blank=True)
is_manual = models.IntegerField('是否手工执行', choices=((0, '否'), (1, '是')), default=0)
audit_remark = models.TextField('审核备注', null=True, blank=True)
def __str__(self):
return self.workflow_name
class Meta:
verbose_name = u'SQL工单管理'
verbose_name_plural = u'SQL工单管理'
# Addresses of the online slave (read-only) databases
class slave_config(models.Model):
cluster_name = models.CharField('集群名称', max_length=50, unique=True)
slave_host = models.CharField('从库地址', max_length=200)
slave_port = models.IntegerField('从库端口', default=3306)
slave_user = models.CharField('登录从库的用户名', max_length=100)
slave_password = models.CharField('登录从库的密码', max_length=300)
create_time = models.DateTimeField('创建时间', auto_now_add=True)
update_time = models.DateTimeField('更新时间', auto_now=True)
class Meta:
verbose_name = u'从库地址配置'
verbose_name_plural = u'从库地址配置'
def save(self, *args, **kwargs):
        pc = Prpcrypt()  # initialize the encryptor
self.slave_password = pc.encrypt(self.slave_password)
super(slave_config, self).save(*args, **kwargs)
# Workflow audit master table
class WorkflowAudit(models.Model):
audit_id = models.AutoField(primary_key=True)
workflow_id = models.BigIntegerField('关联业务id')
workflow_type = models.IntegerField('申请类型',
choices=((1, '查询权限申请'),))
workflow_title = models.CharField('申请标题', max_length=50)
workflow_remark = models.CharField('申请备注', default='', max_length=140)
audit_users = models.CharField('审核人列表', max_length=255)
current_audit_user = models.CharField('当前审核人', max_length=20)
next_audit_user = models.CharField('下级审核人', max_length=20)
current_status = models.IntegerField('审核状态', choices=((0, '待审核'), (1, '审核通过'), (2, '审核不通过'), (3, '审核取消')))
create_user = models.CharField('申请人', max_length=20)
create_time = models.DateTimeField('申请时间', auto_now_add=True)
sys_time = models.DateTimeField('系统时间', auto_now=True)
def __int__(self):
return self.audit_id
class Meta:
db_table = 'workflow_audit'
unique_together = ('workflow_id', 'workflow_type')
verbose_name = u'工作流列表'
verbose_name_plural = u'工作流列表'
# Audit detail table
class WorkflowAuditDetail(models.Model):
audit_detail_id = models.AutoField(primary_key=True)
audit_id = models.ForeignKey(WorkflowAudit, db_constraint=False, to_field='audit_id',
db_column='audit_id', verbose_name='审核主表id')
audit_user = models.CharField('审核人', max_length=20)
audit_time = models.DateTimeField('审核时间')
audit_status = models.IntegerField('审核状态', choices=((0, '待审核'), (1, '审核通过'), (2, '审核不通过'), (3, '审核取消')), )
remark = models.CharField('审核备注', default='', max_length=140)
sys_time = models.DateTimeField('系统时间', auto_now=True)
def __int__(self):
return self.audit_detail_id
class Meta:
db_table = 'workflow_audit_detail'
verbose_name = u'审批明细表'
verbose_name_plural = u'审批明细表'
# Audit configuration table
class WorkflowAuditSetting(models.Model):
audit_setting_id = models.AutoField(primary_key=True)
workflow_type = models.IntegerField('申请类型,', choices=((1, '查询权限申请'),), unique=True)
audit_users = models.CharField('审核人,单人审核格式为:user1,多级审核格式为:user1,user2', max_length=255)
create_time = models.DateTimeField(auto_now_add=True)
sys_time = models.DateTimeField(auto_now=True)
def __int__(self):
return self.audit_setting_id
class Meta:
db_table = 'workflow_audit_setting'
verbose_name = u'工作流配置'
verbose_name_plural = u'工作流配置'
# Query privilege application records
class QueryPrivilegesApply(models.Model):
apply_id = models.AutoField(primary_key=True)
title = models.CharField('申请标题', max_length=50)
user_name = models.CharField('申请人', max_length=30)
cluster_name = models.CharField('集群名称', max_length=50)
db_list = models.TextField('数据库')
table_list = models.TextField('表')
valid_date = models.DateField('有效时间')
limit_num = models.IntegerField('行数限制', default=100)
priv_type = models.IntegerField('权限类型', choices=((1, 'DATABASE'), (2, 'TABLE'),), default=0)
status = models.IntegerField('审核状态', choices=((0, '待审核'), (1, '审核通过'), (2, '审核不通过'), (3, '审核取消')), )
create_time = models.DateTimeField(auto_now_add=True)
sys_time = models.DateTimeField(auto_now=True)
def __int__(self):
return self.apply_id
class Meta:
db_table = 'query_privileges_apply'
verbose_name = u'查询权限申请记录表'
verbose_name_plural = u'查询权限申请记录表'
# User-to-privilege mapping table
class QueryPrivileges(models.Model):
privilege_id = models.AutoField(primary_key=True)
user_name = models.CharField('用户名', max_length=30)
cluster_name = models.CharField('集群名称', max_length=50)
db_name = models.CharField('数据库', max_length=200)
table_name = models.CharField('表', max_length=200)
valid_date = models.DateField('有效时间')
limit_num = models.IntegerField('行数限制', default=100)
priv_type = models.IntegerField('权限类型', choices=((1, 'DATABASE'), (2, 'TABLE'),), default=0)
is_deleted = models.IntegerField('是否删除', default=0)
create_time = models.DateTimeField(auto_now_add=True)
sys_time = models.DateTimeField(auto_now=True)
def __int__(self):
return self.privilege_id
class Meta:
db_table = 'query_privileges'
verbose_name = u'查询权限记录表'
verbose_name_plural = u'查询权限记录表'
# Log of online SQL queries
class QueryLog(models.Model):
cluster_name = models.CharField('集群名称', max_length=50)
db_name = models.CharField('数据库名称', max_length=30)
sqllog = models.TextField('执行的sql查询')
effect_row = models.BigIntegerField('返回行数')
cost_time = models.CharField('执行耗时', max_length=10, default='')
username = models.CharField('操作人', max_length=30)
create_time = models.DateTimeField('操作时间', auto_now_add=True)
sys_time = models.DateTimeField(auto_now=True)
class Meta:
db_table = 'query_log'
verbose_name = u'sql查询日志'
verbose_name_plural = u'sql查询日志'
# Data-masking column configuration
class DataMaskingColumns(models.Model):
column_id = models.AutoField('字段id', primary_key=True)
rule_type = models.IntegerField('规则类型',
choices=((1, '手机号'), (2, '证件号码'), (3, '银行卡'), (4, '邮箱'), (5, '金额'), (6, '其他')))
active = models.IntegerField('激活状态', choices=((0, '未激活'), (1, '激活')))
cluster_name = models.CharField('集群名称', max_length=50)
table_schema = models.CharField('字段所在库名', max_length=64)
table_name = models.CharField('字段所在表名', max_length=64)
column_name = models.CharField('字段名', max_length=64)
column_comment = models.CharField('字段描述', max_length=1024, default='', blank=True)
create_time = models.DateTimeField(auto_now_add=True)
sys_time = models.DateTimeField(auto_now=True)
class Meta:
db_table = 'data_masking_columns'
verbose_name = u'脱敏字段配置'
verbose_name_plural = u'脱敏字段配置'
# Data-masking rule configuration
class DataMaskingRules(models.Model):
rule_type = models.IntegerField('规则类型',
choices=((1, '手机号'), (2, '证件号码'), (3, '银行卡'), (4, '邮箱'), (5, '金额'), (6, '其他')), unique=True)
rule_regex = models.CharField('规则脱敏所用的正则表达式,表达式必须分组,隐藏的组会使用****代替', max_length=255)
hide_group = models.IntegerField('需要隐藏的组')
rule_desc = models.CharField('规则描述', max_length=100, default='', blank=True)
sys_time = models.DateTimeField(auto_now=True)
class Meta:
db_table = 'data_masking_rules'
verbose_name = u'脱敏规则配置'
verbose_name_plural = u'脱敏规则配置'
# Stores Aliyun (Alibaba Cloud) authentication credentials
class AliyunAccessKey(models.Model):
ak = models.CharField(max_length=50)
secret = models.CharField(max_length=100)
is_enable = models.IntegerField(choices=((1, '启用'), (2, '禁用')))
remark = models.CharField(max_length=50, default='', blank=True)
class Meta:
db_table = 'aliyun_access_key'
verbose_name = u'阿里云认证信息'
verbose_name_plural = u'阿里云认证信息'
def save(self, *args, **kwargs):
        pc = Prpcrypt()  # initialize the encryptor
self.ak = pc.encrypt(self.ak)
self.secret = pc.encrypt(self.secret)
super(AliyunAccessKey, self).save(*args, **kwargs)
# Aliyun RDS configuration
class AliyunRdsConfig(models.Model):
cluster_name = models.OneToOneField(master_config, db_constraint=False, to_field='cluster_name',
db_column='cluster_name', verbose_name='集群名称', unique=True)
rds_dbinstanceid = models.CharField('阿里云RDS实例ID', max_length=100)
def __int__(self):
return self.rds_dbinstanceid
class Meta:
db_table = 'aliyun_rds_config'
verbose_name = u'阿里云rds配置'
verbose_name_plural = u'阿里云rds配置'
# SlowQuery
class SlowQuery(models.Model):
checksum = models.BigIntegerField(primary_key=True)
fingerprint = models.TextField()
sample = models.TextField()
first_seen = models.DateTimeField(blank=True, null=True)
last_seen = models.DateTimeField(blank=True, null=True, db_index=True)
reviewed_by = models.CharField(max_length=20, blank=True, null=True)
reviewed_on = models.DateTimeField(blank=True, null=True)
comments = models.TextField(blank=True, null=True)
class Meta:
managed = False
db_table = 'mysql_slow_query_review'
verbose_name = u'慢日志统计'
verbose_name_plural = u'慢日志统计'
# SlowQueryHistory
class SlowQueryHistory(models.Model):
hostname_max = models.CharField(max_length=64, null=False)
client_max = models.CharField(max_length=64, null=True)
user_max = models.CharField(max_length=64, null=False)
db_max = models.CharField(max_length=64, null=True, default=None)
bytes_max = models.CharField(max_length=64, null=True)
checksum = models.ForeignKey(SlowQuery, db_constraint=False, to_field='checksum', db_column='checksum')
sample = models.TextField()
ts_min = models.DateTimeField(db_index=True)
ts_max = models.DateTimeField()
ts_cnt = models.FloatField(blank=True, null=True)
query_time_sum = models.FloatField(db_column='Query_time_sum', blank=True, null=True)
query_time_min = models.FloatField(db_column='Query_time_min', blank=True, null=True)
query_time_max = models.FloatField(db_column='Query_time_max', blank=True, null=True)
query_time_pct_95 = models.FloatField(db_column='Query_time_pct_95', blank=True, null=True)
query_time_stddev = models.FloatField(db_column='Query_time_stddev', blank=True, null=True)
query_time_median = models.FloatField(db_column='Query_time_median', blank=True, null=True)
lock_time_sum = models.FloatField(db_column='Lock_time_sum', blank=True, null=True)
lock_time_min = models.FloatField(db_column='Lock_time_min', blank=True, null=True)
lock_time_max = models.FloatField(db_column='Lock_time_max', blank=True, null=True)
lock_time_pct_95 = models.FloatField(db_column='Lock_time_pct_95', blank=True, null=True)
lock_time_stddev = models.FloatField(db_column='Lock_time_stddev', blank=True, null=True)
lock_time_median = models.FloatField(db_column='Lock_time_median', blank=True, null=True)
rows_sent_sum = models.FloatField(db_column='Rows_sent_sum', blank=True, null=True)
rows_sent_min = models.FloatField(db_column='Rows_sent_min', blank=True, null=True)
rows_sent_max = models.FloatField(db_column='Rows_sent_max', blank=True, null=True)
rows_sent_pct_95 = models.FloatField(db_column='Rows_sent_pct_95', blank=True, null=True)
rows_sent_stddev = models.FloatField(db_column='Rows_sent_stddev', blank=True, null=True)
rows_sent_median = models.FloatField(db_column='Rows_sent_median', blank=True, null=True)
rows_examined_sum = models.FloatField(db_column='Rows_examined_sum', blank=True, null=True)
rows_examined_min = models.FloatField(db_column='Rows_examined_min', blank=True, null=True)
rows_examined_max = models.FloatField(db_column='Rows_examined_max', blank=True, null=True)
rows_examined_pct_95 = models.FloatField(db_column='Rows_examined_pct_95', blank=True, null=True)
rows_examined_stddev = models.FloatField(db_column='Rows_examined_stddev', blank=True, null=True)
rows_examined_median = models.FloatField(db_column='Rows_examined_median', blank=True, null=True)
rows_affected_sum = models.FloatField(db_column='Rows_affected_sum', blank=True, null=True)
rows_affected_min = models.FloatField(db_column='Rows_affected_min', blank=True, null=True)
rows_affected_max = models.FloatField(db_column='Rows_affected_max', blank=True, null=True)
rows_affected_pct_95 = models.FloatField(db_column='Rows_affected_pct_95', blank=True, null=True)
rows_affected_stddev = models.FloatField(db_column='Rows_affected_stddev', blank=True, null=True)
rows_affected_median = models.FloatField(db_column='Rows_affected_median', blank=True, null=True)
rows_read_sum = models.FloatField(db_column='Rows_read_sum', blank=True, null=True)
rows_read_min = models.FloatField(db_column='Rows_read_min', blank=True, null=True)
rows_read_max = models.FloatField(db_column='Rows_read_max', blank=True, null=True)
rows_read_pct_95 = models.FloatField(db_column='Rows_read_pct_95', blank=True, null=True)
rows_read_stddev = models.FloatField(db_column='Rows_read_stddev', blank=True, null=True)
rows_read_median = models.FloatField(db_column='Rows_read_median', blank=True, null=True)
merge_passes_sum = models.FloatField(db_column='Merge_passes_sum', blank=True, null=True)
merge_passes_min = models.FloatField(db_column='Merge_passes_min', blank=True, null=True)
merge_passes_max = models.FloatField(db_column='Merge_passes_max', blank=True, null=True)
merge_passes_pct_95 = models.FloatField(db_column='Merge_passes_pct_95', blank=True, null=True)
merge_passes_stddev = models.FloatField(db_column='Merge_passes_stddev', blank=True, null=True)
merge_passes_median = models.FloatField(db_column='Merge_passes_median', blank=True, null=True)
innodb_io_r_ops_min = models.FloatField(db_column='InnoDB_IO_r_ops_min', blank=True, null=True)
innodb_io_r_ops_max = models.FloatField(db_column='InnoDB_IO_r_ops_max', blank=True, null=True)
innodb_io_r_ops_pct_95 = models.FloatField(db_column='InnoDB_IO_r_ops_pct_95', blank=True, null=True)
innodb_io_r_ops_stddev = models.FloatField(db_column='InnoDB_IO_r_ops_stddev', blank=True, null=True)
innodb_io_r_ops_median = models.FloatField(db_column='InnoDB_IO_r_ops_median', blank=True, null=True)
innodb_io_r_bytes_min = models.FloatField(db_column='InnoDB_IO_r_bytes_min', blank=True, null=True)
innodb_io_r_bytes_max = models.FloatField(db_column='InnoDB_IO_r_bytes_max', blank=True, null=True)
innodb_io_r_bytes_pct_95 = models.FloatField(db_column='InnoDB_IO_r_bytes_pct_95', blank=True, null=True)
innodb_io_r_bytes_stddev = models.FloatField(db_column='InnoDB_IO_r_bytes_stddev', blank=True, null=True)
innodb_io_r_bytes_median = models.FloatField(db_column='InnoDB_IO_r_bytes_median', blank=True, null=True)
innodb_io_r_wait_min = models.FloatField(db_column='InnoDB_IO_r_wait_min', blank=True, null=True)
innodb_io_r_wait_max = models.FloatField(db_column='InnoDB_IO_r_wait_max', blank=True, null=True)
innodb_io_r_wait_pct_95 = models.FloatField(db_column='InnoDB_IO_r_wait_pct_95', blank=True, null=True)
innodb_io_r_wait_stddev = models.FloatField(db_column='InnoDB_IO_r_wait_stddev', blank=True, null=True)
innodb_io_r_wait_median = models.FloatField(db_column='InnoDB_IO_r_wait_median', blank=True, null=True)
innodb_rec_lock_wait_min = models.FloatField(db_column='InnoDB_rec_lock_wait_min', blank=True, null=True)
innodb_rec_lock_wait_max = models.FloatField(db_column='InnoDB_rec_lock_wait_max', blank=True, null=True)
innodb_rec_lock_wait_pct_95 = models.FloatField(db_column='InnoDB_rec_lock_wait_pct_95', blank=True, null=True)
innodb_rec_lock_wait_stddev = models.FloatField(db_column='InnoDB_rec_lock_wait_stddev', blank=True, null=True)
innodb_rec_lock_wait_median = models.FloatField(db_column='InnoDB_rec_lock_wait_median', blank=True, null=True)
innodb_queue_wait_min = models.FloatField(db_column='InnoDB_queue_wait_min', blank=True, null=True)
innodb_queue_wait_max = models.FloatField(db_column='InnoDB_queue_wait_max', blank=True, null=True)
innodb_queue_wait_pct_95 = models.FloatField(db_column='InnoDB_queue_wait_pct_95', blank=True, null=True)
innodb_queue_wait_stddev = models.FloatField(db_column='InnoDB_queue_wait_stddev', blank=True, null=True)
innodb_queue_wait_median = models.FloatField(db_column='InnoDB_queue_wait_median', blank=True, null=True)
innodb_pages_distinct_min = models.FloatField(db_column='InnoDB_pages_distinct_min', blank=True, null=True)
innodb_pages_distinct_max = models.FloatField(db_column='InnoDB_pages_distinct_max', blank=True, null=True)
innodb_pages_distinct_pct_95 = models.FloatField(db_column='InnoDB_pages_distinct_pct_95', blank=True, null=True)
innodb_pages_distinct_stddev = models.FloatField(db_column='InnoDB_pages_distinct_stddev', blank=True, null=True)
innodb_pages_distinct_median = models.FloatField(db_column='InnoDB_pages_distinct_median', blank=True, null=True)
qc_hit_cnt = models.FloatField(db_column='QC_Hit_cnt', blank=True, null=True)
qc_hit_sum = models.FloatField(db_column='QC_Hit_sum', blank=True, null=True)
full_scan_cnt = models.FloatField(db_column='Full_scan_cnt', blank=True, null=True)
full_scan_sum = models.FloatField(db_column='Full_scan_sum', blank=True, null=True)
full_join_cnt = models.FloatField(db_column='Full_join_cnt', blank=True, null=True)
full_join_sum = models.FloatField(db_column='Full_join_sum', blank=True, null=True)
tmp_table_cnt = models.FloatField(db_column='Tmp_table_cnt', blank=True, null=True)
tmp_table_sum = models.FloatField(db_column='Tmp_table_sum', blank=True, null=True)
tmp_table_on_disk_cnt = models.FloatField(db_column='Tmp_table_on_disk_cnt', blank=True, null=True)
tmp_table_on_disk_sum = models.FloatField(db_column='Tmp_table_on_disk_sum', blank=True, null=True)
filesort_cnt = models.FloatField(db_column='Filesort_cnt', blank=True, null=True)
filesort_sum = models.FloatField(db_column='Filesort_sum', blank=True, null=True)
filesort_on_disk_cnt = models.FloatField(db_column='Filesort_on_disk_cnt', blank=True, null=True)
filesort_on_disk_sum = models.FloatField(db_column='Filesort_on_disk_sum', blank=True, null=True)
class Meta:
managed = False
db_table = 'mysql_slow_query_review_history'
unique_together = ('hostname_max', 'ts_min')
verbose_name = u'慢日志明细'
verbose_name_plural = u'慢日志明细'
| python | 21,384 |
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from .ssd_utils import *
import torch.nn.init as init
import os
class SSD(nn.Module):
"""Single Shot Multibox Architecture
The network is composed of a base VGG network followed by the
added multibox conv layers. Each multibox layer branches into
1) conv2d for class conf scores
2) conv2d for localization predictions
3) associated priorbox layer to produce default bounding
boxes specific to the layer's feature map size.
See: https://arxiv.org/pdf/1512.02325.pdf for more details.
Args:
phase: (string) Can be "test" or "train"
size: input image size
base: VGG16 layers for input, size of either 300 or 500
extras: extra layers that feed to multibox loc and conf layers
head: "multibox head" consists of loc and conf conv layers
"""
def __init__(self, option, phase, size, base, extras, head, num_classes):
super(SSD, self).__init__()
self.option = option
self.features = []
self.phase = phase
self.num_classes = num_classes
self.cfg = self.option.result['detector']
self.priorbox = PriorBox(self.cfg)
with torch.no_grad():
self.priors = self.priorbox.forward()
self.size = size
# SSD network
self.backbone = nn.Sequential(*base)
# Layer learns to scale the l2 normalized features from conv4_3
self.L2Norm = L2Norm(512, 20)
self.extras = nn.ModuleList(extras)
self.loc = nn.ModuleList(head[0])
self.conf = nn.ModuleList(head[1])
if phase == 'test':
self.softmax = nn.Softmax(dim=-1)
self.detect = Detect(num_classes, 0, 200, 0.01, 0.45, self.cfg['variance'])
def forward(self, x):
"""Applies network layers and ops on input image(s) x.
Args:
x: input image or batch of images. Shape: [batch,3,300,300].
Return:
Depending on phase:
test:
Variable(tensor) of output class label predictions,
confidence score, and corresponding location predictions for
each object detected. Shape: [batch,topk,7]
train:
list of concat outputs from:
1: confidence layers, Shape: [batch*num_priors,num_classes]
2: localization layers, Shape: [batch,num_priors*4]
3: priorbox layers, Shape: [2,num_priors*4]
"""
sources = list()
loc = list()
conf = list()
x = self.backbone(x)
# apply vgg up to conv4_3 relu
s = self.L2Norm(self.features[0])
sources.append(s)
# apply vgg up to fc7
sources.append(x)
# apply extra layers and cache source layer outputs
for k, v in enumerate(self.extras):
x = F.relu(v(x), inplace=True)
if k % 2 == 1:
sources.append(x)
# apply multibox head to source layers
for (x, l, c) in zip(sources, self.loc, self.conf):
loc.append(l(x).permute(0, 2, 3, 1).contiguous())
conf.append(c(x).permute(0, 2, 3, 1).contiguous())
loc = torch.cat([o.view(o.size(0), -1) for o in loc], 1)
conf = torch.cat([o.view(o.size(0), -1) for o in conf], 1)
if self.phase == "test":
output = self.detect(
loc.view(loc.size(0), -1, 4), # loc preds
self.softmax(conf.view(conf.size(0), -1,
self.num_classes)), # conf preds
self.priors.type(type(x.data)) # default boxes
)
else:
output = (
loc.view(loc.size(0), -1, 4),
conf.view(conf.size(0), -1, self.num_classes),
self.priors
)
return output
# Initialize
def initialize(self):
def weights_init(m):
if isinstance(m, nn.Conv2d):
init.xavier_normal_(m.weight.data)
init.constant_(m.bias.data, 0.0)
self.extras.apply(weights_init)
self.loc.apply(weights_init)
self.conf.apply(weights_init)
# Hook
def get_features(self, _, inputs, outputs):
self.features.append(outputs)
def clear_features(self):
self.features = []
def get_hook(self, target_layers):
for name, param in self.backbone.named_children():
if name in target_layers:
setattr(self, 'hook_detector_%s' %name, param.register_forward_hook(self.get_features))
def remove_hook(self, target_layers):
for name in target_layers:
getattr(self, 'hook_detector_%s' %name).remove()
# This function is derived from torchvision VGG make_layers()
# https://github.com/pytorch/vision/blob/master/torchvision/models/vgg.py
def vgg(cfg, i, batch_norm=False):
layers = []
in_channels = i
for v in cfg:
if v == 'M':
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
elif v == 'C':
layers += [nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True)]
else:
conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
if batch_norm:
layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
else:
layers += [conv2d, nn.ReLU(inplace=True)]
in_channels = v
pool5 = nn.MaxPool2d(kernel_size=3, stride=1, padding=1)
conv6 = nn.Conv2d(512, 1024, kernel_size=3, padding=6, dilation=6)
conv7 = nn.Conv2d(1024, 1024, kernel_size=1)
layers += [pool5, conv6,
nn.ReLU(inplace=True), conv7, nn.ReLU(inplace=True)]
return layers
def add_extras(cfg, i, batch_norm=False):
# Extra layers added to VGG for feature scaling
layers = []
in_channels = i
flag = False
for k, v in enumerate(cfg):
if in_channels != 'S':
if v == 'S':
layers += [nn.Conv2d(in_channels, cfg[k + 1],
kernel_size=(1, 3)[flag], stride=2, padding=1)]
else:
layers += [nn.Conv2d(in_channels, v, kernel_size=(1, 3)[flag])]
flag = not flag
in_channels = v
return layers
def multibox(vgg, extra_layers, cfg, num_classes):
loc_layers = []
conf_layers = []
vgg_source = [21, -2]
for k, v in enumerate(vgg_source):
loc_layers += [nn.Conv2d(vgg[v].out_channels,
cfg[k] * 4, kernel_size=3, padding=1)]
conf_layers += [nn.Conv2d(vgg[v].out_channels,
cfg[k] * num_classes, kernel_size=3, padding=1)]
for k, v in enumerate(extra_layers[1::2], 2):
loc_layers += [nn.Conv2d(v.out_channels, cfg[k]
* 4, kernel_size=3, padding=1)]
conf_layers += [nn.Conv2d(v.out_channels, cfg[k]
* num_classes, kernel_size=3, padding=1)]
return vgg, extra_layers, (loc_layers, conf_layers)
base = {
'300': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'C', 512, 512, 512, 'M',
512, 512, 512],
'512': [],
}
extras = {
'300': [256, 'S', 512, 128, 'S', 256, 128, 256, 128, 256],
'512': [],
}
mbox = {
'300': [4, 6, 6, 6, 4, 4], # number of boxes per feature map location
'512': [],
}
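# Illustrative call pattern for build_ssd() defined below. The 'option' object and its
# option.result['detector'] config dict come from the surrounding project and are only
# assumed here; the backbone layer index passed to get_hook() depends on the VGG layer
# numbering (the ReLU after conv4_3 in this configuration) and is a guess as well.
#
#   net = build_ssd(option, 'train', size=300, num_classes=21)
#   net.initialize()
#   net.get_hook(target_layers=['22'])
#   loc, conf, priors = net(torch.randn(1, 3, 300, 300))
#   net.clear_features()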
def build_ssd(option, phase, size=300, num_classes=21):
if phase != "test" and phase != "train":
print("ERROR: Phase: " + phase + " not recognized")
return
if size != 300:
print("ERROR: You specified size " + repr(size) + ". However, " +
"currently only SSD300 (size=300) is supported!")
return
base_, extras_, head_ = multibox(vgg(base[str(size)], 3),
add_extras(extras[str(size)], 1024),
mbox[str(size)], num_classes)
    return SSD(option, phase, size, base_, extras_, head_, num_classes)
 | python | 8,147 |
# Generated by Django 3.2.6 on 2021-10-07 22:14
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Note',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100)),
('body', models.TextField()),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
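# For reference, the model created by this initial migration corresponds roughly to
# (field names taken from the operations above; the app and module layout are assumed):
#
#   class Note(models.Model):
#       title = models.CharField(max_length=100)
#       body = models.TextField()
#       created_at = models.DateTimeField(auto_now_add=True)
#       updated_at = models.DateTimeField(auto_now=True)
#       author = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)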
| python | 930 |
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class vpnvserver_stats(base_resource) :
ur""" Statistics for VPN virtual server resource.
"""
def __init__(self) :
self._name = ""
self._clearstats = ""
self._primaryipaddress = ""
self._primaryport = 0
self._type = ""
self._state = ""
self._totalrequests = 0
self._requestsrate = 0
self._totalresponses = 0
self._responsesrate = 0
self._totalrequestbytes = 0
self._requestbytesrate = 0
self._totalresponsebytes = 0
self._responsebytesrate = 0
@property
def name(self) :
ur"""Name of the virtual server for which to show detailed statistics.<br/>Minimum length = 1.
"""
try :
return self._name
except Exception as e:
raise e
@name.setter
def name(self, name) :
ur"""Name of the virtual server for which to show detailed statistics.
"""
try :
self._name = name
except Exception as e:
raise e
@property
def clearstats(self) :
ur"""Clear the statsistics / counters.<br/>Possible values = basic, full.
"""
try :
return self._clearstats
except Exception as e:
raise e
@clearstats.setter
def clearstats(self, clearstats) :
ur"""Clear the statsistics / counters
"""
try :
self._clearstats = clearstats
except Exception as e:
raise e
@property
def state(self) :
ur"""Current state of the server. Possible values are UP, DOWN, UNKNOWN, OFS(Out of Service), TROFS(Transition Out of Service), TROFS_DOWN(Down When going Out of Service).
"""
try :
return self._state
except Exception as e:
raise e
@property
def requestbytesrate(self) :
ur"""Rate (/s) counter for totalrequestbytes.
"""
try :
return self._requestbytesrate
except Exception as e:
raise e
@property
def primaryipaddress(self) :
ur"""The IP address on which the service is running.
"""
try :
return self._primaryipaddress
except Exception as e:
raise e
@property
def totalrequests(self) :
ur"""Total number of requests received on this service or virtual server. (This applies to HTTP/SSL services and servers.).
"""
try :
return self._totalrequests
except Exception as e:
raise e
@property
def type(self) :
ur"""Protocol associated with the vserver.
"""
try :
return self._type
except Exception as e:
raise e
@property
def responsesrate(self) :
ur"""Rate (/s) counter for totalresponses.
"""
try :
return self._responsesrate
except Exception as e:
raise e
@property
def totalresponsebytes(self) :
ur"""Number of response bytes received by this service or virtual server.
"""
try :
return self._totalresponsebytes
except Exception as e:
raise e
@property
def primaryport(self) :
ur"""The port on which the service is running.
"""
try :
return self._primaryport
except Exception as e:
raise e
@property
def requestsrate(self) :
ur"""Rate (/s) counter for totalrequests.
"""
try :
return self._requestsrate
except Exception as e:
raise e
@property
def responsebytesrate(self) :
ur"""Rate (/s) counter for totalresponsebytes.
"""
try :
return self._responsebytesrate
except Exception as e:
raise e
@property
def totalresponses(self) :
ur"""Number of responses received on this service or virtual server. (This applies to HTTP/SSL services and servers.).
"""
try :
return self._totalresponses
except Exception as e:
raise e
@property
def totalrequestbytes(self) :
ur"""Total number of request bytes received on this service or virtual server.
"""
try :
return self._totalrequestbytes
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
ur""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(vpnvserver_response, response, self.__class__.__name__.replace('_stats',''))
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.vpnvserver
except Exception as e :
raise e
def _get_object_name(self) :
ur""" Returns the value of object identifier argument
"""
try :
if self.name is not None :
return str(self.name)
return None
except Exception as e :
raise e
@classmethod
def get(cls, service, name="", option_="") :
ur""" Use this API to fetch the statistics of all vpnvserver_stats resources that are configured on netscaler.
"""
try :
obj = vpnvserver_stats()
if not name :
response = obj.stat_resources(service, option_)
else :
obj.name = name
response = obj.stat_resource(service, option_)
return response
except Exception as e:
raise e
class Clearstats:
basic = "basic"
full = "full"
class vpnvserver_response(base_response) :
def __init__(self, length=1) :
self.vpnvserver = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.vpnvserver = [vpnvserver_stats() for _ in range(length)]
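# Illustrative retrieval of these statistics through an authenticated nitro session.
# The address and credentials are placeholders; nitro_service is imported from the
# service package used elsewhere in this SDK.
#
#   from nssrc.com.citrix.netscaler.nitro.service.nitro_service import nitro_service
#
#   client = nitro_service("10.0.0.10", "http")
#   client.login("nsroot", "nsroot")
#   for stat in vpnvserver_stats.get(client) or []:
#       print(stat.name, stat.state, stat.totalrequests)
#   client.logout()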
| python | 6,234 |
# -*- coding: utf-8 -*-
#
# Copyright 2017 - Swiss Data Science Center (SDSC)
# A partnership between École Polytechnique Fédérale de Lausanne (EPFL) and
# Eidgenössische Technische Hochschule Zürich (ETHZ).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Manage set of platform API endpoints."""
import datetime
import os
import click
from ._config import with_config
from ._options import argument_endpoint
@click.group(invoke_without_command=True)
@click.option('-v', '--verbose', count=True)
@with_config
@click.pass_context
def endpoint(ctx, config, verbose):
"""Manage set of platform API endpoints."""
if ctx.invoked_subcommand is None:
# TODO default_endpoint = config.get('core', {}).get('default')
for endpoint, values in config.get('endpoints', {}).items():
# TODO is_default = default_endpoint == endpoint
if not verbose:
click.echo(endpoint)
else:
click.echo('{endpoint}\t{url}'.format(
endpoint=endpoint, url=values.get('url', '')))
@endpoint.command(name='set-default')
@argument_endpoint
@with_config
@click.pass_context
def set_default(ctx, config, endpoint):
"""Set endpoint as default."""
config.setdefault('core', {})
config['core']['default'] = endpoint
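# Typical invocations, assuming this group is mounted as `renku endpoint` in the CLI:
#
#   $ renku endpoint -v          # list configured endpoints with their URLs
#   $ renku endpoint set-default my-platform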
| python | 1,807 |
"""Test cases for utils/check-package.
It does not inherit from infra.basetest.BRTest and therefore does not generate
a logfile. Only when the tests fail there will be output to the console.
The make target ('make check-package') is already used by the job
'check-package' and won't be tested here.
"""
import os
import subprocess
import unittest
import infra
def call_script(args, env, cwd):
"""Call a script and return stdout and stderr as lists."""
out, err = subprocess.Popen(args, cwd=cwd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, env=env,
universal_newlines=True).communicate()
return out.splitlines(), err.splitlines()
class TestCheckPackage(unittest.TestCase):
"""Test the various ways the script can be called.
The script can be called either using relative path, absolute path or from
PATH.
The files to be checked can be passed as arguments using either relative
path or absolute path.
When in in-tree mode (without -b) some in-tree files and also all
out-of-tree files are ignored.
When in out-tree mode (with -b) the script does generate warnings for these
but ignores external.mk.
"""
WITH_EMPTY_PATH = {}
WITH_UTILS_IN_PATH = {"PATH": infra.basepath("utils") + ":" + os.environ["PATH"]}
relative = [
# base_script base_file rel_script rel_file rel_cwd
["utils/check-package", "package/atop/atop.mk", "./utils/check-package", "package/atop/atop.mk", ""],
["utils/check-package", "package/atop/atop.mk", "./utils/check-package", "./package/atop/atop.mk", ""],
["utils/check-package", "package/atop/atop.mk", "../../utils/check-package", "atop.mk", "package/atop"],
["utils/check-package", "package/atop/atop.mk", "../../utils/check-package", "./atop.mk", "package/atop"],
["utils/check-package", "package/atop/atop.mk", "../utils/check-package", "atop/atop.mk", "package"],
["utils/check-package", "package/atop/atop.mk", "../utils/check-package", "./atop/atop.mk", "package"],
["utils/check-package", "package/atop/Config.in", "./utils/check-package", "package/atop/Config.in", ""],
["utils/check-package", "package/atop/Config.in", "./utils/check-package", "./package/atop/Config.in", ""],
["utils/check-package", "package/atop/Config.in", "../../utils/check-package", "Config.in", "package/atop"],
["utils/check-package", "package/atop/Config.in", "../../utils/check-package", "./Config.in", "package/atop"],
["utils/check-package", "package/atop/Config.in", "../utils/check-package", "atop/Config.in", "package"],
["utils/check-package", "package/atop/Config.in", "../utils/check-package", "./atop/Config.in", "package"]]
def assert_file_was_processed(self, stderr):
"""Infer from check-package stderr if at least one file was processed
and fail otherwise."""
self.assertIn("lines processed", stderr[0], stderr)
processed = int(stderr[0].split()[0])
self.assertGreater(processed, 0)
def assert_file_was_ignored(self, stderr):
"""Infer from check-package stderr if no file was processed and fail
otherwise."""
self.assertIn("lines processed", stderr[0], stderr)
processed = int(stderr[0].split()[0])
self.assertEqual(processed, 0)
def assert_warnings_generated_for_file(self, stderr):
"""Infer from check-package stderr if at least one warning was generated
and fail otherwise."""
self.assertIn("warnings generated", stderr[1], stderr)
generated = int(stderr[1].split()[0])
self.assertGreater(generated, 0)
def test_run(self):
"""Test the various ways the script can be called in a simple top to
bottom sequence."""
# an intree file can be checked by the script called from relative path,
# absolute path and from PATH
for base_script, base_file, rel_script, rel_file, rel_cwd in self.relative:
abs_script = infra.basepath(base_script)
abs_file = infra.basepath(base_file)
cwd = infra.basepath(rel_cwd)
_, m = call_script([rel_script, rel_file],
self.WITH_EMPTY_PATH, cwd)
self.assert_file_was_processed(m)
_, m = call_script([abs_script, rel_file],
self.WITH_EMPTY_PATH, cwd)
self.assert_file_was_processed(m)
_, m = call_script(["check-package", rel_file],
self.WITH_UTILS_IN_PATH, cwd)
self.assert_file_was_processed(m)
_, m = call_script([rel_script, abs_file],
self.WITH_EMPTY_PATH, cwd)
self.assert_file_was_processed(m)
_, m = call_script([abs_script, abs_file],
self.WITH_EMPTY_PATH, cwd)
self.assert_file_was_processed(m)
_, m = call_script(["check-package", abs_file],
self.WITH_UTILS_IN_PATH, cwd)
self.assert_file_was_processed(m)
# some intree files are ignored
_, m = call_script(["./utils/check-package", "package/pkg-generic.mk"],
self.WITH_EMPTY_PATH, infra.basepath())
self.assert_file_was_ignored(m)
_, m = call_script(["./utils/check-package", "-b", "package/pkg-generic.mk"],
self.WITH_EMPTY_PATH, infra.basepath())
self.assert_file_was_processed(m)
# an out-of-tree file can be checked by the script called from relative
# path, absolute path and from PATH
for base_script, base_file, rel_script, rel_file, rel_cwd in self.relative:
abs_script = infra.basepath(base_script)
abs_file = infra.basepath(base_file)
cwd = infra.basepath(rel_cwd)
_, m = call_script([rel_script, "-b", rel_file],
self.WITH_EMPTY_PATH, cwd)
self.assert_file_was_processed(m)
_, m = call_script([abs_script, "-b", rel_file],
self.WITH_EMPTY_PATH, cwd)
self.assert_file_was_processed(m)
_, m = call_script(["check-package", "-b", rel_file],
self.WITH_UTILS_IN_PATH, cwd)
self.assert_file_was_processed(m)
_, m = call_script([rel_script, "-b", abs_file],
self.WITH_EMPTY_PATH, cwd)
self.assert_file_was_processed(m)
_, m = call_script([abs_script, "-b", abs_file],
self.WITH_EMPTY_PATH, cwd)
self.assert_file_was_processed(m)
_, m = call_script(["check-package", "-b", abs_file],
self.WITH_UTILS_IN_PATH, cwd)
self.assert_file_was_processed(m)
        # out-of-tree files are ignored without -b but can generate warnings
# with -b
abs_path = infra.filepath("tests/utils/br2-external")
rel_file = "Config.in"
abs_file = os.path.join(abs_path, rel_file)
_, m = call_script(["check-package", rel_file],
self.WITH_UTILS_IN_PATH, abs_path)
self.assert_file_was_ignored(m)
_, m = call_script(["check-package", abs_file],
self.WITH_UTILS_IN_PATH, infra.basepath())
self.assert_file_was_ignored(m)
w, m = call_script(["check-package", "-b", rel_file],
self.WITH_UTILS_IN_PATH, abs_path)
self.assert_file_was_processed(m)
self.assert_warnings_generated_for_file(m)
self.assertIn("{}:1: empty line at end of file".format(rel_file), w)
w, m = call_script(["check-package", "-b", abs_file],
self.WITH_UTILS_IN_PATH, infra.basepath())
self.assert_file_was_processed(m)
self.assert_warnings_generated_for_file(m)
self.assertIn("{}:1: empty line at end of file".format(abs_file), w)
# external.mk is ignored only when in the root path of a br2-external
rel_file = "external.mk"
abs_file = os.path.join(abs_path, rel_file)
_, m = call_script(["check-package", "-b", rel_file],
self.WITH_UTILS_IN_PATH, abs_path)
self.assert_file_was_ignored(m)
_, m = call_script(["check-package", "-b", abs_file],
self.WITH_UTILS_IN_PATH, infra.basepath())
self.assert_file_was_ignored(m)
abs_path = infra.filepath("tests/utils/br2-external/package/external")
abs_file = os.path.join(abs_path, rel_file)
w, m = call_script(["check-package", "-b", rel_file],
self.WITH_UTILS_IN_PATH, abs_path)
self.assert_file_was_processed(m)
self.assert_warnings_generated_for_file(m)
self.assertIn("{}:1: should be 80 hashes (http://nightly.buildroot.org/#writing-rules-mk)".format(rel_file), w)
w, m = call_script(["check-package", "-b", abs_file],
self.WITH_UTILS_IN_PATH, infra.basepath())
self.assert_file_was_processed(m)
self.assert_warnings_generated_for_file(m)
self.assertIn("{}:1: should be 80 hashes (http://nightly.buildroot.org/#writing-rules-mk)".format(abs_file), w)
| python | 9,491 |
import aes128
import Title
import Titles
import Hex
from binascii import hexlify as hx, unhexlify as uhx
from struct import pack as pk, unpack as upk
from Fs.File import File
from hashlib import sha256
import Fs.Type
import os
import re
import pathlib
import Keys
import Config
import Print
import Nsps
from tqdm import tqdm
from Fs.BaseFs import BaseFs
MEDIA_SIZE = 0x200
indent = 1
tabs = '\t' * indent
class Pfs0(BaseFs):
def __init__(self, buffer, path = None, mode = None, cryptoType = -1, cryptoKey = -1, cryptoCounter = -1):
super(Pfs0, self).__init__(buffer, path, mode, cryptoType, cryptoKey, cryptoCounter)
if buffer:
self.size = int.from_bytes(buffer[0x48:0x50], byteorder='little', signed=False)
self.sectionStart = int.from_bytes(buffer[0x40:0x48], byteorder='little', signed=False)
	def getHeader(self):
stringTable = '\x00'.join(file.name for file in self.files)
headerSize = 0x10 + len(self.files) * 0x18 + len(stringTable)
remainder = 0x10 - headerSize % 0x10
headerSize += remainder
h = b''
h += b'PFS0'
h += len(self.files).to_bytes(4, byteorder='little')
h += (len(stringTable)+remainder).to_bytes(4, byteorder='little')
h += b'\x00\x00\x00\x00'
stringOffset = 0
		# one 0x18-byte table entry per file
		for f in self.files:
			h += f.offset.to_bytes(8, byteorder='little')
			h += f.size.to_bytes(8, byteorder='little')
			h += stringOffset.to_bytes(4, byteorder='little')
			h += b'\x00\x00\x00\x00'
			stringOffset += len(f.name) + 1
h += stringTable.encode()
h += remainder * b'\x00'
return h
def open(self, path = None, mode = 'rb', cryptoType = -1, cryptoKey = -1, cryptoCounter = -1):
r = super(Pfs0, self).open(path, mode, cryptoType, cryptoKey, cryptoCounter)
self.rewind()
#self.setupCrypto()
#Print.info('cryptoType = ' + hex(self.cryptoType))
#Print.info('titleKey = ' + (self.cryptoKey.hex()))
#Print.info('cryptoCounter = ' + (self.cryptoCounter.hex()))
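		# PFS0 superblock layout, as parsed below (integers are little-endian):
		#   0x00  magic 'PFS0'
		#   0x04  file count (u32)
		#   0x08  string table size (u32)
		#   0x0C  reserved, read and discarded (u32)
		#   0x10  file entry table, one 0x18-byte entry per file:
		#         u64 data offset, u64 size, u32 name offset, u32 reserved
		#   then the string table, followed by the file data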
self.magic = self.read(4)
if self.magic != b'PFS0':
raise IOError('Not a valid PFS0 partition ' + str(self.magic))
fileCount = self.readInt32()
stringTableSize = self.readInt32()
self.readInt32() # junk data
self.seek(0x10 + fileCount * 0x18)
stringTable = self.read(stringTableSize)
stringEndOffset = stringTableSize
headerSize = 0x10 + 0x18 * fileCount + stringTableSize
self.files = []
for i in range(fileCount):
i = fileCount - i - 1
self.seek(0x10 + i * 0x18)
offset = self.readInt64()
size = self.readInt64()
nameOffset = self.readInt32() # just the offset
name = stringTable[nameOffset:stringEndOffset].decode('utf-8').rstrip(' \t\r\n\0')
stringEndOffset = nameOffset
self.readInt32() # junk data
f = Fs.factory(name)
f._path = name
f.offset = offset
f.size = size
self.files.append(self.partition(offset + headerSize, f.size, f))
self.files.reverse()
'''
self.seek(0x10 + fileCount * 0x18)
stringTable = self.read(stringTableSize)
for i in range(fileCount):
if i == fileCount - 1:
self.files[i].name = stringTable[self.files[i].nameOffset:].decode('utf-8').rstrip(' \t\r\n\0')
else:
self.files[i].name = stringTable[self.files[i].nameOffset:self.files[i+1].nameOffset].decode('utf-8').rstrip(' \t\r\n\0')
'''
def get_cryptoType(self):
return self.cryptoType
def get_cryptoKey(self):
return self.cryptoKey
def get_cryptoCounter(self):
return self.cryptoCounter
def read_cnmt(self, path = None, mode = 'rb'):
cryptoType = self.get_cryptoType()
cryptoKey = self.get_cryptoKey()
cryptoCounter = self.get_cryptoCounter()
r = super(Pfs0, self).open(path, mode, cryptoType, cryptoKey, cryptoCounter)
self.rewind()
for cnmt in self:
f = Fs.factory(cnmt)
cnmt.rewind()
titleid=f.readInt64()
titleversion = cnmt.read(0x4)
cnmt.rewind()
cnmt.seek(0xE)
offset=cnmt.readInt16()
content_entries=cnmt.readInt16()
meta_entries=cnmt.readInt16()
cnmt.rewind()
cnmt.seek(0x20)
original_ID=cnmt.readInt64()
min_sversion=cnmt.readInt64()
Print.info('')
Print.info('...........................................')
Print.info('Reading: ' + str(cnmt._path))
Print.info('...........................................')
Print.info('titleid = ' + str(hx(titleid.to_bytes(8, byteorder='big'))))
Print.info('version = ' + str(int.from_bytes(titleversion, byteorder='little')))
Print.info('Table offset = '+ str(hx((offset+0x20).to_bytes(2, byteorder='big'))))
Print.info('number of content = '+ str(content_entries))
Print.info('number of meta entries = '+ str(meta_entries))
Print.info('Application id\Patch id = ' + str(hx(original_ID.to_bytes(8, byteorder='big'))))
Print.info('RequiredSystemVersion = ' + str(min_sversion))
cnmt.rewind()
cnmt.seek(0x20+offset)
#for i in range(content_entries):
# Print.info('........................')
# Print.info('Content number ' + str(i+1))
# Print.info('........................')
# vhash = cnmt.read(0x20)
# Print.info('hash =\t' + str(hx(vhash)))
# NcaId = cnmt.read(0x10)
# Print.info('NcaId =\t' + str(hx(NcaId)))
# size = cnmt.read(0x6)
# Print.info('Size =\t' + str(int.from_bytes(size, byteorder='little', signed=True)))
# ncatype = cnmt.read(0x1)
# Print.info('ncatype = ' + str(int.from_bytes(ncatype, byteorder='little', signed=True)))
# unknown = cnmt.read(0x1)
def printInfo(self, indent = 0):
maxDepth = 3
tabs = '\t' * indent
Print.info('\n%sPFS0\n' % (tabs))
super(Pfs0, self).printInfo( indent)
| python | 5,801 |
# -*- coding: utf-8 -*-
"""Wide Residual Network models for Keras.
# Reference
- [Wide Residual Networks](https://arxiv.org/abs/1605.07146)
"""
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import warnings
from keras.models import Model
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers.pooling import AveragePooling2D, MaxPooling2D
from keras.layers import Input, Conv2D
from keras.layers.merge import add
from keras.layers.normalization import BatchNormalization
from keras.utils.layer_utils import convert_all_kernels_in_model
from keras.utils.data_utils import get_file
from keras.engine.topology import get_source_inputs
from keras.applications.imagenet_utils import _obtain_input_shape
import keras.backend as K
TH_WEIGHTS_PATH = 'https://github.com/titu1994/Wide-Residual-Networks/releases/download/v1.2/wrn_28_8_th_kernels_th_dim_ordering.h5'
TF_WEIGHTS_PATH = 'https://github.com/titu1994/Wide-Residual-Networks/releases/download/v1.2/wrn_28_8_tf_kernels_tf_dim_ordering.h5'
TH_WEIGHTS_PATH_NO_TOP = 'https://github.com/titu1994/Wide-Residual-Networks/releases/download/v1.2/wrn_28_8_th_kernels_th_dim_ordering_no_top.h5'
TF_WEIGHTS_PATH_NO_TOP = 'https://github.com/titu1994/Wide-Residual-Networks/releases/download/v1.2/wrn_28_8_tf_kernels_tf_dim_ordering_no_top.h5'
def WideResidualNetwork(depth=28, width=8, dropout_rate=0.0,
include_top=True, weights='cifar10',
input_tensor=None, input_shape=None,
classes=10):
"""Instantiate the Wide Residual Network architecture,
optionally loading weights pre-trained
on CIFAR-10. Note that when using TensorFlow,
for best performance you should set
`image_dim_ordering="tf"` in your Keras config
at ~/.keras/keras.json.
The model and the weights are compatible with both
TensorFlow and Theano. The dimension ordering
convention used by the model is the one
specified in your Keras config file.
# Arguments
        depth: number of layers in the Wide Residual Network
width: multiplier to the ResNet width (number of filters)
dropout_rate: dropout rate
include_top: whether to include the fully-connected
layer at the top of the network.
weights: one of `None` (random initialization) or
"cifar10" (pre-training on CIFAR-10)..
input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
to use as image input for the model.
input_shape: optional shape tuple, only to be specified
if `include_top` is False (otherwise the input shape
has to be `(32, 32, 3)` (with `tf` dim ordering)
or `(3, 32, 32)` (with `th` dim ordering).
It should have exactly 3 inputs channels,
and width and height should be no smaller than 8.
E.g. `(200, 200, 3)` would be one valid value.
classes: optional number of classes to classify images
into, only to be specified if `include_top` is True, and
if no `weights` argument is specified.
# Returns
A Keras model instance.
"""
if weights not in {'cifar10', None}:
raise ValueError('The `weights` argument should be either '
'`None` (random initialization) or `cifar10` '
'(pre-training on CIFAR-10).')
if weights == 'cifar10' and include_top and classes != 10:
raise ValueError('If using `weights` as CIFAR 10 with `include_top`'
' as true, `classes` should be 10')
if (depth - 4) % 6 != 0:
        raise ValueError('Depth of the network must be such that (depth - 4) '
                         'should be divisible by 6.')
# Determine proper input shape
input_shape = _obtain_input_shape(input_shape,
default_size=32,
min_size=8,
data_format=K.image_dim_ordering(),
require_flatten=include_top)
if input_tensor is None:
img_input = Input(shape=input_shape)
else:
if not K.is_keras_tensor(input_tensor):
img_input = Input(tensor=input_tensor, shape=input_shape)
else:
img_input = input_tensor
x = __create_wide_residual_network(classes, img_input, include_top, depth, width,
dropout_rate)
# Ensure that the model takes into account
# any potential predecessors of `input_tensor`.
if input_tensor is not None:
inputs = get_source_inputs(input_tensor)
else:
inputs = img_input
# Create model.
model = Model(inputs, x, name='wide-resnet')
# load weights
if weights == 'cifar10':
if (depth == 28) and (width == 8) and (dropout_rate == 0.0):
# Default parameters match. Weights for this model exist:
if K.image_dim_ordering() == 'th':
if include_top:
weights_path = get_file('wide_resnet_28_8_th_dim_ordering_th_kernels.h5',
TH_WEIGHTS_PATH,
cache_subdir='models')
else:
weights_path = get_file('wide_resnet_28_8_th_dim_ordering_th_kernels_no_top.h5',
TH_WEIGHTS_PATH_NO_TOP,
cache_subdir='models')
model.load_weights(weights_path)
if K.backend() == 'tensorflow':
warnings.warn('You are using the TensorFlow backend, yet you '
'are using the Theano '
'image dimension ordering convention '
'(`image_dim_ordering="th"`). '
'For best performance, set '
'`image_dim_ordering="tf"` in '
'your Keras config '
'at ~/.keras/keras.json.')
convert_all_kernels_in_model(model)
else:
if include_top:
weights_path = get_file('wide_resnet_28_8_tf_dim_ordering_tf_kernels.h5',
TF_WEIGHTS_PATH,
cache_subdir='models')
else:
weights_path = get_file('wide_resnet_28_8_tf_dim_ordering_tf_kernels_no_top.h5',
TF_WEIGHTS_PATH_NO_TOP,
cache_subdir='models')
model.load_weights(weights_path)
if K.backend() == 'theano':
convert_all_kernels_in_model(model)
return model
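# Example usage (a sketch; assumes a legacy Keras installation that still provides
# the keras.layers.* import paths used above):
#   model = WideResidualNetwork(depth=28, width=8, dropout_rate=0.0,
#                               weights=None, classes=10)
#   model.compile(optimizer='sgd', loss='categorical_crossentropy',
#                 metrics=['accuracy'])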
def __conv1_block(input):
x = Conv2D(16, (3, 3), padding='same')(input)
channel_axis = 1 if K.image_dim_ordering() == 'th' else -1
x = BatchNormalization(axis=channel_axis)(x)
x = Activation('relu')(x)
return x
def __conv2_block(input, k=1, dropout=0.0):
init = input
channel_axis = 1 if K.image_dim_ordering() == 'th' else -1
# Check if input number of filters is same as 16 * k, else create convolution2d for this input
if K.image_dim_ordering() == 'th':
if init._keras_shape[1] != 16 * k:
init = Conv2D(16 * k, (1, 1), activation='linear', padding='same')(init)
else:
if init._keras_shape[-1] != 16 * k:
init = Conv2D(16 * k, (1, 1), activation='linear', padding='same')(init)
x = Conv2D(16 * k, (3, 3), padding='same')(input)
x = BatchNormalization(axis=channel_axis)(x)
x = Activation('relu')(x)
if dropout > 0.0:
x = Dropout(dropout)(x)
x = Conv2D(16 * k, (3, 3), padding='same')(x)
x = BatchNormalization(axis=channel_axis)(x)
x = Activation('relu')(x)
m = add([init, x])
return m
def __conv3_block(input, k=1, dropout=0.0):
init = input
channel_axis = 1 if K.image_dim_ordering() == 'th' else -1
# Check if input number of filters is same as 32 * k, else create convolution2d for this input
if K.image_dim_ordering() == 'th':
if init._keras_shape[1] != 32 * k:
init = Conv2D(32 * k, (1, 1), activation='linear', padding='same')(init)
else:
if init._keras_shape[-1] != 32 * k:
init = Conv2D(32 * k, (1, 1), activation='linear', padding='same')(init)
x = Conv2D(32 * k, (3, 3), padding='same')(input)
x = BatchNormalization(axis=channel_axis)(x)
x = Activation('relu')(x)
if dropout > 0.0:
x = Dropout(dropout)(x)
x = Conv2D(32 * k, (3, 3), padding='same')(x)
x = BatchNormalization(axis=channel_axis)(x)
x = Activation('relu')(x)
m = add([init, x])
return m
def ___conv4_block(input, k=1, dropout=0.0):
init = input
channel_axis = 1 if K.image_dim_ordering() == 'th' else -1
# Check if input number of filters is same as 64 * k, else create convolution2d for this input
if K.image_dim_ordering() == 'th':
if init._keras_shape[1] != 64 * k:
init = Conv2D(64 * k, (1, 1), activation='linear', padding='same')(init)
else:
if init._keras_shape[-1] != 64 * k:
init = Conv2D(64 * k, (1, 1), activation='linear', padding='same')(init)
x = Conv2D(64 * k, (3, 3), padding='same')(input)
x = BatchNormalization(axis=channel_axis)(x)
x = Activation('relu')(x)
if dropout > 0.0:
x = Dropout(dropout)(x)
x = Conv2D(64 * k, (3, 3), padding='same')(x)
x = BatchNormalization(axis=channel_axis)(x)
x = Activation('relu')(x)
m = add([init, x])
return m
def __create_wide_residual_network(nb_classes, img_input, include_top, depth=28, width=8, dropout=0.0):
''' Creates a Wide Residual Network with specified parameters
Args:
nb_classes: Number of output classes
img_input: Input tensor or layer
include_top: Flag to include the last dense layer
depth: Depth of the network. Compute N = (n - 4) / 6.
For a depth of 16, n = 16, N = (16 - 4) / 6 = 2
For a depth of 28, n = 28, N = (28 - 4) / 6 = 4
For a depth of 40, n = 40, N = (40 - 4) / 6 = 6
width: Width of the network.
dropout: Adds dropout if value is greater than 0.0
    Returns: a Keras Model
'''
N = (depth - 4) // 6
x = __conv1_block(img_input)
nb_conv = 4
for i in range(N):
x = __conv2_block(x, width, dropout)
nb_conv += 2
x = MaxPooling2D((2, 2))(x)
for i in range(N):
x = __conv3_block(x, width, dropout)
nb_conv += 2
x = MaxPooling2D((2, 2))(x)
for i in range(N):
x = ___conv4_block(x, width, dropout)
nb_conv += 2
x = AveragePooling2D((8, 8))(x)
if include_top:
x = Flatten()(x)
x = Dense(nb_classes, activation='softmax')(x)
return x
| python | 11,279 |
#! /usr/bin/env python
import collections
import io
import math
import numpy as np
OneKelvinInKeV = 0.00000008617328149741 # 1 K = 8.617328149741e-8 keV
T9inKeV = 1e9 * OneKelvinInKeV # 1 GK = 86.17328149741 keV
class ReaclibLibrary:
def __init__(self, s=None):
self.chapters = {}
if s is not None:
self.parse(s)
def parse(self, s):
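        # REACLIB text format: a line whose first character is a digit carries the
        # chapter number and starts a new entry; accumulate each entry and group
        # the entries by chapter number.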
lines = io.StringIO(s).readlines()
chunks = {}
thischunk = ''
chapter = 0
while lines:
line = lines.pop(0)
if (line[:1]).isdigit():
if thischunk.strip() != '':
chunks.setdefault(chapter, []).append(thischunk)
chapter = int(line[:2])
thischunk = ''
thischunk += line
if thischunk.strip() != '':
chunks.setdefault(chapter, []).append(thischunk)
for i in chunks:
# print( ''.join(chunks[i]) )
w = ReaclibChapter(''.join(chunks[i]))
self.chapters[i] = w
def __str__(self):
result = ''
for i in self.chapters:
result += str(self.chapters[i])
return result
def find(self, targ=None, proj=None, reaction=None, reference=None, chapter=None):
for chapter in self.chapters:
pass
raise NotImplementedError("FIXME: write me")
def add_inverse_reactions_detailed_balance(self):
raise NotImplementedError("FIXME: write me")
class ReaclibChapter:
def __init__(self, s=None):
self.reacts = []
self.chapter = None
if s is not None:
self.parse(s)
def parse(self, s):
split = s.splitlines()
nchunks = len(split) / 4
for ichunk in range(0, len(split), 4):
chunk = split[ichunk:ichunk + 4]
header = chunk.pop(0)
set1 = chunk.pop(0)
set2 = chunk.pop(0)
set3 = chunk.pop(0)
r = ReaclibReact(set1 + set2 + set3)
self.reacts.append(r)
self.chapter = header
def __str__(self):
mymap2 = ''
for r in self.reacts:
mymap2 += '\n'.join(list(map(str, [self.chapter, r]))) + '\n'
return mymap2
class ReaclibReact:
def __init__(self, s=None):
self.original = s
self.set_label = None
self.rate = None
self.reverserate = None
self.Qval = None
        self.e = collections.OrderedDict(((i, None) for i in range(1, 7)))  # particle slots 1-6
if s is not None:
self.parse(s)
def add_particle(self, p, slot):
pstring = p.rjust(5)
self.e[slot] = pstring
def add_coefficient(self, c, slot):
if slot == 0:
self.a_zero = ['', c]
elif slot == 1:
self.a_one = ['', c]
elif slot == 2:
self.a_two = ['', c]
elif slot == 3:
self.a_three = ['', c]
elif slot == 4:
self.a_four = ['', c]
elif slot == 5:
self.a_five = ['', c]
elif slot == 6:
self.a_six = ['', c]
else:
raise ValueError("Coefficient slots are 0-6, you entered %s" % str(slot))
def parse(self, s):
self.e[1] = s[5:10]
self.e[2] = s[10:15]
self.e[3] = s[15:20]
self.e[4] = s[20:25]
self.e[5] = s[25:30]
self.e[6] = s[30:35]
self.set_label = s[43:47]
self.rate = s[47]
self.reverserate = s[48]
self.Qval = [str(s[52:64]), float(s[52:64])]
# print( '/'+s[74:87]+'/' )
# print( '/'+s[87:100]+'/' )
self.a_zero = [str(s[74:87]), float(s[74:87])]
self.a_one = [str(s[87:100]), float(s[87:100])]
self.a_two = [str(s[100:113]), float(s[100:113])]
self.a_three = [str(s[113:126]), float(s[113:126])]
self.a_four = [str(s[148:161]), float(s[148:161])]
self.a_five = [str(s[161:174]), float(s[161:174])]
self.a_six = [str(s[174:187]), float(s[174:187])]
def __str__(self):
line1 = " " + self.e[1] + self.e[2] + self.e[3] + self.e[4] + self.e[5] + self.e[
6] + " " + self.set_label + self.rate + self.reverserate + " " + self.Qval[0] + " "
line2 = self.a_zero[0] + self.a_one[0] + self.a_two[0] + self.a_three[0] + " "
line3 = self.a_four[0] + self.a_five[0] + self.a_six[0] + " "
mymap = list(map(str, [line1, line2, line3]))
return "\n".join(mymap)
def evaluate(self, x, unit='T9'):
if unit == 'T9':
T = x
elif unit == 'keV':
T = x / T9inKeV
else:
raise NotImplementedError("unit must be 'keV' or 'T9', got %s" % unit)
logrrate = (
self.a_zero[1] +
(self.a_one[1] * pow(T, -1.0)) +
(self.a_two[1] * pow(T, -0.33333333333)) +
(self.a_three[1] * pow(T, 0.3333333333)) +
(self.a_four[1] * pow(T, 1)) +
(self.a_five[1] * pow(T, 1.66666666666)) +
(self.a_six[1] * math.log(T)))
return math.exp(logrrate)
def gnds_crossSection_to_Reaclib_ARR_coefficients(xs, useCovariance=False, verbose=False, minTemp=0.1, maxTemp=10.0,
numTemps=15):
from brownies.BNL.inter.metrics import computeAstrophysicalReactionRate
import pqu.PQU as pqu
# Set up temperature grid
    Temp_inT9 = np.logspace(start=math.log10(minTemp), stop=math.log10(maxTemp),
                            num=numTemps)  # [.1, .2, .3, .4, .5, 1, 1.5, 2.0, 5.0, 10.0]
Temp_inkeV = [T9inKeV * (T) for T in Temp_inT9]
# Set up array of reaction rates
b_matrix = []
for T in Temp_inkeV:
ARR = computeAstrophysicalReactionRate(xs, pqu.PQU(T, 'keV'), useCovariance=useCovariance)
b_matrix.append(np.log(ARR.getValue()))
b = np.array(b_matrix)
# Set up matrix of powers of temperatures so we can fit the coefficients
a_matrix = []
for T in Temp_inT9:
a_matrix_row = [1, pow(T, -1.0), pow(T, -1.0 / 3.0), pow(T, 1.0 / 3.0), pow(T, 1.0), pow(T, 5.0 / 3.0),
np.log(T)]
a_matrix.append(a_matrix_row)
a = np.array(a_matrix)
# a*x = b, solve for vector x
if verbose: print('b:', b)
if verbose: print('a:', a)
x, residual, rank, s = np.linalg.lstsq(a, b, rcond=None)
if verbose: print('a*x', np.dot(a, x))
if verbose: print(residual)
return x
def gnds_reactionSuite_to_ReaclibLibrary(rs, maxEThreshold=0.5e6, skipCrossSectionSums=True, verbose=False):
import fudge
projectile = str(rs.projectile)
target = str(rs.target)
resultChapters = []
for r in rs:
# Get rid of all reactions that are not plain reactions or sums of reactions
        # (e.g. no fission components or production stuff)
if not isinstance(r, (fudge.reactions.reaction.reaction, fudge.sums.crossSectionSum)): continue
if isinstance(r, fudge.sums.crossSectionSum) and skipCrossSectionSums: continue
if not hasattr(r, 'outputChannel'): continue
# Compute outgoing particle names
prods = [str(p) for p in r.outputChannel]
reactionName = str(r)
# Get Ethreshold
if hasattr(r, "getThreshold"):
EThreshold = r.getThreshold(unit='eV')
else:
EThreshold = 0.0
# Skip over some reactions for which an astrophysical reaction rate is useless
if projectile in prods and target in prods: continue # skip elastic
if EThreshold > maxEThreshold: continue # skip high threshold reactions
isInelastic = False
for prod in prods:
if '(' in prod:
isInelastic = True # skip anything that's a discrete level excitation, we're not ready for isomers
break
if isInelastic: continue
print(20 * '-', str(r), 20 * '-')
# Get Q
if hasattr(r, 'getQ'):
Q = r.getQ(unit='eV')
else:
Q = 0.0
# Get coefficients in ARR parameterization
a = gnds_crossSection_to_Reaclib_ARR_coefficients(r.crossSection, verbose=False)
# Figure out relevant products and compute chapter
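        # (For a two-body entrance channel, REACLIB chapter 4 has one product,
        # chapter 5 two products and chapter 6 three products, which is the
        # mapping applied below.)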
if 'photon' in prods: prods.remove('photon')
if len(prods) == 1:
chapter = 4
elif len(prods) == 2:
chapter = 5
elif len(prods) == 3:
chapter = 6
else:
continue
thisReaction = ReaclibReact()
thisReaction.set_label = 'endf'
thisReaction.rate = None
thisReaction.reverserate = ''
thisReaction.Qval = ['', Q]
thisReaction.add_particle(target, 1)
thisReaction.add_particle(projectile, 2)
for ip, product in enumerate(prods): thisReaction.add_particle(product, ip + 3)
for ic, c in enumerate(a): thisReaction.add_coefficient(c, ic)
print(prods, reactionName, a, chapter, Q, EThreshold)
# print( thisReaction )
thisChapter = ReaclibChapter()
thisChapter.chapter = chapter
thisChapter.reacts.append(thisReaction)
resultChapters.append(thisChapter)
return resultChapters
raise NotImplementedError("FIXME: write me")
def endfFile_to_ReaclibLibrary(filename, verbose=False):
from brownies.legacy.converting import endfFileToGNDS
rs = endfFileToGNDS.endfFileToGNDS(filename,
toStdOut=int(verbose) * 10,
skipBadData=True,
reconstructResonances=True,
continuumSpectraFix=True,
doCovariances=False,
verboseWarnings=verbose,
printBadNK14=False,
ignoreBadDate=True,
acceptBadMF10FissionZAP=True)['reactionSuite']
return gnds_reactionSuite_to_ReaclibLibrary(rs, verbose=verbose)
def plot_rates(d, temperatureGrid=[.1, .2, .3, .4, .5, 1, 1.5, 2, 5, 10], title="Astrophysical Reaction Rate"):
    '''d maps labels to ReaclibReact objects, e.g. d={ "ENDF/B-VIII.0":reaclibreac, 'KaDoNiS':...}'''
    import matplotlib.pyplot as plt
for k in d:
plt.plot(temperatureGrid, [d[k].evaluate(T) for T in temperatureGrid], label=k)
plt.xlabel('$Temperature (GK)$')
plt.ylabel('$ Astrophysical Reaction Rate(1e6)(cm^{3} s^{-1} mol^{-1})$')
plt.title(title)
plt.legend(loc='upper center', shadow=True, fontsize='x-large')
plt.show()
if __name__ == "__main__":
import unittest
class Test_ReaclibReact(unittest.TestCase):
def setUp(self):
self.s = " n p wc12w 7.82300e-01 \n" \
"-6.781610e+00 0.000000e+00 0.000000e+00 0.000000e+00 \n" \
" 0.000000e+00 0.000000e+00 0.000000e+00 \n"
# self.s=open('reaclibv1.txt').read()
def test_output(self):
r = ReaclibReact(self.s)
self.assertEqual(str(r), self.s)
def test_evaluate(self):
r = ReaclibReact(self.s)
            # Only a0 = -6.781610 is non-zero in self.s, so the rate is exp(a0) at any T.
            self.assertAlmostEqual(r.evaluate(1.0), math.exp(-6.781610))
class Test_ReaclibChapter(unittest.TestCase):
def setUp(self):
self.s = \
"""1
n p wc12w 7.82300e-01
-6.781610e+00 0.000000e+00 0.000000e+00 0.000000e+00
0.000000e+00 0.000000e+00 0.000000e+00
1
t he3 wc12w 1.86000e-02
-2.014560e+01 0.000000e+00 0.000000e+00 0.000000e+00
0.000000e+00 0.000000e+00 0.000000e+00
1
he3 t ecw -1.90000e-02
-3.246200e+01-2.133800e-01-8.215810e-01 1.112410e+01
-5.773380e-01 2.904710e-02-2.627050e-01
"""
def test_output(self):
r = ReaclibChapter(self.s)
self.assertEqual(str(r), self.s)
class Test_ReaclibLibrary(unittest.TestCase):
def setUp(self):
self.s = ""
def test_output(self):
r = ReaclibLibrary(self.s)
self.assertEqual(str(r), self.s)
unittest.main()
| python | 12,791 |
import io
import os
import re
from setuptools import setup, find_packages
scriptFolder = os.path.dirname(os.path.realpath(__file__))
os.chdir(scriptFolder)
# Find version info from module (without importing the module):
with open("src/wizcoin/__init__.py", "r") as fileObj:
version = re.search(
r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]', fileObj.read(), re.MULTILINE
).group(1)
# Use the README.md content for the long description:
with io.open("README.md", encoding="utf-8") as fileObj:
long_description = fileObj.read()
setup(
name="WizCoin",
version=version,
url="https://github.com/susieexample/wizcoin",
author="Al Sweigart",
author_email="[email protected]",
description=("""A Python module to represent the galleon, sickle, and knut coins of wizard currency."""),
long_description=long_description,
long_description_content_type="text/markdown",
license="MIT",
packages=find_packages(where="src"),
package_dir={"": "src"},
test_suite="tests",
install_requires=[],
keywords="",
classifiers=[
"License :: OSI Approved :: MIT License",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
],
)
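# A typical local build of this package (a sketch; assumes `wheel` and `twine`
# are installed alongside setuptools):
#   python setup.py sdist bdist_wheel
#   twine upload dist/*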
| python | 1,480 |
from django.db import connection
from django.core.management.base import BaseCommand
from pathlib import Path
class Command(BaseCommand):
"""
Command to import a csv which maps UPRNs to Local Authority GSS codes.
The csv should have two columns: 'uprn' and 'lad'.
You probably want to generate it with 'create_uprn_council_lookup'.
"""
def add_arguments(self, parser):
parser.add_argument("path", help="Path to CSV mapping UPRNs to GSS codes.")
def handle(self, *args, **kwargs):
self.path = Path(kwargs["path"])
self.table_name = "addressbase_uprntocouncil"
if not self.path.exists():
raise FileNotFoundError(f"No csv found at {kwargs['path']}")
cursor = connection.cursor()
self.stdout.write("clearing existing data..")
cursor.execute("TRUNCATE TABLE %s;" % (self.table_name))
self.stdout.write("importing from CSV..")
with self.path.open("r") as f:
cursor.copy_from(f, self.table_name, sep=",")
self.stdout.write("...done")
| python | 1,067 |