Dataset columns: input (string, 0-2.17k chars), instruction (string, 18-2.94k chars), output (string, 47-3.36k chars).

input:
from django.core.signing import TimestampSigner


class TokenSigner:
    signer = TimestampSigner()

    def unsign(self, value):
        return self.signer.unsign(value=value, max_age=60 * 60 * 24)

    def sign(self, value):
        return self.signer.sign(value)

instruction:
virus_scan: Increase valid period for token

output:
from django.core.signing import TimestampSigner


class TokenSigner:
    signer = TimestampSigner()

    def unsign(self, value):
        return self.signer.unsign(value=value, max_age=60 * 60 * 24 * 7)

    def sign(self, value):
        return self.signer.sign(value)

input:
# -*- coding: utf-8 -*-
'''
Start the reactor!
'''
# Import salt libs
import salt.utils.reactor
# Import ioflo libs
import ioflo.base.deeding


@ioflo.base.deeding.deedify(
    'SaltRaetReactorFork',
    ioinit={
        'opts': '.salt.opts',
        'proc_mgr': '.salt.usr.proc_mgr'})
def reactor_fork(self):
    '''
    Add a reactor object to the process manager
    '''
    self.proc_mgr.add_process(
        salt.utils.reactor.Reactor,
        args=(self.opts.value,))

instruction:
Add event return fork behavior

output:
# -*- coding: utf-8 -*-
'''
Start the reactor!
'''
# Import salt libs
import salt.utils.reactor
import salt.utils.event
# Import ioflo libs
import ioflo.base.deeding


@ioflo.base.deeding.deedify(
    'SaltRaetReactorFork',
    ioinit={
        'opts': '.salt.opts',
        'proc_mgr': '.salt.usr.proc_mgr'})
def reactor_fork(self):
    '''
    Add a reactor object to the process manager
    '''
    self.proc_mgr.add_process(
        salt.utils.reactor.Reactor,
        args=(self.opts.value,))


@ioflo.base.deeding.deedify(
    'SaltRaetEventReturnFork',
    ioinit={
        'opts': '.salt.opts',
        'proc_mgr': '.salt.usr.proc_mgr'})
def event_return_fork(self):
    '''
    Add an event return object to the process manager
    '''
    self.proc_mgr.add_process(
        salt.utils.event.EventReturn,
        args=(self.opts.value,))

input:
import yaml
from yaml import SafeLoader

yaml.load(payload) # $decodeInput=payload decodeOutput=Attribute() decodeFormat=YAML decodeMayExecuteInput
yaml.load(payload, SafeLoader) # $ decodeInput=payload decodeOutput=Attribute() decodeFormat=YAML
yaml.load(payload, Loader=SafeLoader) # $decodeInput=payload decodeOutput=Attribute() decodeFormat=YAML
yaml.load(payload, Loader=yaml.BaseLoader) # $decodeInput=payload decodeOutput=Attribute() decodeFormat=YAML
yaml.safe_load(payload) # $ decodeInput=payload decodeOutput=Attribute() decodeFormat=YAML
yaml.unsafe_load(payload) # $ decodeInput=payload decodeOutput=Attribute() decodeFormat=YAML decodeMayExecuteInput
yaml.full_load(payload) # $ decodeInput=payload decodeOutput=Attribute() decodeFormat=YAML decodeMayExecuteInput
yaml.load_all(payload) # $ decodeInput=payload decodeOutput=Attribute() decodeFormat=YAML decodeMayExecuteInput
yaml.safe_load_all(payload) # $ decodeInput=payload decodeOutput=Attribute() decodeFormat=YAML
yaml.unsafe_load_all(payload) # $ decodeInput=payload decodeOutput=Attribute() decodeFormat=YAML decodeMayExecuteInput
yaml.full_load_all(payload) # $ decodeInput=payload decodeOutput=Attribute() decodeFormat=YAML decodeMayExecuteInput

instruction:
Python: Refactor PyYAML tests a bit

output:
import yaml

# Unsafe:
yaml.load(payload) # $decodeInput=payload decodeOutput=Attribute() decodeFormat=YAML decodeMayExecuteInput
yaml.load(payload, yaml.Loader) # $decodeInput=payload decodeOutput=Attribute() decodeFormat=YAML decodeMayExecuteInput
yaml.unsafe_load(payload) # $ decodeInput=payload decodeOutput=Attribute() decodeFormat=YAML decodeMayExecuteInput
yaml.full_load(payload) # $ decodeInput=payload decodeOutput=Attribute() decodeFormat=YAML decodeMayExecuteInput

# Safe
yaml.load(payload, yaml.SafeLoader) # $ decodeInput=payload decodeOutput=Attribute() decodeFormat=YAML
yaml.load(payload, Loader=yaml.SafeLoader) # $decodeInput=payload decodeOutput=Attribute() decodeFormat=YAML
yaml.load(payload, yaml.BaseLoader) # $decodeInput=payload decodeOutput=Attribute() decodeFormat=YAML
yaml.safe_load(payload) # $ decodeInput=payload decodeOutput=Attribute() decodeFormat=YAML

# load_all variants
yaml.load_all(payload) # $ decodeInput=payload decodeOutput=Attribute() decodeFormat=YAML decodeMayExecuteInput
yaml.safe_load_all(payload) # $ decodeInput=payload decodeOutput=Attribute() decodeFormat=YAML
yaml.unsafe_load_all(payload) # $ decodeInput=payload decodeOutput=Attribute() decodeFormat=YAML decodeMayExecuteInput
yaml.full_load_all(payload) # $ decodeInput=payload decodeOutput=Attribute() decodeFormat=YAML decodeMayExecuteInput

input:
from os.path import join, dirname, abspath

default_base_dir = join(dirname(abspath(__file__)), 'completion')

import run


def pytest_addoption(parser):
    parser.addoption(
        "--base-dir", default=default_base_dir,
        help="Directory in which integration test case files are located.")
    parser.addoption(
        "--thirdparty",
        help="Include integration tests that require third party modules.")


def pytest_generate_tests(metafunc):
    """
    :type metafunc: _pytest.python.Metafunc
    """
    if 'case' in metafunc.fixturenames:
        base_dir = metafunc.config.option.base_dir
        test_files = {}
        thirdparty = metafunc.config.option.thirdparty
        metafunc.parametrize(
            'case',
            run.collect_dir_tests(base_dir, test_files, thirdparty))

instruction:
Add --test-files option to py.test

At this point, py.test should be equivalent to test/run.py

output:
from os.path import join, dirname, abspath

default_base_dir = join(dirname(abspath(__file__)), 'completion')

import run


def pytest_addoption(parser):
    parser.addoption(
        "--base-dir", default=default_base_dir,
        help="Directory in which integration test case files are located.")
    parser.addoption(
        "--test-files", "-T", default=[], action='append',
        help=(
            "Specify test files using FILE_NAME[:LINE[,LINE[,...]]]. "
            "For example: -T generators.py:10,13,19. "
            "Note that you can use -m to specify the test case by id."))
    parser.addoption(
        "--thirdparty",
        help="Include integration tests that require third party modules.")


def parse_test_files_option(opt):
    """
    Parse option passed to --test-files into a key-value pair.

    >>> parse_test_files_option('generators.py:10,13,19')
    ('generators.py', [10, 13, 19])
    """
    opt = str(opt)
    if ':' in opt:
        (f_name, rest) = opt.split(':', 1)
        return (f_name, list(map(int, rest.split(','))))
    else:
        return (opt, [])


def pytest_generate_tests(metafunc):
    """
    :type metafunc: _pytest.python.Metafunc
    """
    if 'case' in metafunc.fixturenames:
        base_dir = metafunc.config.option.base_dir
        test_files = dict(map(parse_test_files_option,
                              metafunc.config.option.test_files))
        thirdparty = metafunc.config.option.thirdparty
        metafunc.parametrize(
            'case',
            run.collect_dir_tests(base_dir, test_files, thirdparty))

input:
import pkg_resources

from django.utils import six
from django.utils.lru_cache import lru_cache


@lru_cache()
def get_backup_strategies():
    entry_points = pkg_resources.get_entry_map('nodeconductor').get('backup_strategies', {})
    strategies = dict((name.upper(), entry_point.load()) for name, entry_point in entry_points.iteritems())
    return strategies


def has_object_backup_strategy(obj):
    strategies = get_backup_strategies()
    return obj.__class__.__name__.upper() in strategies


def get_object_backup_strategy(obj):
    strategies = get_backup_strategies()
    return strategies[obj.__class__.__name__.upper()]


def get_backupable_models():
    strategies = get_backup_strategies()
    return [strategy.get_model() for strategy in six.itervalues(strategies)]

instruction:
Use new comprehension syntax and six (nc-263)

output:
import pkg_resources

from django.utils import six
from django.utils.lru_cache import lru_cache


@lru_cache()
def get_backup_strategies():
    entry_points = pkg_resources.get_entry_map('nodeconductor').get('backup_strategies', {})
    strategies = {name.upper(): entry_point.load() for name, entry_point in six.iteritems(entry_points)}
    return strategies


def has_object_backup_strategy(obj):
    strategies = get_backup_strategies()
    return obj.__class__.__name__.upper() in strategies


def get_object_backup_strategy(obj):
    strategies = get_backup_strategies()
    return strategies[obj.__class__.__name__.upper()]


def get_backupable_models():
    strategies = get_backup_strategies()
    return [strategy.get_model() for strategy in six.itervalues(strategies)]

input:
try:
    from urlparse import urlparse
except ImportError:
    from urllib.parse import urlparse

from .app import settings

from django.conf.urls.static import static
try:
    from django.conf.urls import include, patterns, url
except ImportError:
    # django < 1.5 compat
    from django.conf.urls.defaults import include, patterns, url  # noqa

image_path = urlparse(settings.BETTY_IMAGE_URL).path
if image_path.startswith("/"):
    image_path = image_path[1:]

urlpatterns = patterns('',
    url(r'^{0}'.format(image_path), include("betty.cropper.urls")),  # noqa
    url(r'browser/', include("betty.image_browser.urls")),
    url(r'login/', "django.contrib.auth.views.login")
)

if settings.DEBUG:
    urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)

instruction:
Append trailing slash to BETTY_IMAGE_URL if not present

output:
try:
    from urlparse import urlparse
except ImportError:
    from urllib.parse import urlparse

from .app import settings

from django.conf.urls.static import static
try:
    from django.conf.urls import include, patterns, url
except ImportError:
    # django < 1.5 compat
    from django.conf.urls.defaults import include, patterns, url  # noqa

image_path = urlparse(settings.BETTY_IMAGE_URL).path
if image_path.startswith("/"):
    image_path = image_path[1:]
if not image_path.endswith("/"):
    image_path += "/"

urlpatterns = patterns('',
    url(r'^{0}'.format(image_path), include("betty.cropper.urls")),  # noqa
    url(r'browser/', include("betty.image_browser.urls")),
    url(r'login/', "django.contrib.auth.views.login")
)

if settings.DEBUG:
    urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)

input:
import string
import random

from django.conf import settings


def validate_settings():
    assert settings.AWS, \
        "No AWS settings found"
    assert settings.AWS.get('ACCESS_KEY'), \
        "AWS access key is not set in settings"
    assert settings.AWS.get('SECRET_KEY'), \
        "AWS secret key is not set in settings"
    assert settings.AWS.get('BUCKET'), \
        "AWS bucket name is not set in settings"


ID_FIELD_LENGTH = 24

alphabet = string.ascii_lowercase + string.digits
for loser in 'l1o0':
    i = alphabet.index(loser)
    alphabet = alphabet[:i] + alphabet[i + 1:]


def byte_to_base32_chr(byte):
    return alphabet[byte & 31]


def random_id():
    rand_id = [random.randint(0, 0xFF) for i in range(ID_FIELD_LENGTH)]
    return ''.join(map(byte_to_base32_chr, rand_id))

instruction:
Make random IDs start with a letter

output:
import string
import random

from django.conf import settings


def validate_settings():
    assert settings.AWS, \
        "No AWS settings found"
    assert settings.AWS.get('ACCESS_KEY'), \
        "AWS access key is not set in settings"
    assert settings.AWS.get('SECRET_KEY'), \
        "AWS secret key is not set in settings"
    assert settings.AWS.get('BUCKET'), \
        "AWS bucket name is not set in settings"


ID_FIELD_LENGTH = 24

alphabet = string.ascii_lowercase + string.digits
alphabet0 = string.ascii_lowercase + string.ascii_lowercase
for loser in 'l1o0':
    i = alphabet.index(loser)
    alphabet = alphabet[:i] + alphabet[i + 1:]
for loser in 'lo':
    i = alphabet0.index(loser)
    alphabet0 = alphabet0[:i] + alphabet0[i + 1:]


def byte_to_base32_chr(byte):
    return alphabet[byte & 31]


def byte_to_letter(byte):
    return alphabet0[byte & 31]


def random_id():
    rand_id = [random.randint(0, 0xFF) for i in range(ID_FIELD_LENGTH)]
    return (byte_to_letter(rand_id[0]) +
            ''.join(map(byte_to_base32_chr, rand_id[1:])))

input:
#!/usr/bin/python
# -*- coding: utf-8 -*-

from django.conf import settings
from django.conf.urls import patterns, include, url
from django.contrib import admin

urlpatterns = patterns(
    '',
    url(r'^', include('django.contrib.auth.urls')),
    url(r'^admin/', include(admin.site.urls)),
    url(r'^web/', include('campus02.web.urls', namespace='web')),
    url(r'^', include('campus02.base.urls', namespace='base')),
)

if settings.DEBUG:
    import debug_toolbar
    urlpatterns += patterns(
        '',
        url(r'^__debug__/', include(debug_toolbar.urls)),
    )

instruction:
Fix URL routing for index page.

output:
#!/usr/bin/python
# -*- coding: utf-8 -*-

from django.conf import settings
from django.conf.urls import patterns, include, url
from django.contrib import admin

urlpatterns = patterns(
    '',
    url(r'^', include('django.contrib.auth.urls')),
    url(r'^admin/', include(admin.site.urls)),
    url(r'^web/', include('campus02.web.urls', namespace='web')),
    url(r'^$', include('campus02.base.urls', namespace='base')),
)

if settings.DEBUG:
    import debug_toolbar
    urlpatterns += patterns(
        '',
        url(r'^__debug__/', include(debug_toolbar.urls)),
    )

input:
import grpc

import hello_pb2
import hello_pb2_grpc


def run():
    channel = grpc.insecure_channel('localhost:50051')
    stub = hello_pb2_grpc.HelloServiceStub(channel)

    # ideally, you should have a try/except block here too
    response = stub.SayHello(hello_pb2.HelloReq(Name='Euler'))
    print(response.Result)

    try:
        response = stub.SayHelloStrict(hello_pb2.HelloReq(
            Name='Leonhard Euler'))
    except grpc.RpcError as e:
        # ouch!
        # let's print the gRPC error message,
        # which is "Length of `Name` cannot be more than 10 characters"
        print(e.details())
        # let's access the error code, which is `INVALID_ARGUMENT`
        # `type` of `status_code` is `grpc.StatusCode`
        status_code = e.code()
        # should print `INVALID_ARGUMENT`
        print(status_code.name)
        # should print `(3, 'invalid argument')`
        print(status_code.value)
    else:
        print(response.Result)


if __name__ == '__main__':
    run()

instruction:
Update python version for better error handling

output:
import grpc

import hello_pb2
import hello_pb2_grpc


def run():
    channel = grpc.insecure_channel('localhost:50051')
    stub = hello_pb2_grpc.HelloServiceStub(channel)

    # ideally, you should have a try/except block here too
    response = stub.SayHello(hello_pb2.HelloReq(Name='Euler'))
    print(response.Result)

    try:
        response = stub.SayHelloStrict(hello_pb2.HelloReq(
            Name='Leonhard Euler'))
    except grpc.RpcError as e:
        # ouch!
        # let's print the gRPC error message,
        # which is "Length of `Name` cannot be more than 10 characters"
        print(e.details())
        # let's access the error code, which is `INVALID_ARGUMENT`
        # `type` of `status_code` is `grpc.StatusCode`
        status_code = e.code()
        # should print `INVALID_ARGUMENT`
        print(status_code.name)
        # should print `(3, 'invalid argument')`
        print(status_code.value)
        # want to do some specific action based on the error?
        if grpc.StatusCode.INVALID_ARGUMENT == status_code:
            # do your stuff here
            pass
    else:
        print(response.Result)


if __name__ == '__main__':
    run()

input:
# -*- coding: utf-8 -*-

"""
Ziggy
~~~~~~~~

:copyright: (c) 2012 by Rhett Garber
:license: ISC, see LICENSE for more details.

"""

__title__ = 'ziggy'
__version__ = '0.0.1'
__build__ = 0
__author__ = 'Rhett Garber'
__license__ = 'ISC'
__copyright__ = 'Copyright 2012 Rhett Garber'

import logging

from . import utils
from . import network
from .context import Context, set, append, add
from . import context as _context_mod
from .errors import Error
from .timer import timeit

log = logging.getLogger(__name__)


def configure(host, port, recorder=None):
    """Initialize ziggy

    This instructs the ziggy system where to send its logging data. If ziggy
    is not configured, log data will be silently dropped.

    Currently we support logging through the network (and the configured host
    and port) to a ziggyd instance, or to the specified recorder function.
    """
    global _record_function
    if recorder:
        context._recorder_function = recorder
    elif host and port:
        network.init(host, port)
        context._recorder_function = network.send
    else:
        log.warning("Empty ziggy configuration")

instruction:
Allow unsetting of configuration (for testing)

output:
# -*- coding: utf-8 -*-

"""
Ziggy
~~~~~~~~

:copyright: (c) 2012 by Rhett Garber
:license: ISC, see LICENSE for more details.

"""

__title__ = 'ziggy'
__version__ = '0.0.1'
__build__ = 0
__author__ = 'Rhett Garber'
__license__ = 'ISC'
__copyright__ = 'Copyright 2012 Rhett Garber'

import logging

from . import utils
from . import network
from .context import Context, set, append, add
from . import context as _context_mod
from .errors import Error
from .timer import timeit

log = logging.getLogger(__name__)


def configure(host, port, recorder=None):
    """Initialize ziggy

    This instructs the ziggy system where to send its logging data. If ziggy
    is not configured, log data will be silently dropped.

    Currently we support logging through the network (and the configured host
    and port) to a ziggyd instance, or to the specified recorder function.
    """
    global _record_function
    if recorder:
        context._recorder_function = recorder
    elif host and port:
        network.init(host, port)
        context._recorder_function = network.send
    else:
        log.warning("Empty ziggy configuration")
        context._recorder_function = None

input:
import os

config = {}

system_mongo_host = os.environ.get('MONGODB_PORT_27017_TCP_ADDR')
system_elastic_host = os.environ.get('ELASTIC_PORT_9300_TCP_ADDR')

config['HOST'] = ''
config['PORT'] = 5000
config['MONGODB_HOST'] = system_mongo_host if system_mongo_host else 'localhost'
config['MONGODB_PORT'] = 27017
config['ELASTIC_HOST'] = system_elastic_host if system_elastic_host else 'localhost'
config['ELASTIC_PORT'] = 9200
config['ACCEPTED_ORIGINS'] = ['http://104.236.77.225', 'http://localhost:3000']

instruction:
Add two new domains to whitelist for CORS.

output:
import os

config = {}

system_mongo_host = os.environ.get('MONGODB_PORT_27017_TCP_ADDR')
system_elastic_host = os.environ.get('ELASTIC_PORT_9300_TCP_ADDR')

config['HOST'] = ''
config['PORT'] = 5000
config['MONGODB_HOST'] = system_mongo_host if system_mongo_host else 'localhost'
config['MONGODB_PORT'] = 27017
config['ELASTIC_HOST'] = system_elastic_host if system_elastic_host else 'localhost'
config['ELASTIC_PORT'] = 9200
config['ACCEPTED_ORIGINS'] = ['http://beta.founderati.io',
                              'http://beta.thehookemup.com',
                              'http://104.236.77.225',
                              'http://localhost:3000']

input:
#!/usr/bin/env python
import os
import signal
import sys

from app.main import app, queues, sched


def _teardown(signal, frame):
    sched.shutdown(wait=False)
    for queue in queues.values():
        queue.put(None)
    queues.clear()
    # Let the interrupt bubble up so that Flask/Werkzeug see it
    raise KeyboardInterrupt


if __name__ == '__main__':
    if len(sys.argv) > 1 and sys.argv[1] == 'debug':
        app.debug = True
    signal.signal(signal.SIGINT, _teardown)
    port = int(os.environ.get('PORT', 5000))
    app.run(host='0.0.0.0', port=port, use_reloader=False, threaded=True)

instruction:
Configure logger in debug mode

output:
#!/usr/bin/env python
import logging
import os
import signal
import sys

from app.main import app, queues, sched


def _teardown(signal, frame):
    sched.shutdown(wait=False)
    for queue in queues.values():
        queue.put(None)
    queues.clear()
    # Let the interrupt bubble up so that Flask/Werkzeug see it
    raise KeyboardInterrupt


if __name__ == '__main__':
    if len(sys.argv) > 1 and sys.argv[1] == 'debug':
        logging.basicConfig()
        app.debug = True
    signal.signal(signal.SIGINT, _teardown)
    port = int(os.environ.get('PORT', 5000))
    app.run(host='0.0.0.0', port=port, use_reloader=False, threaded=True)

input:
# -*- coding: utf-8 -*-

from nimp.commands._command import *
from nimp.utilities.build import *


#-------------------------------------------------------------------------------
class VsBuildCommand(Command):
    def __init__(self):
        Command.__init__(self, 'vs-build', 'Builds a Visual Studio project')

    #---------------------------------------------------------------------------
    def configure_arguments(self, env, parser):
        parser.add_argument('solution', help = 'Solution file', metavar = '<FILE>')
        parser.add_argument('project', help = 'Project', metavar = '<FILE>', default = 'None')
        parser.add_argument('--target', help = 'Target', metavar = '<TARGET>', default = 'Build')
        parser.add_argument('-c', '--configuration', help = 'configuration to build', metavar = '<configuration>', default = 'release')
        parser.add_argument('-p', '--platform', help = 'platform to build', metavar = '<platform>', default = 'Win64')
        parser.add_argument('--vs-version', help = 'VS version to use', metavar = '<VERSION>', default = '12')
        return True

    #---------------------------------------------------------------------------
    def run(self, env):
        return vsbuild(env.solution, env.platform, env.configuration, env.project, env.vs_version, env.target)

instruction:
Use separate variable names for Visual Studio config/platform.

output:
# -*- coding: utf-8 -*-

from nimp.commands._command import *
from nimp.utilities.build import *


#-------------------------------------------------------------------------------
class VsBuildCommand(Command):
    def __init__(self):
        Command.__init__(self, 'vs-build', 'Builds a Visual Studio project')

    #---------------------------------------------------------------------------
    def configure_arguments(self, env, parser):
        parser.add_argument('solution', help = 'Solution file', metavar = '<FILE>')
        parser.add_argument('project', help = 'Project', metavar = '<FILE>', default = 'None')
        parser.add_argument('--target', help = 'Target', metavar = '<TARGET>', default = 'Build')
        parser.add_argument('-c', '--vs-configuration', help = 'configuration to build', metavar = '<vs-configuration>', default = 'release')
        parser.add_argument('-p', '--vs-platform', help = 'platform to build', metavar = '<vs-platform>', default = 'Win64')
        parser.add_argument('--vs-version', help = 'VS version to use', metavar = '<VERSION>', default = '12')
        return True

    #---------------------------------------------------------------------------
    def run(self, env):
        return vsbuild(env.solution, env.vs_platform, env.vs_configuration, env.project, env.vs_version, env.target)

input:
#!/usr/bin/env python
from utils import file_templates
from utils.validation import is_valid_gpu_mem


def main():
    gpu_mem = 0
    while gpu_mem == 0:
        user_input = raw_input("Enter GPU memory in MB (16/32/64/128/256): ")
        if is_valid_gpu_mem(user_input):
            gpu_mem = user_input
        else:
            print("Acceptable memory values are: 16/32/64/128/256")
    update_file('/boot/config.txt', gpu_mem)


def update_file(path, gpu_mem):
    data = {
        'gpu_mem': gpu_mem
    }
    template_name = path.split('/')[-1]
    new_file_data = file_templates.build(template_name, data)
    with open(path, 'w') as f:
        f.write(new_file_data)


if __name__ == '__main__':
    main()

instruction:
Make GPU mem split optional

output:
#!/usr/bin/env python
from utils import file_templates
from utils.validation import is_valid_gpu_mem


def main():
    user_input = raw_input("Want to change the GPU memory split? (Y/N): ")
    if user_input == 'Y':
        gpu_mem = 0
        while gpu_mem == 0:
            mem_split = raw_input("Enter GPU memory in MB (16/32/64/128/256): ")
            if is_valid_gpu_mem(mem_split):
                gpu_mem = mem_split
            else:
                print("Acceptable memory values are: 16/32/64/128/256")
        update_file('/boot/config.txt', gpu_mem)
    else:
        print("Skipping GPU memory split...")


def update_file(path, gpu_mem):
    data = {
        'gpu_mem': gpu_mem
    }
    template_name = path.split('/')[-1]
    new_file_data = file_templates.build(template_name, data)
    with open(path, 'w') as f:
        f.write(new_file_data)


if __name__ == '__main__':
    main()

input:
import logging
logger = logging.getLogger()
logger.setLevel(logging.INFO)

from ask import alexa


def lambda_handler(request_obj, context=None):
    return alexa.route_request(request_obj)


@alexa.default
def default_handler(request):
    logger.info('default_handler')
    return alexa.respond('There were 42 accidents in 2016.')


@alexa.request("LaunchRequest")
def launch_request_handler(request):
    logger.info('launch_request_handler')
    return alexa.create_response(message='You can ask me about car accidents.')


@alexa.request("SessionEndedRequest")
def session_ended_request_handler(request):
    logger.info('session_ended_request_handler')
    return alexa.create_response(message="Goodbye!")


@alexa.intent('AMAZON.CancelIntent')
def cancel_intent_handler(request):
    logger.info('cancel_intent_handler')
    return alexa.create_response(message='ok', end_session=True)


@alexa.intent('AMAZON.HelpIntent')
def help_intent_handler(request):
    logger.info('help_intent_handler')
    return alexa.create_response(message='You can ask me about car accidents.')


@alexa.intent('AMAZON.StopIntent')
def stop_intent_handler(request):
    logger.info('stop_intent_handler')
    return alexa.create_response(message='ok', end_session=True)

instruction:
Use respond instead of create_response

output:
import logging
logger = logging.getLogger()
logger.setLevel(logging.INFO)

from ask import alexa


def lambda_handler(request_obj, context=None):
    return alexa.route_request(request_obj)


@alexa.default
def default_handler(request):
    logger.info('default_handler')
    return alexa.respond('There were 42 accidents in 2016.')


@alexa.request("LaunchRequest")
def launch_request_handler(request):
    logger.info('launch_request_handler')
    return alexa.respond('You can ask me about car accidents.')


@alexa.request("SessionEndedRequest")
def session_ended_request_handler(request):
    logger.info('session_ended_request_handler')
    return alexa.respond('Goodbye.')


@alexa.intent('AMAZON.CancelIntent')
def cancel_intent_handler(request):
    logger.info('cancel_intent_handler')
    return alexa.respond('Okay.', end_session=True)


@alexa.intent('AMAZON.HelpIntent')
def help_intent_handler(request):
    logger.info('help_intent_handler')
    return alexa.respond('You can ask me about car accidents.')


@alexa.intent('AMAZON.StopIntent')
def stop_intent_handler(request):
    logger.info('stop_intent_handler')
    return alexa.respond('Okay.', end_session=True)

input:
#!/usr/bin/env python
# coding=utf-8
import sys

from kitchen.text.converters import getwriter

from utils.log import getLogger, open_log, close_log
from utils.misc import output_exception
from system.factory_manager import Manager

sys.stdout = getwriter('utf-8')(sys.stdout)
sys.stderr = getwriter('utf-8')(sys.stderr)

open_log("output.log")

logger = getLogger("System")
logger.info("Starting up..")

try:
    manager = Manager()
except Exception:
    logger.critical("Runtime error - process cannot continue!")
    output_exception(logger)
finally:
    close_log("output.log")
    try:
        raw_input("Press enter to exit.")
    except:
        pass

instruction:
Create logs folder if it doesn't exist (to prevent errors)

output:
#!/usr/bin/env python
# coding=utf-8
import os
import sys

from kitchen.text.converters import getwriter

from utils.log import getLogger, open_log, close_log
from utils.misc import output_exception
from system.factory_manager import Manager

sys.stdout = getwriter('utf-8')(sys.stdout)
sys.stderr = getwriter('utf-8')(sys.stderr)

if not os.path.exists("logs"):
    os.mkdir("logs")

open_log("output.log")

logger = getLogger("System")
logger.info("Starting up..")

try:
    manager = Manager()
except Exception:
    logger.critical("Runtime error - process cannot continue!")
    output_exception(logger)
finally:
    close_log("output.log")
    try:
        raw_input("Press enter to exit.")
    except:
        pass

input:
import os
import sys

from django.conf import settings

DIR_NAME = os.path.dirname(__file__)

settings.configure(
    DEBUG=True,
    DATABASES={
        'default': {
            'ENGINE': 'django.db.backends.sqlite3',
        }
    },
    INSTALLED_APPS=(
        'django.contrib.auth',
        'django.contrib.contenttypes',
        'django.contrib.sessions',
        'django.contrib.admin',
        'cuser',
    ),
    ROOT_URLCONF='testss.CuserTestCase.urls',
    MIDDLEWARE_CLASSES=[
        'django.middleware.common.CommonMiddleware',
        'django.contrib.sessions.middleware.SessionMiddleware',
        'django.middleware.csrf.CsrfViewMiddleware',
        'django.contrib.auth.middleware.AuthenticationMiddleware',
        'django.contrib.messages.middleware.MessageMiddleware',
        'cuser.middleware.CuserMiddleware',
    ]
)

from django.test.simple import DjangoTestSuiteRunner

test_runner = DjangoTestSuiteRunner(verbosity=2)
failures = test_runner.run_tests(['cuser', ])
if failures:
    sys.exit(failures)

instruction:
Fix running tests for both django 1.6 and 1.7

output:
import os
import sys

import django
from django.conf import settings

DJANGO_VERSION = float('.'.join([str(i) for i in django.VERSION[0:2]]))
DIR_NAME = os.path.dirname(__file__)

settings.configure(
    DEBUG=True,
    DATABASES={
        'default': {
            'ENGINE': 'django.db.backends.sqlite3',
        }
    },
    INSTALLED_APPS=(
        'django.contrib.auth',
        'django.contrib.contenttypes',
        'django.contrib.sessions',
        'django.contrib.admin',
        'cuser',
    ),
    ROOT_URLCONF='testss.CuserTestCase.urls',
    MIDDLEWARE_CLASSES=[
        'django.middleware.common.CommonMiddleware',
        'django.contrib.sessions.middleware.SessionMiddleware',
        'django.middleware.csrf.CsrfViewMiddleware',
        'django.contrib.auth.middleware.AuthenticationMiddleware',
        'django.contrib.messages.middleware.MessageMiddleware',
        'cuser.middleware.CuserMiddleware',
    ]
)

from django.test.simple import DjangoTestSuiteRunner

if DJANGO_VERSION >= 1.7:
    django.setup()

test_runner = DjangoTestSuiteRunner(verbosity=2)
failures = test_runner.run_tests(['cuser', ])
if failures:
    sys.exit(failures)

input:
# Data sources
database(
    thermoLibraries = ['primaryThermoLibrary'],
    reactionLibraries = [],
    seedMechanisms = [],
    kineticsDepositories = ['training'],
    kineticsFamilies = ['!Intra_Disproportionation','!Substitution_O'],
    kineticsEstimator = 'rate rules',
)

# List of species
species(
    label='ethane',
    reactive=True,
    structure=SMILES("CC"),
)

# Reaction systems
simpleReactor(
    temperature=(1350,'K'),
    pressure=(1.0,'bar'),
    initialMoleFractions={
        "ethane": 1.0,
    },
    terminationConversion={
        'ethane': 0.9,
    },
    terminationTime=(1e6,'s'),
)

solvation(
    solvent='water'
)

simulator(
    atol=1e-16,
    rtol=1e-8,
)

model(
    toleranceKeepInEdge=0.0,
    toleranceMoveToCore=0.1,
    toleranceInterruptSimulation=0.1,
    maximumEdgeSpecies=100000
)

options(
    units='si',
    saveRestartPeriod=None,
    drawMolecules=False,
    generatePlots=False,
)

instruction:
Remove solvent(water) from minimal example.

Minimal should be just that - minimal. This hides issue #165

output:
# Data sources
database(
    thermoLibraries = ['primaryThermoLibrary'],
    reactionLibraries = [],
    seedMechanisms = [],
    kineticsDepositories = ['training'],
    kineticsFamilies = ['!Intra_Disproportionation','!Substitution_O'],
    kineticsEstimator = 'rate rules',
)

# List of species
species(
    label='ethane',
    reactive=True,
    structure=SMILES("CC"),
)

# Reaction systems
simpleReactor(
    temperature=(1350,'K'),
    pressure=(1.0,'bar'),
    initialMoleFractions={
        "ethane": 1.0,
    },
    terminationConversion={
        'ethane': 0.9,
    },
    terminationTime=(1e6,'s'),
)

simulator(
    atol=1e-16,
    rtol=1e-8,
)

model(
    toleranceKeepInEdge=0.0,
    toleranceMoveToCore=0.1,
    toleranceInterruptSimulation=0.1,
    maximumEdgeSpecies=100000
)

options(
    units='si',
    saveRestartPeriod=None,
    drawMolecules=False,
    generatePlots=False,
)

input:
from flask import abort

from .models import Service
from .validation import get_validation_errors
from .service_utils import filter_services


def validate_brief_data(brief, enforce_required=True, required_fields=None):
    errs = get_validation_errors(
        'briefs-{}-{}'.format(brief.framework.slug, brief.lot.slug),
        brief.data,
        enforce_required=enforce_required,
        required_fields=required_fields
    )

    if errs:
        abort(400, errs)


def is_supplier_eligible_for_brief(supplier, brief):
    services = filter_services(
        framework_slugs=[brief.framework.slug],
        statuses=["published"],
        lot_slug=brief.lot.slug,
        location=brief.data["location"],
        role=brief.data["specialistRole"] if brief.lot.slug == "digital-specialists" else None
    )

    services = services.filter(Service.supplier_id == supplier.supplier_id)
    return services.count() > 0

instruction:
Add criteria weighting 100% total validation

Checks the criteria weighting sum if all criteria fields are set. This relies on all three fields being required. If the fields don't add up to 100, an error is added for each field that doesn't have any other validation errors.

output:
from flask import abort

from .models import Service
from .validation import get_validation_errors
from .service_utils import filter_services


def validate_brief_data(brief, enforce_required=True, required_fields=None):
    errs = get_validation_errors(
        'briefs-{}-{}'.format(brief.framework.slug, brief.lot.slug),
        brief.data,
        enforce_required=enforce_required,
        required_fields=required_fields
    )

    criteria_weighting_keys = ['technicalWeighting', 'culturalWeighting', 'priceWeighting']
    # Only check total if all weightings are set
    if all(key in brief.data for key in criteria_weighting_keys):
        criteria_weightings = sum(brief.data[key] for key in criteria_weighting_keys)
        if criteria_weightings != 100:
            for key in set(criteria_weighting_keys) - set(errs):
                errs[key] = 'total_should_be_100'

    if errs:
        abort(400, errs)


def is_supplier_eligible_for_brief(supplier, brief):
    services = filter_services(
        framework_slugs=[brief.framework.slug],
        statuses=["published"],
        lot_slug=brief.lot.slug,
        location=brief.data["location"],
        role=brief.data["specialistRole"] if brief.lot.slug == "digital-specialists" else None
    )

    services = services.filter(Service.supplier_id == supplier.supplier_id)
    return services.count() > 0

input:
#!/usr/bin/env python
# -*- encoding: utf-8 -*-

from collections import Counter

import pandas as pd

XLS_NAME = 'startup.xls'
SHEET_NAME = 'STARTUP_15092014'
COL_NAME = 'nat.giuridica'


def main():
    xls = pd.ExcelFile(XLS_NAME)
    sheet = xls.parse(SHEET_NAME, index_col=None)
    for k, v in Counter(sheet[COL_NAME]).most_common():
        print "%4d\t%s" % (v, k)


if __name__ == '__main__':
    main()

instruction:
Add pretty output for two more fields.

output:
#!/usr/bin/env python
# -*- encoding: utf-8 -*-

from collections import Counter

import pandas as pd

XLS_NAME = 'startup.xls'
SHEET_NAME = 'STARTUP_15092014'


def main():
    xls = pd.ExcelFile(XLS_NAME)
    sheet = xls.parse(SHEET_NAME, index_col=None, convert_float=False)

    data = [el for el in sheet['nat.giuridica']]
    for k, v in Counter(data).most_common():
        print "%4d\t%s" % (v, k)
    print

    data = [el for el in sheet['classe di valore della produzione ultimo anno (1)'] if el in ['A', 'B', 'C', 'D', 'E']]
    for k, v in Counter(data).most_common():
        print "%4d\t%s" % (v, k)
    print

    data = [el for el in sheet['classe di addetti ultimo anno (2)'] if el in ['A', 'B', 'C', 'D', 'E']]
    for k, v in Counter(data).most_common():
        print "%4d\t%s" % (v, k)


if __name__ == '__main__':
    main()

input:
from flask import render_template, current_app, flash, redirect, url_for

from . import main
from forms import ContactForm
from ..email import send_email


@main.route('/')
def index():
    return render_template('index.html')


@main.route('/about')
def about():
    return render_template('about.html')


@main.route('/menu')
def menu():
    return render_template('menu.html')


@main.route('/hours-and-directions')
def hours():
    return render_template('hours-and-directions.html')


@main.route('/contact', methods=['GET', 'POST'])
def contact():
    contact_form = ContactForm()
    if contact_form.validate_on_submit():
        name = contact_form.name.data
        email = contact_form.email.data
        phone = contact_form.phone.data
        message = contact_form.message.data
        send_email(current_app.config['MAIL_USERNAME'], 'Robata Grill Inquiry',
                   'mail/message', name=name, email=email, phone=phone, message=message)
        flash('Your message has been sent. We will be in contact with you shortly.')
        return redirect(url_for('main.contact'))
    return render_template('contact.html', contact_form=contact_form)


@main.route('/imageScroll')
def imageScroll():
    return render_template('imageScroll.html')

instruction:
Add additional view for sitemap.xml

output:
from flask import render_template, current_app, flash, redirect, url_for, send_from_directory

from . import main
from forms import ContactForm
from ..email import send_email


@main.route('/<path:filename>')
def static_from_root(filename):
    return send_from_directory(current_app.static_folder, filename)


@main.route('/')
def index():
    return render_template('index.html')


@main.route('/about')
def about():
    return render_template('about.html')


@main.route('/menu')
def menu():
    return render_template('menu.html')


@main.route('/hours-and-directions')
def hours():
    return render_template('hours-and-directions.html')


@main.route('/contact', methods=['GET', 'POST'])
def contact():
    contact_form = ContactForm()
    if contact_form.validate_on_submit():
        name = contact_form.name.data
        email = contact_form.email.data
        phone = contact_form.phone.data
        message = contact_form.message.data
        send_email(current_app.config['MAIL_USERNAME'], 'Robata Grill Inquiry',
                   'mail/message', name=name, email=email, phone=phone, message=message)
        flash('Your message has been sent. We will be in contact with you shortly.')
        return redirect(url_for('main.contact'))
    return render_template('contact.html', contact_form=contact_form)


@main.route('/imageScroll')
def imageScroll():
    return render_template('imageScroll.html')

input:
from __future__ import absolute_import, unicode_literals

import os

import pytest

import kombu

from .common import BasicFunctionality


def get_connection(hostname, port, vhost):
    return kombu.Connection('amqp://{}:{}'.format(hostname, port))


@pytest.fixture()
def connection(request):
    # this fixture yields plain connections to broker and TLS encrypted
    return get_connection(
        hostname=os.environ.get('RABBITMQ_HOST', 'localhost'),
        port=os.environ.get('RABBITMQ_5672_TCP', '5672'),
        vhost=getattr(
            request.config, "slaveinput", {}
        ).get("slaveid", None),
    )


@pytest.mark.env('py-amqp')
@pytest.mark.flaky(reruns=5, reruns_delay=2)
class test_PyAMQPBasicFunctionality(BasicFunctionality):
    pass

instruction:
Use explicit py-amqp transport instead of amqp in integration tests

output:
from __future__ import absolute_import, unicode_literals

import os

import pytest

import kombu

from .common import BasicFunctionality


def get_connection(hostname, port, vhost):
    return kombu.Connection('pyamqp://{}:{}'.format(hostname, port))


@pytest.fixture()
def connection(request):
    # this fixture yields plain connections to broker and TLS encrypted
    return get_connection(
        hostname=os.environ.get('RABBITMQ_HOST', 'localhost'),
        port=os.environ.get('RABBITMQ_5672_TCP', '5672'),
        vhost=getattr(
            request.config, "slaveinput", {}
        ).get("slaveid", None),
    )


@pytest.mark.env('py-amqp')
@pytest.mark.flaky(reruns=5, reruns_delay=2)
class test_PyAMQPBasicFunctionality(BasicFunctionality):
    pass

input:
from rest_framework import viewsets

from books.models import BookPage
from books.serializers import BookPageSerializer


class BookPageViewSet(viewsets.ModelViewSet):
    """
    API endpoint that allows BookPages to be viewed or edited.
    """
    queryset = BookPage.objects.all()
    serializer_class = BookPageSerializer

instruction:
Order book pages by page number.

output:
from rest_framework import viewsets

from books.models import BookPage
from books.serializers import BookPageSerializer


class BookPageViewSet(viewsets.ModelViewSet):
    """
    API endpoint that allows BookPages to be viewed or edited.
    """
    queryset = BookPage.objects.order_by('page_number')
    serializer_class = BookPageSerializer

""" This file defines the types for type annotations. These names aren't part of the module namespace, but they are used in the annotations in the function signatures. The functions in the module are only valid for inputs that match the given type annotations. """ __all__ = ['Array', 'Device', 'Dtype', 'SupportsDLPack', 'SupportsBufferProtocol', 'PyCapsule'] from typing import Any, Sequence, Type, Union from . import (Array, int8, int16, int32, int64, uint8, uint16, uint32, uint64, float32, float64) Array = ndarray Device = TypeVar('device') Dtype = Literal[int8, int16, int32, int64, uint8, uint16, uint32, uint64, float32, float64] SupportsDLPack = TypeVar('SupportsDLPack') SupportsBufferProtocol = TypeVar('SupportsBufferProtocol') PyCapsule = TypeVar('PyCapsule')
Use better type definitions for the array API custom types
""" This file defines the types for type annotations. These names aren't part of the module namespace, but they are used in the annotations in the function signatures. The functions in the module are only valid for inputs that match the given type annotations. """ __all__ = ['Array', 'Device', 'Dtype', 'SupportsDLPack', 'SupportsBufferProtocol', 'PyCapsule'] from typing import Any, Sequence, Type, Union from . import (Array, int8, int16, int32, int64, uint8, uint16, uint32, uint64, float32, float64) # This should really be recursive, but that isn't supported yet. See the # similar comment in numpy/typing/_array_like.py NestedSequence = Sequence[Sequence[Any]] Device = Any Dtype = Type[Union[[int8, int16, int32, int64, uint8, uint16, uint32, uint64, float32, float64]]] SupportsDLPack = Any SupportsBufferProtocol = Any PyCapsule = Any
input:
from rdflib import *
import unittest


class test_normalisedString(unittest.TestCase):

    def test1(self):
        lit2 = Literal("\two\nw", datatype=XSD.normalizedString)
        lit = Literal("\two\nw", datatype=XSD.string)
        self.assertEqual(lit == lit2, False)

    def test2(self):
        lit = Literal("\tBeing a Doctor Is\n\ta Full-Time Job\r", datatype=XSD.normalizedString)
        st = Literal(" Being a Doctor Is a Full-Time Job ", datatype=XSD.string)
        self.assertFalse(Literal.eq(st, lit))

    def test3(self):
        lit = Literal("hey\nthere", datatype=XSD.normalizedString).n3()
        print(lit)
        self.assertTrue(lit == "\"hey there\"^^<http://www.w3.org/2001/XMLSchema#normalizedString>")


if __name__ == "__main__":
    unittest.main()

instruction:
Add a new test to test all chars that are getting replaced

output:
from rdflib import Literal
from rdflib.namespace import XSD
import unittest


class test_normalisedString(unittest.TestCase):

    def test1(self):
        lit2 = Literal("\two\nw", datatype=XSD.normalizedString)
        lit = Literal("\two\nw", datatype=XSD.string)
        self.assertEqual(lit == lit2, False)

    def test2(self):
        lit = Literal("\tBeing a Doctor Is\n\ta Full-Time Job\r", datatype=XSD.normalizedString)
        st = Literal(" Being a Doctor Is a Full-Time Job ", datatype=XSD.string)
        self.assertFalse(Literal.eq(st, lit))

    def test3(self):
        lit = Literal("hey\nthere", datatype=XSD.normalizedString).n3()
        self.assertTrue(lit == "\"hey there\"^^<http://www.w3.org/2001/XMLSchema#normalizedString>")

    def test4(self):
        lit = Literal("hey\nthere\ta tab\rcarriage return", datatype=XSD.normalizedString)
        expected = Literal("""hey there a tab carriage return""", datatype=XSD.string)
        self.assertEqual(str(lit), str(expected))


if __name__ == "__main__":
    unittest.main()

input:
from django.test import TestCase
from django.test import Client
from noveltorpedo.models import *
import unittest
from django.utils import timezone

client = Client()


class SearchTests(TestCase):

    def test_that_the_front_page_loads_properly(self):
        response = client.get('/')
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, 'NovelTorpedo Search')

    def test_insertion_and_querying_of_data(self):
        author = Author()
        author.name = "Jack Frost"
        author.save()

        story = Story()
        story.title = "The Big One"
        story.save()

        story.authors.add(author)

        segment = StorySegment()
        segment.published = timezone.now()
        segment.story = story
        segment.title = "Chapter One"
        segment.contents = "This is how it all went down..."
        segment.save()

instruction:
Rebuild index and test variety of queries

output:
from django.test import TestCase
from django.test import Client
from noveltorpedo.models import *
from django.utils import timezone
from django.core.management import call_command

client = Client()


class SearchTests(TestCase):

    def test_that_the_front_page_loads_properly(self):
        response = client.get('/')
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, 'NovelTorpedo Search')

    def test_insertion_and_querying_of_data(self):
        # Create a new story in the database.
        author = Author()
        author.name = 'Jack Frost'
        author.save()

        story = Story()
        story.title = 'The Big One'
        story.save()

        story.authors.add(author)

        segment = StorySegment()
        segment.published = timezone.now()
        segment.story = story
        segment.title = 'Chapter Three'
        segment.contents = 'This is how it all went down...'
        segment.save()

        # Index the new story.
        call_command('update_index')

        # Query via author name.
        response = client.get('/', {'q': 'Jack Frost'})
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, 'Jack Frost')
        self.assertContains(response, 'The Big One')
        self.assertContains(response, 'Chapter Three')
        self.assertContains(response, 'This is how it all went down...')

        # Query via story name.
        response = client.get('/', {'q': 'The Big One'})
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, 'Jack Frost')
        self.assertContains(response, 'The Big One')
        self.assertContains(response, 'Chapter Three')
        self.assertContains(response, 'This is how it all went down...')

        # Query via segment contents.
        response = client.get('/', {'q': 'Chapter Three'})
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, 'Jack Frost')
        self.assertContains(response, 'The Big One')
        self.assertContains(response, 'Chapter Three')
        self.assertContains(response, 'This is how it all went down...')

input:
import betamax
import os

with betamax.Betamax.configure() as config:
    config.cassette_library_dir = 'tests/cassettes'

    record_mode = 'once'
    if os.environ.get('TRAVIS_GH3'):
        record_mode = 'never'
    config.default_cassette_options['record_mode'] = record_mode

    config.define_cassette_placeholder(
        '<AUTH_TOKEN>',
        os.environ.get('GH_AUTH', 'xxxxxxxxxxx')
    )

instruction:
Update the default value for the placeholder

If I decide to start matching on headers this will be necessary

output:
import betamax
import os

with betamax.Betamax.configure() as config:
    config.cassette_library_dir = 'tests/cassettes'

    record_mode = 'once'
    if os.environ.get('TRAVIS_GH3'):
        record_mode = 'never'
    config.default_cassette_options['record_mode'] = record_mode

    config.define_cassette_placeholder(
        '<AUTH_TOKEN>',
        os.environ.get('GH_AUTH', 'x' * 20)
    )

"""Tests for vectors.""" from sympy import sympify from drudge import Vec def test_vecs_has_basic_properties(): """Tests the basic properties of vector instances.""" base = Vec('v') v_ab = Vec('v', indices=['a', 'b']) v_ab_1 = base['a', 'b'] v_ab_2 = (base['a'])['b'] indices_ref = (sympify('a'), sympify('b')) hash_ref = hash(v_ab) str_ref = 'v[a, b]' repr_ref = "Vec('v', (a, b))" for i in [v_ab, v_ab_1, v_ab_2]: assert i.base == base.base assert i.indices == indices_ref assert hash(i) == hash_ref assert i == v_ab assert str(i) == str_ref assert repr(i) == repr_ref
Update tests for vectors for the new protocol Now the tests for vectors are updated for the new non backward compatible change for the concepts of label and base.
"""Tests for vectors.""" from sympy import sympify from drudge import Vec def test_vecs_has_basic_properties(): """Tests the basic properties of vector instances.""" base = Vec('v') v_ab = Vec('v', indices=['a', 'b']) v_ab_1 = base['a', 'b'] v_ab_2 = (base['a'])['b'] indices_ref = (sympify('a'), sympify('b')) hash_ref = hash(v_ab) str_ref = 'v[a, b]' repr_ref = "Vec('v', (a, b))" for i in [v_ab, v_ab_1, v_ab_2]: assert i.label == base.label assert i.base == base assert i.indices == indices_ref assert hash(i) == hash_ref assert i == v_ab assert str(i) == str_ref assert repr(i) == repr_ref
input:
import requests
from py2neo import Graph, authenticate


class Neo4j:

    def __init__(self, host, port, username=None, password=None):
        self.host = host
        self.port = port
        self.username = username
        self.password = password
        self.host_port = "{host}:{port}".format(host=host, port=port)
        self.url = "http://{host_port}/db/data/".format(host_port=self.host_port)

    def connection(self):
        if self.username and self.password:
            authenticate(self.host_port, self.username, self.password)
        graph = Graph(self.url)
        return graph

    def cypher(self, query):
        tx = self.connection().cypher.begin()
        try:
            tx.append(query)
            results = tx.process()
            tx.commit()
        except Exception as e:
            results = e
        except KeyboardInterrupt:
            tx.rollback()
            results = ""
        return results

    def labels(self):
        return sorted(list(self.connection().node_labels))

    def relationship_types(self):
        return sorted(list(self.connection().relationship_types))

    def properties(self):
        url = self.url + "propertykeys"
        r = requests.get(url, auth=(self.username, self.password))
        props = r.json()
        return sorted(props)

instruction:
Remove host and port attributes from Neo4j

output:
import requests
from py2neo import Graph, authenticate


class Neo4j:

    def __init__(self, host, port, username=None, password=None):
        self.username = username
        self.password = password
        self.host_port = "{host}:{port}".format(host=host, port=port)
        self.url = "http://{host_port}/db/data/".format(host_port=self.host_port)

    def connection(self):
        if self.username and self.password:
            authenticate(self.host_port, self.username, self.password)
        graph = Graph(self.url)
        return graph

    def cypher(self, query):
        tx = self.connection().cypher.begin()
        try:
            tx.append(query)
            results = tx.process()
            tx.commit()
        except Exception as e:
            results = e
        except KeyboardInterrupt:
            tx.rollback()
            results = ""
        return results

    def labels(self):
        return sorted(list(self.connection().node_labels))

    def relationship_types(self):
        return sorted(list(self.connection().relationship_types))

    def properties(self):
        url = self.url + "propertykeys"
        r = requests.get(url, auth=(self.username, self.password))
        props = r.json()
        return sorted(props)

input:
from django.core.management import call_command

from celery import task


@task
def syncldap():
    """
    Call the appropriate management command to synchronize the LDAP users
    with the local database.
    """
    call_command('syncldap')

instruction:
Change Celery task to shared task

output:
from django.core.management import call_command

from celery import shared_task


@shared_task
def syncldap():
    """
    Call the appropriate management command to synchronize the LDAP users
    with the local database.
    """
    call_command('syncldap')

input:
from lxml import html
from PIL import Image
import requests


def enlarge_image(image_file):
    image = Image.open(image_file)
    enlarged_size = map(lambda x: x * 2, image.size)
    enlarged_image = image.resize(enlarged_size)
    return enlarged_image


def extract_text(image_file):
    image = enlarge_image(image_file)
    # Use Tesseract to extract text from the enlarged image. Then return it.


domain = 'speedtest.net'
page = requests.get('http://www.whois.com/whois/{}'.format(domain))
tree = html.fromstring(page.content)

instruction:
Add functions to scrape whois data and fix the e-mails in it

- Add function scrape_whois which scrapes the raw whois information for a given domain from http://www.whois.com/whois.
- Add function fix_emails. http://www.whois.com hides the username-part of the contact e-mails from the whois info by displaying it as an image. This function fixes that using the extract_text function.

output:
from lxml import html
from PIL import Image
import requests
import urllib.request


def enlarge_image(image_file):
    image = Image.open(image_file)
    enlarged_size = map(lambda x: x * 2, image.size)
    enlarged_image = image.resize(enlarged_size)
    return enlarged_image


def extract_text(image_file):
    image = enlarge_image(image_file)
    # Use Tesseract to extract text from the enlarged image. Then return it.


def fix_emails(whois_data, image_urls):
    count = 0

    for index, item in enumerate(whois_data):
        if item.startswith('@'):
            with urllib.request.urlopen(image_urls[count]) as response:
                email_username = extract_text(image_urls[count])
            whois_data[index - 1:index + 1] = [whois_data[index - 1] + email_username + whois_data[index]]
            count += 1

    return whois_data


def scrape_whois(domain):
    domain = 'speedtest.net'
    page = requests.get('http://www.whois.com/whois/{}'.format(domain))
    tree = html.fromstring(page.content)

    registrar_data = tree.xpath('//*[@id="registrarData"]/text()')
    registrar_images = list(map(lambda x: 'http://www.whois.com' + x,
                                tree.xpath('//*[@id="registrarData"]/img/@src')))

    registry_data = tree.xpath('//*[@id="registryData"]/text()')
    registry_images = list(map(lambda x: 'http://www.whois.com' + x,
                               tree.xpath('//*[@id="registryData"]/img/@src')))

input:
'''
commands for the manager cli
'''
from uuid import uuid4

import requests


def command_puzzleboard_consume(**kwargs):
    url = kwargs['--consume-url']
    name = kwargs['--name']
    size = kwargs['--size']

    data = f'{{"puzzleboard": "{name}", "size": {size}, correlation-id": "{uuid4()}"}}'
    print(data)

    r = requests.post(url, data)
    print(f'status_code={r.status_code}')
    print(f'reason={r.reason}')
    print(f'text={r.text}')

instruction:
Make sure API payload is well-formed JSON

output:
'''
commands for the manager cli
'''
from uuid import uuid4

import requests


def command_puzzleboard_consume(**kwargs):
    url = kwargs['--consume-url']
    name = kwargs['--name']
    size = kwargs['--size']

    data = f'{{"puzzleboard": "{name}", "size": {size}, "correlation-id": "{uuid4()}"}}'
    print(data)

    r = requests.post(url, data)
    print(f'status_code={r.status_code}')
    print(f'reason={r.reason}')
    print(f'text={r.text}')

input:
# -*- coding: utf-8 -*-
##############################################################################
#
#    Sales Channels
#    Copyright (C) 2016 June
#    1200 Web Development
#    http://1200wd.com/
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

from openerp import models, fields, api, _, exceptions


class AccountTax(models.Model):
    _inherit = 'account.tax'

    @api.model
    def _get_sales_channel_domain(self):
        ids = self.env.ref('res_partner_category.sales_channel').ids
        return [('category_id', 'in', ids)]

    sales_channel_id = fields.Many2one(
        'res.partner', string="Sales channel", ondelete='set null',
        domain=_get_sales_channel_domain)

instruction:
[IMP] Add constraint, tax name must be unique for each company and sales channel

output:
# -*- coding: utf-8 -*-
##############################################################################
#
#    Sales Channels
#    Copyright (C) 2016 June
#    1200 Web Development
#    http://1200wd.com/
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

from openerp import models, fields, api, _, exceptions


class AccountTax(models.Model):
    _inherit = 'account.tax'

    @api.model
    def _get_sales_channel_domain(self):
        ids = self.env.ref('res_partner_category.sales_channel').ids
        return [('category_id', 'in', ids)]

    sales_channel_id = fields.Many2one(
        'res.partner', string="Sales channel", ondelete='set null',
        domain=_get_sales_channel_domain)

    _sql_constraints = [
        ('name_company_uniq',
         'unique(name, company_id, sales_channel_id)',
         'Tax Name must be unique per company and sales channel!'),
    ]

input:
from django import forms


class LoginForm(forms.Form):
    email = forms.EmailField(widget=forms.EmailInput(
        attrs={'placeholder': 'Email', 'class': 'form-control', }))
    password = forms.CharField(widget=forms.PasswordInput(
        attrs={'placeholder': 'Password', 'class': 'form-control', }))


class SearchTrip(forms.Form):
    origin_id = forms.IntegerField()
    destination_id = forms.IntegerField()
    datetime = forms.DateTimeField()

instruction:
Fix LoginForm to be conformant to builtin AuthenticationForm

output:
from django.contrib.auth.forms import AuthenticationForm
from django import forms


class LoginForm(AuthenticationForm):
    username = forms.CharField(widget=forms.EmailInput(
        attrs={'placeholder': 'Email', 'class': 'form-control', }))
    password = forms.CharField(widget=forms.PasswordInput(
        attrs={'placeholder': 'Password', 'class': 'form-control', }))


class SearchTrip(forms.Form):
    origin_id = forms.IntegerField()
    destination_id = forms.IntegerField()
    datetime = forms.DateTimeField()

input:
from signbank.registration.models import *
from django.contrib import admin
from django.core.urlresolvers import reverse


class UserProfileAdmin(admin.ModelAdmin):
    list_display = ['user', 'permissions', 'best_describes_you', 'australian',
                    'auslan_user', 'deaf', 'researcher_credentials']
    readonly_fields = ['user', 'australian', 'auslan_user', 'deaf', 'yob',
                       'postcode', 'best_describes_you', 'researcher_credentials',
                       'learned', 'schooltype', 'school', 'teachercomm']
    list_filter = ['australian', 'auslan_user', 'deaf']

    def permissions(self, obj):
        url = reverse('admin:auth_user_change', args=(obj.pk,))
        return '<a href="%s">View user</a>' % (url)
    permissions.allow_tags = True

admin.site.register(UserProfile, UserProfileAdmin)

instruction:
Use the correct user ID to show the user from the profiles view. Fixes #61.

output:
from signbank.registration.models import *
from django.contrib import admin
from django.core.urlresolvers import reverse


class UserProfileAdmin(admin.ModelAdmin):
    list_display = ['user', 'permissions', 'best_describes_you', 'australian',
                    'auslan_user', 'deaf', 'researcher_credentials']
    readonly_fields = ['user', 'australian', 'auslan_user', 'deaf', 'yob',
                       'postcode', 'best_describes_you', 'researcher_credentials',
                       'learned', 'schooltype', 'school', 'teachercomm']
    list_filter = ['australian', 'auslan_user', 'deaf']

    def permissions(self, obj):
        url = reverse('admin:auth_user_change', args=(obj.user.id,))
        return '<a href="%s">View user</a>' % (url)
    permissions.allow_tags = True

admin.site.register(UserProfile, UserProfileAdmin)

from datetime import datetime from flask import Flask import plenario.tasks as tasks def create_worker(): app = Flask(__name__) app.config.from_object('plenario.settings') app.url_map.strict_slashes = False @app.route('/update/weather', methods=['POST']) def weather(): return tasks.update_weather.delay().id @app.route('/update/often', methods=['POST']) def metar(): return tasks.update_metar.delay().id @app.route('/update/<frequency>', methods=['POST']) def update(frequency): return tasks.frequency_update.delay(frequency).id @app.route('/archive', methods=['POST']) def archive(): return tasks.archive.delay(datetime.now()).id @app.route('/resolve', methods=['POST']) def resolve(): return tasks.resolve.delay().id @app.route('/health', methods=['GET', 'POST']) def check_health(): return tasks.health.delay().id return app
Add temporary check to block production resolve
import os from datetime import datetime from flask import Flask import plenario.tasks as tasks def create_worker(): app = Flask(__name__) app.config.from_object('plenario.settings') app.url_map.strict_slashes = False @app.route('/update/weather', methods=['POST']) def weather(): return tasks.update_weather.delay().id @app.route('/update/often', methods=['POST']) def metar(): return tasks.update_metar.delay().id @app.route('/update/<frequency>', methods=['POST']) def update(frequency): return tasks.frequency_update.delay(frequency).id @app.route('/archive', methods=['POST']) def archive(): return tasks.archive.delay(datetime.now()).id @app.route('/resolve', methods=['POST']) def resolve(): if not os.environ.get('PRIVATE'): return 'hullo' return tasks.resolve.delay().id @app.route('/health', methods=['GET', 'POST']) def check_health(): return tasks.health.delay().id return app
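The guard's behavior in isolation — a runnable sketch, where PRIVATE is simply the assumed deploy-time marker for the production environment:

import os

os.environ.pop('PRIVATE', None)
# Unset: the endpoint answers with a dummy body instead of queueing work.
print('hullo' if not os.environ.get('PRIVATE') else 'would enqueue resolve()')
os.environ['PRIVATE'] = '1'
# Set: the real task would be dispatched.
print('hullo' if not os.environ.get('PRIVATE') else 'would enqueue resolve()')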
#!/usr/bin/env python import json import rethinkdb as r import sys import optparse if __name__ == "__main__": parser = optparse.OptionParser() parser.add_option("-p", "--port", dest="port", help="rethinkdb port", default=30815) parser.add_option("-f", "--file", dest="filename", help="json file", type="string") (options, args) = parser.parse_args() if options.filename is None: print "You must specify json file" sys.exit(1) conn = r.connect('localhost', int(options.port), db='materialscommons') json_data = open(options.filename) data = json.load(json_data) existing = r.table('templates').get(data['id']).run(conn) if existing: r.table('templates').get(data['id']).delete().run(conn) r.table('templates').insert(data).run(conn) print 'template deleted and re-inserted into the database' else: r.table('templates').insert(data).run(conn) print 'template inserted into the database'
Update script to show which file it is loading.
#!/usr/bin/env python import json import rethinkdb as r import sys import optparse if __name__ == "__main__": parser = optparse.OptionParser() parser.add_option("-p", "--port", dest="port", help="rethinkdb port", default=30815) parser.add_option("-f", "--file", dest="filename", help="json file", type="string") (options, args) = parser.parse_args() if options.filename is None: print "You must specify json file" sys.exit(1) conn = r.connect('localhost', int(options.port), db='materialscommons') json_data = open(options.filename) print "Loading template file: %s" % (options.filename) data = json.load(json_data) existing = r.table('templates').get(data['id']).run(conn) if existing: r.table('templates').get(data['id']).delete().run(conn) r.table('templates').insert(data).run(conn) print 'template deleted and re-inserted into the database' else: r.table('templates').insert(data).run(conn) print 'template inserted into the database'
# Copyright 2013 Rooter Analysis S.L. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from django.db import models class PeerReviewAssignmentManager(models.Manager): def from_course(self, course): return self.get_query_set().filter( kq__unit__course=course).order_by('kq__unit__order')
Sort by kq too when returning peer review assignments
# Copyright 2013 Rooter Analysis S.L. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from django.db import models class PeerReviewAssignmentManager(models.Manager): def from_course(self, course): return self.get_query_set().filter( kq__unit__course=course).order_by( 'kq__unit__order', 'kq__order')
import pytest from thefuck.rules.git_push import match, get_new_command from tests.utils import Command @pytest.fixture def stderr(): return '''fatal: The current branch master has no upstream branch. To push the current branch and set the remote as upstream, use git push --set-upstream origin master ''' def test_match(stderr): assert match(Command('git push', stderr=stderr)) assert match(Command('git push master', stderr=stderr)) assert not match(Command('git push master')) assert not match(Command('ls', stderr=stderr)) def test_get_new_command(stderr): assert get_new_command(Command('git push', stderr=stderr))\ == "git push --set-upstream origin master"
Check arguments are preserved in git_push
import pytest from thefuck.rules.git_push import match, get_new_command from tests.utils import Command @pytest.fixture def stderr(): return '''fatal: The current branch master has no upstream branch. To push the current branch and set the remote as upstream, use git push --set-upstream origin master ''' def test_match(stderr): assert match(Command('git push', stderr=stderr)) assert match(Command('git push master', stderr=stderr)) assert not match(Command('git push master')) assert not match(Command('ls', stderr=stderr)) def test_get_new_command(stderr): assert get_new_command(Command('git push', stderr=stderr))\ == "git push --set-upstream origin master" assert get_new_command(Command('git push --quiet', stderr=stderr))\ == "git push --set-upstream origin master --quiet"
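How a rule can preserve trailing arguments — a hedged sketch, not necessarily thefuck's actual get_new_command: take the suggested command from the final stderr line and splice the original flags back on.

def get_new_command_sketch(script, stderr):
    # In this fixture the suggested invocation is the final stderr line.
    suggestion = stderr.splitlines()[-1].strip()
    # Swap the suggestion in for the bare 'git push', keeping what followed.
    return script.replace('git push', suggestion, 1)

stderr = ('fatal: The current branch master has no upstream branch.\n'
          'To push the current branch and set the remote as upstream, use\n'
          '\n'
          '    git push --set-upstream origin master\n')
print(get_new_command_sketch('git push --quiet', stderr))
# -> git push --set-upstream origin master --quiet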
#!/usr/bin/env python2.3 """ Read a maf file from stdin and write out a new maf with only blocks having all of the required in species, after dropping any other species and removing columns containing only gaps. usage: %prog species,species2,... < maf """ import psyco_full import bx.align.maf import copy import sys from itertools import * def main(): species = sys.argv[1].split( ',' ) maf_reader = bx.align.maf.Reader( sys.stdin ) maf_writer = bx.align.maf.Writer( sys.stdout ) for m in maf_reader: new_components = [] for comp in m.components: if comp.src.split( '.' )[0] in species: new_components.append( comp ) m.components = new_components if len( m.components ) > 1: maf_writer.write( m ) maf_reader.close() maf_writer.close() if __name__ == "__main__": main()
Remove all-gap columns after removing rows of the alignment
#!/usr/bin/env python2.3 """ Read a maf file from stdin and write out a new maf with only blocks having all of the required in species, after dropping any other species and removing columns containing only gaps. usage: %prog species,species2,... < maf """ import psyco_full import bx.align.maf import copy import sys from itertools import * def main(): species = sys.argv[1].split( ',' ) maf_reader = bx.align.maf.Reader( sys.stdin ) maf_writer = bx.align.maf.Writer( sys.stdout ) for m in maf_reader: new_components = [] for comp in m.components: if comp.src.split( '.' )[0] in species: new_components.append( comp ) m.components = new_components m.remove_all_gap_columns() if len( m.components ) > 1: maf_writer.write( m ) maf_reader.close() maf_writer.close() if __name__ == "__main__": main()
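What remove_all_gap_columns does, in miniature — a standalone sketch, not bx-python's implementation: after rows are dropped, columns holding only '-' carry no information for the remaining species and can be deleted.

# Two surviving rows; columns 1 and 3 are all gaps after filtering.
rows = ['A-C-', 'A-G-']
keep = [i for i in range(len(rows[0]))
        if any(row[i] != '-' for row in rows)]
print([''.join(row[i] for i in keep) for row in rows])  # ['AC', 'AG']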
from django.conf.urls import include, url from django.contrib import admin from django.conf import settings import pretix.control.urls import pretix.presale.urls urlpatterns = [ url(r'^control/', include(pretix.control.urls, namespace='control')), url(r'^admin/', include(admin.site.urls)), # The pretixpresale namespace is configured at the bottom of this file, because it # contains a wildcard-style URL which has to be configured _after_ debug settings. ] if settings.DEBUG: import debug_toolbar urlpatterns.append( url(r'^__debug__/', include(debug_toolbar.urls)), ) urlpatterns.append( url(r'', include(pretix.presale.urls, namespace='presale')) )
Allow plugins to register URLs
import importlib from django.apps import apps from django.conf.urls import include, url from django.contrib import admin from django.conf import settings import pretix.control.urls import pretix.presale.urls urlpatterns = [ url(r'^control/', include(pretix.control.urls, namespace='control')), url(r'^admin/', include(admin.site.urls)), # The pretixpresale namespace is configured at the bottom of this file, because it # contains a wildcard-style URL which has to be configured _after_ debug settings. ] if settings.DEBUG: import debug_toolbar urlpatterns.append( url(r'^__debug__/', include(debug_toolbar.urls)), ) for app in apps.get_app_configs(): if hasattr(app, 'PretixPluginMeta'): try: urlmod = importlib.import_module(app.name + '.urls') urlpatterns.append( url(r'', include(urlmod, namespace='plugins')) ) except ImportError: pass urlpatterns.append( url(r'', include(pretix.presale.urls, namespace='presale')) )
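The discovery pattern on its own (the plugin names below are made up for illustration): try to import `<app>.urls` and treat ImportError as "this plugin registers no URLs". Note that the same except clause also hides a urls module whose own imports are broken.

import importlib

for app_name in ('pretix_fakeplugin', 'json'):  # hypothetical plugin names
    try:
        importlib.import_module(app_name + '.urls')
        print(app_name, 'registered URLs')
    except ImportError:
        print(app_name, 'has no urls module')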
import unicodedata from django.forms import fields class XMLCompatCharField(fields.CharField): """ Strip 'control characters', as XML 1.0 does not allow them and the API may return data in XML. """ def to_python(self, value): value = super().to_python(value=value) return self.remove_control_characters(value) @staticmethod def remove_control_characters(str): return "".join(ch for ch in str if unicodedata.category(ch)[0] != "C")
Allow linebreaks in textareas (linebreaks are valid in XML)
import unicodedata from django.forms import fields class XMLCompatCharField(fields.CharField): """ Strip 'control characters', as XML 1.0 does not allow them and the API may return data in XML. """ def to_python(self, value): value = super().to_python(value=value) return self.remove_control_characters(value) @staticmethod def remove_control_characters(input): valid_chars = ['\n', '\r'] return "".join(ch for ch in input if unicodedata.category(ch)[0] != "C" or ch in valid_chars)
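A standalone demo of the whitelist, mirroring the method above: newline and carriage return survive, while other category-"C" control characters are stripped.

import unicodedata

def remove_control_characters(text, valid_chars=('\n', '\r')):
    # All Unicode control-ish categories (Cc, Cf, ...) start with 'C'.
    return "".join(ch for ch in text
                   if unicodedata.category(ch)[0] != "C" or ch in valid_chars)

print(repr(remove_control_characters('line1\nline2\x00\x08end')))
# -> 'line1\nline2end'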
# -*- encoding:utf8 -*- import os from model.oandapy import oandapy class OrderBook(object): def get_latest_orderbook(self, instrument, period, history): oanda_token = os.environ.get('OANDA_TOKEN') oanda = oandapy.API(environment="practice", access_token=oanda_token) orders = oanda.get_orderbook(instrument=instrument) try: timeset = orders.keys() timeset.sort() timeset.reverse() target_time = timeset[history] except: return None order = orders[target_time] order['time'] = target_time return order
Add OANDA environment selector, read from runtime environment variables.
# -*- encoding:utf8 -*- import os from model.oandapy import oandapy class OrderBook(object): def get_latest_orderbook(self, instrument, period, history): oanda_token = os.environ.get('OANDA_TOKEN') oanda_environment = os.environ.get('OANDA_ENVIRONMENT', 'practice') oanda = oandapy.API(environment=oanda_environment, access_token=oanda_token) orders = oanda.get_orderbook(instrument=instrument) try: timeset = orders.keys() timeset.sort() timeset.reverse() target_time = timeset[history] except: return None order = orders[target_time] order['time'] = target_time return order
import json import hashlib from wdim import orm from wdim.orm import fields from wdim.orm import exceptions class Blob(orm.Storable): HASH_METHOD = 'sha256' _id = fields.StringField(unique=True) data = fields.DictField() @classmethod async def create(cls, data): sha = hashlib.new(cls.HASH_METHOD, json.dumps(data).encode('utf-8')).hexdigest() try: # Classmethod supers need arguments for some reason return await super(Blob, cls).create(_id=sha, data=data) except exceptions.UniqueViolation: return await cls.load(sha) @property def hash(self): return self._id
Allow Blob to be accessed with __getitem__
import json import hashlib from typing import Any, Dict from wdim import orm from wdim.orm import fields from wdim.orm import exceptions class Blob(orm.Storable): HASH_METHOD = 'sha256' _id = fields.StringField(unique=True) data = fields.DictField() @classmethod async def create(cls, data: Dict[str, Any]) -> 'Blob': sha = hashlib.new(cls.HASH_METHOD, json.dumps(data).encode('utf-8')).hexdigest() try: # Classmethod supers need arguments for some reason return await super(Blob, cls).create(_id=sha, data=data) except exceptions.UniqueViolation: return await cls.load(sha) @property def hash(self) -> str: return self._id def __getitem__(self, key): return self.data[key]
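The delegation in isolation — BlobLike is a stand-in, since the real class needs a running wdim backend and an event loop:

class BlobLike:
    def __init__(self, data):
        self.data = data

    def __getitem__(self, key):
        # Forward indexing straight to the wrapped document.
        return self.data[key]

blob = BlobLike({'schema': 'v1', 'title': 'example'})
assert blob['title'] == 'example'  # same as blob.data['title']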
# Copyright 2008 Red Hat, Inc. # This file is part of python-fedora # # python-fedora is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # python-fedora is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with python-fedora; if not, see <http://www.gnu.org/licenses/> # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # ''' Python Fedora Modules to communicate with and help implement Fedora Services. ''' import gettext translation = gettext.translation('python-fedora', '/usr/share/locale', fallback=True) _ = translation.ugettext from fedora import release __version__ = release.VERSION # Needed for our unit tests from fedora.wsgi.test import websetup __all__ = ('_', 'release', '__version__', 'accounts', 'client', 'tg', 'websetup')
Undo the webtest import... it's causing runtime failure and unittests are currently broken anyway.
# Copyright 2008 Red Hat, Inc. # This file is part of python-fedora # # python-fedora is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # python-fedora is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with python-fedora; if not, see <http://www.gnu.org/licenses/> # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # ''' Python Fedora Modules to communicate with and help implement Fedora Services. ''' import gettext translation = gettext.translation('python-fedora', '/usr/share/locale', fallback=True) _ = translation.ugettext from fedora import release __version__ = release.VERSION __all__ = ('_', 'release', '__version__', 'accounts', 'client', 'tg', 'websetup')
import re LATEX_ERR_RE = re.compile(r'(?P<filename>[^:]+):(?P<line>[0-9]*):' r'\s*(?P<error>.*)') def parse_log(log, context_size=3): lines = log.split('\n') errors = [] for n, line in enumerate(lines): m = LATEX_ERR_RE.match(line) if m: err = m.groupdict().copy() err['context'] = lines[n:n+context_size] err['line'] = int(err['line']) errors.append(err) return errors
Use splitlines instead of split on new line.
import re LATEX_ERR_RE = re.compile(r'(?P<filename>[^:]+):(?P<line>[0-9]*):' r'\s*(?P<error>.*)') def parse_log(log, context_size=3): lines = log.splitlines() errors = [] for n, line in enumerate(lines): m = LATEX_ERR_RE.match(line) if m: err = m.groupdict().copy() err['context'] = lines[n:n+context_size] err['line'] = int(err['line']) errors.append(err) return errors
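Why splitlines() over split('\n'), shown concretely: it also handles \r\n endings and leaves no trailing empty element when the log ends with a newline.

log = 'a.tex:10: error one\r\nb.tex:20: error two\n'
print(log.split('\n'))   # ['a.tex:10: error one\r', 'b.tex:20: error two', '']
print(log.splitlines())  # ['a.tex:10: error one', 'b.tex:20: error two']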
# -*- coding: utf-8 -*- from nose.tools import assert_equal from openfisca_france.model.prelevements_obligatoires.prelevements_sociaux.cotisations_sociales.allegements import * from openfisca_core.periods import * from openfisca_france import FranceTaxBenefitSystem def test_coefficient_proratisation_only_contract_periods(): tax_benefit_system = FranceTaxBenefitSystem() scenario = tax_benefit_system.new_scenario() scenario.init_single_entity(period='2017-11', parent1=dict(salaire_de_base=2300, effectif_entreprise=1, code_postal_entreprise="75001", categorie_salarie=u'prive_non_cadre', contrat_de_travail_debut='2017-11-1', contrat_de_travail_fin='2017-11-30', allegement_fillon_mode_recouvrement=u'progressif')) simulation = scenario.new_simulation() assert_equal(simulation.calculate('coefficient_proratisation','2017-11'),1) assert_equal(simulation.calculate('coefficient_proratisation','2017-12'),0) assert_equal(simulation.calculate('coefficient_proratisation','2017-10'),0) assert_equal(simulation.calculate_add('coefficient_proratisation','2017'),1)
Fix evaluation date & test period
# -*- coding: utf-8 -*- from nose.tools import assert_equal from openfisca_france.model.prelevements_obligatoires.prelevements_sociaux.cotisations_sociales.allegements import * from openfisca_core.periods import * from openfisca_france import FranceTaxBenefitSystem def test_coefficient_proratisation_only_contract_periods(): tax_benefit_system = FranceTaxBenefitSystem() scenario = tax_benefit_system.new_scenario() scenario.init_single_entity(period='2017-11', parent1=dict(salaire_de_base=2300, effectif_entreprise=1, code_postal_entreprise="75001", categorie_salarie=u'prive_non_cadre', contrat_de_travail_debut='2017-11-1', contrat_de_travail_fin='2017-12-01', allegement_fillon_mode_recouvrement=u'progressif')) simulation = scenario.new_simulation() assert_equal(simulation.calculate('coefficient_proratisation','2017-11'),1) assert_equal(simulation.calculate('coefficient_proratisation','2017-12'),0) assert_equal(simulation.calculate('coefficient_proratisation','2017-10'),0) assert_equal(simulation.calculate_add('coefficient_proratisation','2017'),1)
from django.db import models class Device(models.Model): """Model for FfxOS devices data.""" model = models.CharField(max_length=120) manufacturer = models.CharField(max_length=120) def __unicode__(self): return '{0}, {1}'.format(self.manufacturer, self.model)
Order devices by manufacturer and model.
from django.db import models class Device(models.Model): """Model for FfxOS devices data.""" model = models.CharField(max_length=120) manufacturer = models.CharField(max_length=120) def __unicode__(self): return '{0}, {1}'.format(self.manufacturer, self.model) class Meta: ordering = ['manufacturer', 'model']
# -*- coding: utf-8 -*- from django.apps import AppConfig from django.conf import settings from django.db.models.signals import m2m_changed, post_migrate, post_save, pre_delete from neomodel import config config.AUTO_INSTALL_LABELS = False class ChemTrailsConfig(AppConfig): name = 'chemtrails' def ready(self): from .signals.handlers import ( m2m_changed_handler, post_migrate_handler, post_save_handler, pre_delete_handler ) m2m_changed.connect(receiver=m2m_changed_handler, dispatch_uid='chemtrails.signals.handlers.m2m_changed_handler') post_save.connect(receiver=post_save_handler, dispatch_uid='chemtrails.signals.handlers.post_save_handler') pre_delete.connect(receiver=pre_delete_handler, dispatch_uid='chemtrails.signals.handlers.pre_delete_handler') post_migrate.connect(receiver=post_migrate_handler, dispatch_uid='neomodel.core.install_all_labels') # Neo4j config config.DATABASE_URL = getattr(settings, 'NEOMODEL_NEO4J_BOLT_URL', config.DATABASE_URL) config.FORCE_TIMEZONE = getattr(settings, 'NEOMODEL_FORCE_TIMEZONE', False)
Read Neo4j config from ENV if present
# -*- coding: utf-8 -*- import os from django.apps import AppConfig from django.conf import settings from django.db.models.signals import m2m_changed, post_migrate, post_save, pre_delete from neomodel import config config.AUTO_INSTALL_LABELS = False class ChemTrailsConfig(AppConfig): name = 'chemtrails' def ready(self): from .signals.handlers import ( m2m_changed_handler, post_migrate_handler, post_save_handler, pre_delete_handler ) m2m_changed.connect(receiver=m2m_changed_handler, dispatch_uid='chemtrails.signals.handlers.m2m_changed_handler') post_save.connect(receiver=post_save_handler, dispatch_uid='chemtrails.signals.handlers.post_save_handler') pre_delete.connect(receiver=pre_delete_handler, dispatch_uid='chemtrails.signals.handlers.pre_delete_handler') post_migrate.connect(receiver=post_migrate_handler, dispatch_uid='neomodel.core.install_all_labels') # Neo4j config config.DATABASE_URL = getattr(settings, 'NEOMODEL_NEO4J_BOLT_URL', os.environ.get('NEOMODEL_NEO4J_BOLT_URL', config.DATABASE_URL)) config.FORCE_TIMEZONE = getattr(settings, 'NEOMODEL_FORCE_TIMEZONE', os.environ.get('NEOMODEL_FORCE_TIMEZONE', False))
from website.app import init_app from website.models import Node, User from framework import Q from framework.analytics import piwik app = init_app("website.settings", set_backends=True) # NOTE: This is a naive implementation for migration, requiring a POST request # for every user and every node. It is possible to bundle these together in a # single request, but it would require duplication of logic and strict error # checking of the result. Doing it this way is idempotent, and allows any # exceptions raised to halt the process with a usable error message. for user in User.find(): if user.piwik_token: continue piwik.create_user(user) for node in Node.find(Q('is_public', 'eq', True), Q('is_deleted', 'eq', False)): if node.piwik_site_id: continue piwik._provision_node(node)
Update to latest version of ODM: Join queries with `&`, not `,`
from website.app import init_app from website.models import Node, User from framework import Q from framework.analytics import piwik app = init_app("website.settings", set_backends=True) # NOTE: This is a naive implementation for migration, requiring a POST request # for every user and every node. It is possible to bundle these together in a # single request, but it would require duplication of logic and strict error # checking of the result. Doing it this way is idempotent, and allows any # exceptions raised to halt the process with a usable error message. for user in User.find(): if user.piwik_token: continue piwik.create_user(user) for node in Node.find(Q('is_public', 'eq', True) & Q('is_deleted', 'eq', False)): if node.piwik_site_id: continue piwik._provision_node(node)
""" Settings for DEMO_MODE. Must set DEMO_MODE = True in local_settings.py. """ # Views that are visible in demo mode. DEMO_SAFE_VIEWS = [ 'main.views.home_view', 'main.views.project_list_view', 'main.views.project_view', 'main.views.tab_root_analyze', 'main.views.reference_genome_list_view', 'main.views.reference_genome_view', 'main.views.sample_list_view', 'main.views.alignment_list_view', 'main.views.alignment_view', 'main.views.sample_alignment_error_view', 'main.views.variant_set_list_view', 'main.views.variant_set_view', 'main.views.single_variant_view', 'main.xhr_handlers.get_variant_list', 'main.xhr_handlers.get_variant_set_list', 'main.xhr_handlers.get_gene_list', 'main.xhr_handlers.get_alignment_groups', 'main.xhr_handlers.is_materialized_view_valid', 'main.xhr_handlers.get_ref_genomes', 'main.xhr_handlers.compile_jbrowse_and_redirect', 'main.template_xhrs.variant_filter_controls', 'main.demo_view_overrides.login_demo_account', 'django.contrib.auth.views.logout' ]
Allow refresh materialized view in DEMO_MODE.
""" Settings for DEMO_MODE. Must set DEMO_MODE = True in local_settings.py. """ # Views that are visible in demo mode. DEMO_SAFE_VIEWS = [ 'main.views.home_view', 'main.views.project_list_view', 'main.views.project_view', 'main.views.tab_root_analyze', 'main.views.reference_genome_list_view', 'main.views.reference_genome_view', 'main.views.sample_list_view', 'main.views.alignment_list_view', 'main.views.alignment_view', 'main.views.sample_alignment_error_view', 'main.views.variant_set_list_view', 'main.views.variant_set_view', 'main.views.single_variant_view', 'main.xhr_handlers.get_variant_list', 'main.xhr_handlers.get_variant_set_list', 'main.xhr_handlers.get_gene_list', 'main.xhr_handlers.refresh_materialized_variant_table', 'main.xhr_handlers.get_alignment_groups', 'main.xhr_handlers.is_materialized_view_valid', 'main.xhr_handlers.get_ref_genomes', 'main.xhr_handlers.compile_jbrowse_and_redirect', 'main.template_xhrs.variant_filter_controls', 'main.demo_view_overrides.login_demo_account', 'django.contrib.auth.views.logout' ]
from django.forms.widgets import Input, ClearableFileInput from django.template.loader import render_to_string class CIImgWidget(ClearableFileInput): def render(self, name, value, attrs=None): try: attrs["data-value"] = getattr(value, "url", "") except ValueError: # attribute has no file associated with it. attrs["data-value"] = "" return super(CIImgWidget, self).render(name, value, attrs) class CIThumbnailWidget(Input): input_type = "text" def render(self, name, value, attrs=None, renderer=None): if attrs: attrs.update(self.attrs) attrs["type"] = "hidden" input_field = super(CIThumbnailWidget, self).render(name, value, attrs) return render_to_string("cropimg/cropimg_widget.html", { "name": name, "value": value, "attrs": attrs, "input_field": input_field }) class Media: js = ("cropimg/js/jquery_init.js", "cropimg/js/cropimg.jquery.js", "cropimg/js/cropimg_init.js") css = {"all": ["cropimg/resource/cropimg.css"]}
Make sure that the admin widget also supports Django 2
from django.forms.widgets import Input, ClearableFileInput from django.template.loader import render_to_string class CIImgWidget(ClearableFileInput): def render(self, name, value, attrs=None, renderer=None, **kwargs): try: attrs["data-value"] = getattr(value, "url", "") except ValueError: # attribute has no file associated with it. attrs["data-value"] = "" return super(CIImgWidget, self).render(name, value, attrs) class CIThumbnailWidget(Input): input_type = "text" def render(self, name, value, attrs=None, renderer=None, **kwargs): if attrs: attrs.update(self.attrs) attrs["type"] = "hidden" input_field = super(CIThumbnailWidget, self).render(name, value, attrs) return render_to_string("cropimg/cropimg_widget.html", { "name": name, "value": value, "attrs": attrs, "input_field": input_field }) class Media: js = ("cropimg/js/jquery_init.js", "cropimg/js/cropimg.jquery.js", "cropimg/js/cropimg_init.js") css = {"all": ["cropimg/resource/cropimg.css"]}
from django import template from blanc_basic_news.news.models import Category, Post register = template.Library() @register.assignment_tag def get_news_categories(): return Category.objects.all() @register.assignment_tag def get_news_months(): return Post.objects.dates('date', 'month')
Add a template tag to get the latest news posts.
from django import template from django.utils import timezone from blanc_basic_news.news.models import Category, Post register = template.Library() @register.assignment_tag def get_news_categories(): return Category.objects.all() @register.assignment_tag def get_news_months(): return Post.objects.dates('date', 'month') @register.assignment_tag def get_latest_news(count): return Post.objects.select_related().filter( published=True, date__lte=timezone.now())[:count]
#!/usr/bin/env python import os from serf_master import SerfHandlerProxy from base_handler import BaseHandler try: from my_handler import MyHandler except ImportError: print "Could not import user's handler." print "Defaulting to dummy handler." MyHandler = BaseHandler if __name__ == '__main__': handler = SerfHandlerProxy() handler.register(os.environ.get('ROLE', 'no_role'), MyHandler()) handler.run()
Set 'no_role' if role is not given
#!/usr/bin/env python import os from serf_master import SerfHandlerProxy from base_handler import BaseHandler try: from my_handler import MyHandler except ImportError: print "Could not import user's handler." print "Defaulting to dummy handler." MyHandler = BaseHandler if __name__ == '__main__': handler = SerfHandlerProxy() role = os.environ.get('ROLE') or 'no_role' handler.register(role, MyHandler()) handler.run()
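The case the `or` form covers, runnable as-is: get()'s default only applies when the key is absent, while `or` also catches a variable that is set but empty.

import os

os.environ['ROLE'] = ''
print(os.environ.get('ROLE', 'no_role'))    # '' -- the empty role leaks through
print(os.environ.get('ROLE') or 'no_role')  # 'no_role'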
import sublime, sublime_plugin import os from .Mouse import MouseCommand class AcmePlumbingSend(MouseCommand): """ Sends the current selected text to the plumbing """ def run(self, edit): file_name = self.view.file_name() message = { "data": self.view.substr(self.selection_at_cursor()), "cwd": os.path.dirname(file_name) if file_name else None, "src": self.view.id(), } self.remove_selection("1") # in case it was expanded self.view.sel().clear() self.view.run_command("acme_plumbing", message)
Remove artefact from earlier left mouse button selection. You used to be able to select with the left mouse button and then right click. You can't now.
import sublime, sublime_plugin import os from .Mouse import MouseCommand class AcmePlumbingSend(MouseCommand): """ Sends the current selected text to the plumbing """ def run(self, edit): file_name = self.view.file_name() message = { "data": self.view.substr(self.selection_at_cursor()), "cwd": os.path.dirname(file_name) if file_name else None, "src": self.view.id(), } self.view.sel().clear() self.view.run_command("acme_plumbing", message)
from django.contrib.auth.models import User from django.db import models from django.db.models import fields from django.utils import timezone class Transaction(models.Model): EXPENSE = 'exp' INCOME = 'inc' CATEGORY_CHOICES = ( (EXPENSE, 'expense'), (INCOME, 'income'), ) title = fields.CharField(max_length=255) amount = fields.DecimalField(max_digits=10, decimal_places=2) category = fields.CharField(max_length=3, choices=CATEGORY_CHOICES) created = fields.DateTimeField(auto_now=True) modified = fields.DateTimeField(default=timezone.now) user = models.ForeignKey(User) def __str__(self): return "{}".format(self.title)
Set created time with default callback. auto_now is evil, as any editing or overriding is almost completely impossible (e.g. in unit tests)
from django.contrib.auth.models import User from django.db import models from django.db.models import fields from django.utils import timezone class Transaction(models.Model): EXPENSE = 'exp' INCOME = 'inc' CATEGORY_CHOICES = ( (EXPENSE, 'expense'), (INCOME, 'income'), ) title = fields.CharField(max_length=255) amount = fields.DecimalField(max_digits=10, decimal_places=2) category = fields.CharField(max_length=3, choices=CATEGORY_CHOICES) created = fields.DateTimeField(default=timezone.now, editable=False) modified = fields.DateTimeField(default=timezone.now) user = models.ForeignKey(User) def __str__(self): return "{}".format(self.title)
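Why a default beats auto_now: a default can be overridden when a caller needs to, which auto_now never allows (editable=False only hides the field from forms, not from the ORM). A hypothetical usage sketch against the model above — it needs a database and a real user, so it is not standalone:

import datetime
from django.utils import timezone

Transaction.objects.create(
    title='backfilled', amount='9.99', category=Transaction.EXPENSE,
    # Honored with default=timezone.now; silently replaced under auto_now.
    created=timezone.now() - datetime.timedelta(days=30),
    user=some_user,  # hypothetical existing User instance
)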
#!/usr/bin/env python # # Copyright 2010 Hunter Freyer and Michael Kelly # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from google.appengine.ext import webapp from google.appengine.ext.webapp import util import html_handlers import models def main(): # TODO(mjkelly): Clean up these handlers. application = webapp.WSGIApplication([ ("/", html_handlers.MainPageHandler), ("/about", html_handlers.AboutHandler), ("/make", html_handlers.MakeHandler), ("/make.do", html_handlers.MakeSubmitHandler), ("/mymfks", html_handlers.MyMfksHandler), ("/vote/(.*)", html_handlers.VoteHandler), ("/vote.do", html_handlers.VoteSubmitHandler), ("/i/(.*)", html_handlers.EntityImageHandler), ("/.*", html_handlers.CatchAllHandler), ]) util.run_wsgi_app(application) if __name__ == '__main__': main()
Remove TODO -- handlers have been cleaned up.
#!/usr/bin/env python # # Copyright 2010 Hunter Freyer and Michael Kelly # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from google.appengine.ext import webapp from google.appengine.ext.webapp import util import html_handlers import models def main(): application = webapp.WSGIApplication([ ("/", html_handlers.MainPageHandler), ("/about", html_handlers.AboutHandler), ("/make", html_handlers.MakeHandler), ("/make.do", html_handlers.MakeSubmitHandler), ("/mymfks", html_handlers.MyMfksHandler), ("/vote/(.*)", html_handlers.VoteHandler), ("/vote.do", html_handlers.VoteSubmitHandler), ("/i/(.*)", html_handlers.EntityImageHandler), ("/.*", html_handlers.CatchAllHandler), ]) util.run_wsgi_app(application) if __name__ == '__main__': main()
import os from dotenv import load_dotenv, find_dotenv load_dotenv(find_dotenv()) OPS_KEY = os.environ.get("OPS_KEY") OPS_SECRET = os.environ.get("OPS_SECRET") TWITTER_CONSUMER_ACCESS = os.environ['TWITTER_CONSUMER_ACCESS'] TWITTER_CONSUMER_SECRET = os.environ['TWITTER_CONSUMER_SECRET'] TWITTER_ACCESS = os.environ['TWITTER_ACCESS'] TWITTER_SECRET = os.environ['TWITTER_SECRET']
Access envvars using standard dictionary access instead of the get method, to ensure missing vars cause an exception to be raised
import os from dotenv import load_dotenv, find_dotenv load_dotenv(find_dotenv()) OPS_KEY = os.environ["OPS_KEY"] OPS_SECRET = os.environ["OPS_SECRET"] TWITTER_CONSUMER_ACCESS = os.environ['TWITTER_CONSUMER_ACCESS'] TWITTER_CONSUMER_SECRET = os.environ['TWITTER_CONSUMER_SECRET'] TWITTER_ACCESS = os.environ['TWITTER_ACCESS'] TWITTER_SECRET = os.environ['TWITTER_SECRET']
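Fail-fast in one snippet: bracket access raises immediately on a missing variable, instead of letting None propagate into API clients later.

import os

os.environ.pop('OPS_KEY', None)
print(os.environ.get('OPS_KEY'))  # the old form: silently None
try:
    os.environ['OPS_KEY']
except KeyError as exc:
    print('missing configuration:', exc)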
import socket class Socket: TCP = 0 UDP = 1 def __init__(self, type): if type == self.TCP: self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_TCP) else: self.s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP) self.s.setblocking(False) def connect(self, host, port): self.s.connect((host, port)) def close(self): self.s.close() def send(self, data): self.s.sendall(data) def recv(self, num): try: return self.s.recv(num) except BlockingIOError: pass def get_address(self): return self.s.getsockname()[0] def get_port(self): return self.s.getsockname()[1]
Add a few functions to Socket class
import socket class Socket: TCP = 0 UDP = 1 def __init__(self, type): if type == self.TCP: self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_TCP) else: self.s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP) self.s.setblocking(False) def connect(self, host, port): self.s.connect((host, port)) def close(self): self.s.close() def send(self, data): self.s.sendall(data) def recv(self, num): try: return self.s.recv(num) except BlockingIOError: pass def bind(self, addr=("", 0)): self.s.bind(addr) def sendto(self, data, addr): self.s.sendto(data, addr) def recvfrom(self, num): try: return self.s.recvfrom(num) except BlockingIOError: return None, None def get_address(self): return self.s.getsockname()[0] def get_port(self): return self.s.getsockname()[1]
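A loopback round-trip with the new UDP helpers (uses the Socket class above; bind's default lets the OS pick free ports):

import time

a, b = Socket(Socket.UDP), Socket(Socket.UDP)
a.bind()
b.bind()
a.sendto(b'ping', ('127.0.0.1', b.get_port()))
time.sleep(0.1)  # the sockets are non-blocking; give the datagram a moment
data, addr = b.recvfrom(16)
print(data, addr)  # b'ping' ('127.0.0.1', <a's port>)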
from consts.notification_type import NotificationType from helpers.model_to_dict import ModelToDict from notifications.base_notification import BaseNotification class MatchScoreNotification(BaseNotification): def __init__(self, match): self.match = match self._event_feed = match.event.id # TODO Add notion of District to Match model? @property def _type(self): return NotificationType.MATCH_SCORE def _build_dict(self): data = {} data['message_type'] = NotificationType.type_names[self._type] data['message_data'] = {} data['message_data']['event_name'] = self.match.event.get().name data['message_data']['match'] = ModelToDict.matchConverter(self.match) return data
Add district feed to match score notification
from consts.notification_type import NotificationType from helpers.model_to_dict import ModelToDict from notifications.base_notification import BaseNotification class MatchScoreNotification(BaseNotification): def __init__(self, match): self.match = match self.event = match.event.get() self._event_feed = self.event.key_name self._district_feed = self.event.event_district_enum @property def _type(self): return NotificationType.MATCH_SCORE def _build_dict(self): data = {} data['message_type'] = NotificationType.type_names[self._type] data['message_data'] = {} data['message_data']['event_name'] = self.event.name data['message_data']['match'] = ModelToDict.matchConverter(self.match) return data
from kombu import Exchange, Queue import os BROKER_URL = os.environ['CELERY_BROKER'] CELERY_RESULT_BACKEND='rpc://' CELERY_RESULT_PERSISTENT = True CELERY_DISABLE_RATE_LIMITS = True CELERY_TASK_SERIALIZER = 'json' CELERY_RESULT_SERIALIZER = 'json' CELERY_ACCEPT_CONTENT = ['json'] imputation_exchange = Exchange('imputation', type='direct') ancestry_exchange = Exchange('ancestry',type='direct') riskprediction_exchange = Exchange('riskprediction',type='direct') CELERY_QUEUES = ( Queue('imputation', imputation_exchange, routing_key='imputation'), Queue('ancestry', ancestry_exchange, routing_key='ancestry'), Queue('riskprediction',riskprediction_exchange,routing_key='riskprediction') ) CELERY_ROUTES = { 'thehonestgenepipeline.imputation.test':{'queue':'imputation'}, 'thehonestgenepipeline.imputation.imputation':{'queue':'imputation'}, 'thehonestgenepipeline.imputation.convert':{'queue':'imputation'}, 'thehonestgenepipeline.imputation.impute':{'queue':'imputation'}, 'thehonestgenepipeline.ancestry.analysis':{'queue':'ancestry'}, 'thehonestgenepipeline.riskprediction.run':{'queue':'riskprediction'} }
Change from rpc to amqp backend. Otherwise retrieving results is problematic
from kombu import Exchange, Queue import os BROKER_URL = os.environ['CELERY_BROKER'] CELERY_RESULT_BACKEND='amqp://' CELERY_RESULT_PERSISTENT = True CELERY_DISABLE_RATE_LIMITS = True CELERY_TASK_SERIALIZER = 'json' CELERY_RESULT_SERIALIZER = 'json' CELERY_ACCEPT_CONTENT = ['json'] imputation_exchange = Exchange('imputation', type='direct') ancestry_exchange = Exchange('ancestry',type='direct') riskprediction_exchange = Exchange('riskprediction',type='direct') CELERY_QUEUES = ( Queue('imputation', imputation_exchange, routing_key='imputation'), Queue('ancestry', ancestry_exchange, routing_key='ancestry'), Queue('riskprediction',riskprediction_exchange,routing_key='riskprediction') ) CELERY_ROUTES = { 'thehonestgenepipeline.imputation.test':{'queue':'imputation'}, 'thehonestgenepipeline.imputation.imputation':{'queue':'imputation'}, 'thehonestgenepipeline.imputation.convert':{'queue':'imputation'}, 'thehonestgenepipeline.imputation.impute':{'queue':'imputation'}, 'thehonestgenepipeline.ancestry.analysis':{'queue':'ancestry'}, 'thehonestgenepipeline.riskprediction.run':{'queue':'riskprediction'} }
from cookiecutter.main import is_repo_url def test_is_repo_url(): """Verify is_repo_url works.""" assert is_repo_url('gitolite@server:team/repo') is True assert is_repo_url('[email protected]:audreyr/cookiecutter.git') is True assert is_repo_url('https://github.com/audreyr/cookiecutter.git') is True assert is_repo_url('gh:audreyr/cookiecutter-pypackage') is True assert is_repo_url('https://bitbucket.org/pokoli/cookiecutter.hg') is True assert is_repo_url('/audreyr/cookiecutter.git') is False assert is_repo_url('/home/audreyr/cookiecutter') is False appveyor_temp_dir = ( 'c:\\users\\appveyor\\appdata\\local\\temp\\1\\pytest-0\\' 'test_default_output_dir0\\template' ) assert is_repo_url(appveyor_temp_dir) is False
Implement a test specifically for abbreviations
from cookiecutter.main import is_repo_url, expand_abbreviations def test_is_repo_url(): """Verify is_repo_url works.""" assert is_repo_url('gitolite@server:team/repo') is True assert is_repo_url('[email protected]:audreyr/cookiecutter.git') is True assert is_repo_url('https://github.com/audreyr/cookiecutter.git') is True assert is_repo_url('https://bitbucket.org/pokoli/cookiecutter.hg') is True assert is_repo_url('/audreyr/cookiecutter.git') is False assert is_repo_url('/home/audreyr/cookiecutter') is False appveyor_temp_dir = ( 'c:\\users\\appveyor\\appdata\\local\\temp\\1\\pytest-0\\' 'test_default_output_dir0\\template' ) assert is_repo_url(appveyor_temp_dir) is False def test_expand_abbreviations(): template = 'gh:audreyr/cookiecutter-pypackage' # This is not a valid repo url just yet! # First `main.expand_abbreviations` needs to translate it assert is_repo_url(template) is False expanded_template = expand_abbreviations(template, {}) assert is_repo_url(expanded_template) is True
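What the abbreviation step does, in isolation — an illustrative reimplementation; the mapping and logic here are stand-ins, since cookiecutter ships its own table:

ABBREVIATIONS = {'gh': 'https://github.com/{0}.git'}

def expand(template):
    prefix, sep, rest = template.partition(':')
    if sep and prefix in ABBREVIATIONS:
        return ABBREVIATIONS[prefix].format(rest)
    return template

print(expand('gh:audreyr/cookiecutter-pypackage'))
# -> https://github.com/audreyr/cookiecutter-pypackage.git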
"""test_void.py Test the parsing of VoID dump files. """ import RDF from glharvest import util def test_returns_none_if_the_registry_file_is_not_found(): m = util.load_file_into_model("nonexistantvoidfile.ttl") assert m is None def test_can_load_a_simple_void_file(): model = util.load_file_into_model('tests/data/simple-void.ttl', 'turtle') p = void.parse_void_model(m) assert p == { 'http://lod.dataone.org/test': { 'dataDump': 'http://lod.dataone.org/test.ttl', 'features': [ 'http://lod.dataone.org/fulldump' ] } }
Fix imports for void tests
"""test_void.py Test the parsing of VoID dump files. """ import RDF from glharvest import util, void def test_returns_none_if_the_registry_file_is_not_found(): m = util.load_file_into_model("nonexistantvoidfile.ttl") assert m is None def test_can_load_a_simple_void_file(): m = util.load_file_into_model('tests/data/simple-void.ttl', 'turtle') p = void.parse_void_model(m) assert p == { 'http://lod.dataone.org/test': { 'dataDump': 'http://lod.dataone.org/test.ttl', 'features': [ 'http://lod.dataone.org/fulldump' ] } }
from django.core.exceptions import ObjectDoesNotExist from db.models.repos import CodeReference def get_project_code_reference(project, commit=None): if not project.has_code: return None repo = project.repo if commit: try: return CodeReference.objects.get(repo=repo, commit=commit) except ObjectDoesNotExist: return None # If no commit is provided we get the last commit, and save new ref if not found last_commit = repo.last_commit if not last_commit: return None code_reference, _ = CodeReference.objects.get_or_create(repo=repo, commit=last_commit[0]) return code_reference def get_code_reference(instance, commit): return get_project_code_reference(instance.project, commit=commit) def assign_code_reference(instance, commit=None): if instance.code_reference is not None: return if not commit and instance.specification and instance.specification.build: commit = instance.specification.build.commit code_reference = get_code_reference(instance=instance, commit=commit) if code_reference: instance.code_reference = code_reference return instance
Extend code references with external repos
from django.core.exceptions import ObjectDoesNotExist from db.models.repos import CodeReference def get_code_reference(instance, commit=None, external_repo=None): project = instance.project repo = project.repo if project.has_code else external_repo if not repo: return None if commit: try: return CodeReference.objects.get(repo=repo, commit=commit) except ObjectDoesNotExist: return None # If no commit is provided we get the last commit, and save new ref if not found last_commit = repo.last_commit if not last_commit: return None code_reference, _ = CodeReference.objects.get_or_create(repo=repo, commit=last_commit[0]) return code_reference def assign_code_reference(instance, commit=None): if instance.code_reference is not None: return build = instance.specification.build if instance.specification else None if not commit and build: commit = build.commit external_repo = build.git if build and build.git else None code_reference = get_code_reference(instance=instance, commit=commit, external_repo=external_repo) if code_reference: instance.code_reference = code_reference return instance
from __future__ import absolute_import, unicode_literals import logging from io import StringIO from . import codec from .file import File log = logging.getLogger(__name__) class TextFile(File): """ Derived from :class:`pysparkling.fileio.File`. :param file_name: Any text file name. Supports the schemes ``http://``, ``s3://`` and ``file://``. """ def __init__(self, file_name): File.__init__(self, file_name) def load(self, encoding='utf8'): """ Load the data from a file. :param encoding: (optional) The character encoding of the file. :returns: An ``io.StringIO`` instance. Use ``getvalue()`` to get a string. """ if type(self.codec) == codec.Codec and \ getattr(self.fs, 'load_text'): print(self.codec) stream = self.fs.load_text() else: stream = self.fs.load() stream = StringIO( self.codec.decompress(stream).read().decode(encoding) ) return stream def dump(self, stream=None, encoding='utf8'): """ Writes a stream to a file. :param stream: An ``io.StringIO`` instance. :param encoding: (optional) The character encoding of the file. :returns: self """ if stream is None: stream = StringIO() stream = self.codec.compress(stream.read().encode(encoding)) self.fs.dump(stream) return self
Add fileio.TextFile and use it when reading and writing text files in RDD and Context.
from __future__ import absolute_import, unicode_literals import logging from io import BytesIO, StringIO from . import codec from .file import File log = logging.getLogger(__name__) class TextFile(File): """ Derived from :class:`pysparkling.fileio.File`. :param file_name: Any text file name. Supports the schemes ``http://``, ``s3://`` and ``file://``. """ def __init__(self, file_name): File.__init__(self, file_name) def load(self, encoding='utf8'): """ Load the data from a file. :param encoding: (optional) The character encoding of the file. :returns: An ``io.StringIO`` instance. Use ``getvalue()`` to get a string. """ if type(self.codec) == codec.Codec and \ getattr(self.fs, 'load_text'): print(self.codec) stream = self.fs.load_text() else: stream = self.fs.load() stream = StringIO( self.codec.decompress(stream).read().decode(encoding) ) return stream def dump(self, stream=None, encoding='utf8'): """ Writes a stream to a file. :param stream: An ``io.StringIO`` instance. :param encoding: (optional) The character encoding of the file. :returns: self """ if stream is None: stream = StringIO() stream = self.codec.compress( BytesIO(stream.read().encode(encoding)) ) self.fs.dump(stream) return self
AUTHENTICATION_BACKENDS = ( 'django.contrib.auth.backends.ModelBackend', 'lib.pam_backend.PamBackend', ) AUTH_USER_MODEL = 'accounts.User' LOGIN_URL = '/login' PAM_SERVICES = { 'default': 'curc-twofactor-duo', 'csu': 'csu' }
Change PAM stack back to login
AUTHENTICATION_BACKENDS = ( 'django.contrib.auth.backends.ModelBackend', 'lib.pam_backend.PamBackend', ) AUTH_USER_MODEL = 'accounts.User' LOGIN_URL = '/login' PAM_SERVICES = { 'default': 'login', 'csu': 'csu' }
"""Configuration for Django system.""" __version__ = "0.16.0" __version_info__ = tuple( [ int(num) if num.isdigit() else num for num in __version__.replace("-", ".", 1).split(".") ] )
Increment version number to 0.16.1
"""Configuration for Django system.""" __version__ = "0.16.1" __version_info__ = tuple( [ int(num) if num.isdigit() else num for num in __version__.replace("-", ".", 1).split(".") ] )
from unittest import TestCase from pymemcache.serde import (python_memcache_serializer, python_memcache_deserializer) class TestSerde(TestCase): def check(self, value): serialized, flags = python_memcache_serializer(b'key', value) deserialized = python_memcache_deserializer(b'key', serialized, flags) assert deserialized == value def test_str(self): self.check('value') def test_int(self): self.check(1) def test_long(self): self.check(123123123123123123123) def test_pickleable(self): self.check({'a': 'dict'})
Use byte strings after serializing with serde. The pymemcache client will return a byte string, so we'll do the same to test that the deserializer works as expected. This currently fails with Python 3.
from unittest import TestCase

from pymemcache.serde import (python_memcache_serializer,
                              python_memcache_deserializer)
import pytest
import six


@pytest.mark.unit()
class TestSerde(TestCase):

    def check(self, value):
        serialized, flags = python_memcache_serializer(b'key', value)

        # pymemcache stores values as byte strings, so we immediately encode
        # the value if needed so deserialization works as it would with a
        # real server
        if not isinstance(serialized, six.binary_type):
            serialized = six.text_type(serialized).encode('ascii')

        deserialized = python_memcache_deserializer(b'key', serialized, flags)
        assert deserialized == value

    def test_bytes(self):
        self.check(b'value')

    def test_unicode(self):
        self.check(u'value')

    def test_int(self):
        self.check(1)

    def test_long(self):
        self.check(123123123123123123123)

    def test_pickleable(self):
        self.check({'a': 'dict'})
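The encode step on its own (six is already a test dependency): non-bytes serializations are coerced to ASCII bytes before comparison, mirroring what a round-trip through a real memcached returns.

import six

serialized = 123  # an int flagged for native serialization
if not isinstance(serialized, six.binary_type):
    serialized = six.text_type(serialized).encode('ascii')
print(serialized)  # b'123'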
from collections import namedtuple Remote = namedtuple('Remote', ('name', 'url')) CommitInfo = namedtuple("CommitInfo", ('commit', 'origin', 'remote_repo', 'ref')) class PRInfo(object): def __init__(self, json): self.json = json @property def base_sha(self): return self.json['base']['sha'] @property def head_sha(self): return self.json['head']['sha'] @property def base_ref(self): return self.json['base']['ref'] @property def head_ref(self): return self.json['head']['ref'] @property def has_remote_repo(self): return self.json['base']['repo']['owner']['login'] != \ self.json['head']['repo']['owner']['login'] @property def remote_repo(self): remote = None if self.has_remote_repo: remote = Remote(name=self.json['head']['repo']['owner']['login'], url=self.json['head']['repo']['ssh_url']) return remote def to_commit_info(self): return CommitInfo(self.base_sha, self.head_sha, self.remote_repo, self.head_ref) def get_pr_info(requester, reponame, number): "Returns the PullRequest as a PRInfo object" resp = requester.get( 'https://api.github.com/repos/%s/pulls/%s' % (reponame, number)) return PRInfo(resp.json())
Use github's `clone_url` instead of mandating ssh.
from collections import namedtuple Remote = namedtuple('Remote', ('name', 'url')) CommitInfo = namedtuple("CommitInfo", ('commit', 'origin', 'remote_repo', 'ref')) class PRInfo(object): def __init__(self, json): self.json = json @property def base_sha(self): return self.json['base']['sha'] @property def head_sha(self): return self.json['head']['sha'] @property def base_ref(self): return self.json['base']['ref'] @property def head_ref(self): return self.json['head']['ref'] @property def has_remote_repo(self): return self.json['base']['repo']['owner']['login'] != \ self.json['head']['repo']['owner']['login'] @property def remote_repo(self): remote = None if self.has_remote_repo: remote = Remote(name=self.json['head']['repo']['owner']['login'], url=self.json['head']['repo']['clone_url']) return remote def to_commit_info(self): return CommitInfo(self.base_sha, self.head_sha, self.remote_repo, self.head_ref) def get_pr_info(requester, reponame, number): "Returns the PullRequest as a PRInfo object" resp = requester.get( 'https://api.github.com/repos/%s/pulls/%s' % (reponame, number)) return PRInfo(resp.json())
# -*- coding: utf-8 -*- import hummus from tempfile import NamedTemporaryFile import os def assert_pdf(filename): with open(filename, 'rb') as stream: assert stream.read(4) == b'%PDF' def test_document_file(): with NamedTemporaryFile(delete=False) as stream: # Run through a normal cycle. document = hummus.Document(filename=stream.name) document.begin() document.end() # Open and test the file. assert_pdf(stream.name) # Remove the file. os.remove(stream.name) def test_document_stream(): with NamedTemporaryFile(delete=False) as stream: # Run through a normal cycle. document = hummus.Document(stream) document.begin() document.end() # Open and test the file. assert_pdf(stream.name) # Remove the file. os.remove(stream.name) def test_page_size(): page = hummus.Page() assert page.media_box.left == 0 page.media_box = hummus.Rectangle(0, 0, 800, 1000) assert page.media_box.bottom == 1000 def test_basic_text(): with NamedTemporaryFile(delete=False) as stream: with hummus.Document(stream) as document: with document.Page() as page: pass # Open and test the file. assert_pdf(stream.name) # Remove the file. os.remove(stream.name)
Update tests for new API.
# -*- coding: utf-8 -*- import hummus from tempfile import NamedTemporaryFile import os def assert_pdf(filename): with open(filename, 'rb') as stream: assert stream.read(4) == b'%PDF' def test_document_file(): with NamedTemporaryFile(delete=False) as stream: # Run through a normal cycle. document = hummus.Document(stream.name) document.begin() document.end() # Open and test the file. assert_pdf(stream.name) # Remove the file. os.remove(stream.name) def test_document_stream(): with NamedTemporaryFile(delete=False) as stream: # Run through a normal cycle. document = hummus.Document(stream) document.begin() document.end() # Open and test the file. assert_pdf(stream.name) # Remove the file. os.remove(stream.name) def test_page_size(): page = hummus.Page() assert page.media_box.left == 0 page.media_box = hummus.Rectangle(0, 0, 800, 1000) assert page.media_box.bottom == 1000 def test_basic_text(): with NamedTemporaryFile(delete=False) as stream: with hummus.Document(stream) as document: with document.Page() as page: pass # Open and test the file. assert_pdf(stream.name) # Remove the file. os.remove(stream.name)
"""empty message Revision ID: 0165_another_letter_org Revises: 0164_add_organisation_to_service Create Date: 2017-06-29 12:44:16.815039 """ # revision identifiers, used by Alembic. revision = '0165_another_letter_org' down_revision = '0164_add_organisation_to_service' from alembic import op NEW_ORGANISATIONS = [ ('502', 'Welsh Revenue Authority'), ] def upgrade(): for numeric_id, name in NEW_ORGANISATIONS: op.execute(""" INSERT INTO dvla_organisation VALUES ('{}', '{}') """.format(numeric_id, name)) def downgrade(): for numeric_id, _ in NEW_ORGANISATIONS: op.execute(""" DELETE FROM dvla_organisation WHERE id = '{}' """.format(numeric_id))
Add East Riding of Yorkshire Council to migration
"""empty message Revision ID: 0165_another_letter_org Revises: 0164_add_organisation_to_service Create Date: 2017-06-29 12:44:16.815039 """ # revision identifiers, used by Alembic. revision = '0165_another_letter_org' down_revision = '0164_add_organisation_to_service' from alembic import op NEW_ORGANISATIONS = [ ('502', 'Welsh Revenue Authority'), ('503', 'East Riding of Yorkshire Council'), ] def upgrade(): for numeric_id, name in NEW_ORGANISATIONS: op.execute(""" INSERT INTO dvla_organisation VALUES ('{}', '{}') """.format(numeric_id, name)) def downgrade(): for numeric_id, _ in NEW_ORGANISATIONS: op.execute(""" DELETE FROM dvla_organisation WHERE id = '{}' """.format(numeric_id))
import datetime as dt import humanize def test_i18n(): three_seconds = dt.timedelta(seconds=3) assert humanize.naturaltime(three_seconds) == "3 seconds ago" humanize.i18n.activate("ru_RU") assert humanize.naturaltime(three_seconds) == "3 секунды назад" humanize.i18n.deactivate() assert humanize.naturaltime(three_seconds) == "3 seconds ago"
Add i18n test for humanize.ordinal
import datetime as dt import humanize def test_i18n(): three_seconds = dt.timedelta(seconds=3) assert humanize.naturaltime(three_seconds) == "3 seconds ago" assert humanize.ordinal(5) == "5th" try: humanize.i18n.activate("ru_RU") assert humanize.naturaltime(three_seconds) == "3 секунды назад" assert humanize.ordinal(5) == "5ый" finally: humanize.i18n.deactivate() assert humanize.naturaltime(three_seconds) == "3 seconds ago" assert humanize.ordinal(5) == "5th"
#!/usr/bin/python # -*- coding: utf-8 -*- import sys import re import codecs ping = re.compile(u'.平') shang = re.compile(u'上聲') ru = re.compile(u'入') qu = re.compile(u'去') mydict = { } # f = open("../Data/TangRhymesMap.csv") f = codecs.open("../Data/TangRhymesMap.csv", "r", "utf-8") for line in f: line = line.rstrip() value, key = line.split(",") #key = char.decode("utf-8") #value = rhyme.decode("utf-8") mydict[key] = value f = codecs.open("../Data/SamplePoem.txt", "r", "utf-8") for line in f: line = line.rstrip() for key in line: if ping.match(mydict[key]): print key + " = " + " Ping" elif shang.match(mydict[key]): print key + " = " + " Shang" elif qu.match(mydict[key]): print key + " = " + " Qu" elif ru.match(mydict[key]): print key + " = " + " Ru" else: print key + " = " + " *"
Print the character without a rhyme if it is not in the rhyme dictionary
#!/usr/bin/python # -*- coding: utf-8 -*- import sys import re import codecs ping = re.compile(u'.平') shang = re.compile(u'上聲') ru = re.compile(u'入') qu = re.compile(u'去') mydict = { } # f = open("../Data/TangRhymesMap.csv") f = codecs.open("../Data/TangRhymesMap.csv", "r", "utf-8") for line in f: line = line.rstrip() value, key = line.split(",") #key = char.decode("utf-8") #value = rhyme.decode("utf-8") mydict[key] = value f = codecs.open("../Data/SamplePoem.txt", "r", "utf-8") for line in f: line = line.rstrip() for key in line: if key not in mydict: print key elif ping.match(mydict[key]): print key + " = " + " Ping" elif shang.match(mydict[key]): print key + " = " + " Shang" elif qu.match(mydict[key]): print key + " = " + " Qu" elif ru.match(mydict[key]): print key + " = " + " Ru" else: print key + " = " + " *"
from django.conf import settings from django.conf.urls import include, url # from django.contrib import admin from django.conf.urls.static import static from . import views urlpatterns = [ ### API ### url(r'^$', views.root), url(r'^nodes/', include('api.nodes.urls', namespace='nodes')), url(r'^users/', include('api.users.urls', namespace='users')), url(r'^docs/', include('rest_framework_swagger.urls')), ] + static('/static/', document_root=settings.STATIC_ROOT)
Change API url prefix to 'v2'
from django.conf import settings from django.conf.urls import include, url, patterns # from django.contrib import admin from django.conf.urls.static import static from . import views urlpatterns = [ ### API ### url(r'^v2/', include(patterns('', url(r'^$', views.root), url(r'^nodes/', include('api.nodes.urls', namespace='nodes')), url(r'^users/', include('api.users.urls', namespace='users')), url(r'^docs/', include('rest_framework_swagger.urls')), )))] + static('/static/', document_root=settings.STATIC_ROOT)
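Worth noting for the record above: django.conf.urls.patterns was deprecated in Django 1.8 and removed in 1.10, so on newer Django the same v2 prefix is written with a plain list handed to include(). A sketch, assuming the same views import as the original module:

# patterns() was removed in Django 1.10; a plain list works since 1.8.
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static

from . import views

v2_patterns = [
    url(r'^$', views.root),
    url(r'^nodes/', include('api.nodes.urls', namespace='nodes')),
    url(r'^users/', include('api.users.urls', namespace='users')),
    url(r'^docs/', include('rest_framework_swagger.urls')),
]

urlpatterns = [
    url(r'^v2/', include(v2_patterns)),
] + static('/static/', document_root=settings.STATIC_ROOT)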
"""Added end_date to full text index events Revision ID: 573faf4ac644 Revises: 342fa3076650 Create Date: 2015-03-06 17:26:54.718493 """ import sqlalchemy as sa from alembic import op # revision identifiers, used by Alembic. revision = '573faf4ac644' down_revision = '342fa3076650' def upgrade(): op.alter_column('event_index', 'start_date', nullable=False, schema='events') op.create_index('ix_start_date', 'event_index', ['start_date'], schema='events') op.add_column('event_index', sa.Column('end_date', sa.DateTime(), nullable=False, server_default='now()'), schema='events') op.alter_column('event_index', 'end_date', server_default=None, schema='events') op.create_index('ix_end_date', 'event_index', ['end_date'], schema='events') def downgrade(): op.alter_column('event_index', 'start_date', nullable=True, schema='events') op.drop_index('ix_start_date', table_name='event_index', schema='events') op.drop_column('event_index', 'end_date', schema='events')
Use index names matching the current naming scheme
"""Added end_date to full text index events Revision ID: 573faf4ac644 Revises: 342fa3076650 Create Date: 2015-03-06 17:26:54.718493 """ import sqlalchemy as sa from alembic import op # revision identifiers, used by Alembic. revision = '573faf4ac644' down_revision = '342fa3076650' def upgrade(): op.alter_column('event_index', 'start_date', nullable=False, schema='events') op.create_index('ix_events_event_index_start_date', 'event_index', ['start_date'], schema='events') op.add_column('event_index', sa.Column('end_date', sa.DateTime(), nullable=False, server_default='now()'), schema='events') op.alter_column('event_index', 'end_date', server_default=None, schema='events') op.create_index('ix_events_event_index_end_date', 'event_index', ['end_date'], schema='events') def downgrade(): op.alter_column('event_index', 'start_date', nullable=True, schema='events') op.drop_index('ix_events_event_index_start_date', table_name='event_index', schema='events') op.drop_column('event_index', 'end_date', schema='events')
from django.contrib.auth.models import User from django.db import models from django.db.models import fields from django.utils import timezone class Transaction(models.Model): EXPENSE = 'exp' INCOME = 'inc' CATEGORY_CHOICES = ( (EXPENSE, 'expense'), (INCOME, 'income'), ) title = fields.CharField(max_length=255) amount = fields.DecimalField(max_digits=10, decimal_places=2) category = fields.CharField(max_length=3, choices=CATEGORY_CHOICES) created = fields.DateTimeField(default=timezone.now, editable=False) modified = fields.DateTimeField(default=timezone.now) user = models.ForeignKey(User) def __str__(self): return "{}".format(self.title)
Create new model for debts and loans
from django.contrib.auth.models import User from django.db import models from django.db.models import fields from django.utils import timezone class Transaction(models.Model): EXPENSE = 'exp' INCOME = 'inc' CATEGORY_CHOICES = ( (EXPENSE, 'expense'), (INCOME, 'income'), ) title = fields.CharField(max_length=255) amount = fields.DecimalField(max_digits=10, decimal_places=2) category = fields.CharField(max_length=3, choices=CATEGORY_CHOICES) created = fields.DateTimeField(default=timezone.now, editable=False) modified = fields.DateTimeField(default=timezone.now) user = models.ForeignKey(User) def __str__(self): return "{}".format(self.title) class DebtLoan(models.Model): DEBT = 0 LOAN = 1 CATEGORY_CHOICES = ( (DEBT, 'debt'), (LOAN, 'loan'), ) with_who = fields.CharField(max_length=255) title = fields.CharField(max_length=255, null=True, blank=True) amount = fields.DecimalField(max_digits=10, decimal_places=2) category = fields.PositiveSmallIntegerField(choices=CATEGORY_CHOICES) created = fields.DateTimeField(default=timezone.now, editable=False) modified = fields.DateTimeField(default=timezone.now) user = models.ForeignKey(User) def __str__(self): if self.title: return "{}: {}".format(self.with_who, self.title) else: return "{}".format(self.with_who)
"""Configuration.""" import logging import os import re from google.appengine.ext.appstats import recording logging.info('Loading %s from %s', __name__, __file__) # Custom webapp middleware to add Appstats. def webapp_add_wsgi_middleware(app): app = recording.appstats_wsgi_middleware(app) return app # Custom Appstats path normalization. def appstats_normalize_path(path): if path.startswith('/user/'): return '/user/X' if path.startswith('/user_popup/'): return '/user_popup/X' if '/diff/' in path: return '/X/diff/...' if '/diff2/' in path: return '/X/diff2/...' if '/patch/' in path: return '/X/patch/...' if path.startswith('/rss/'): i = path.find('/', 5) if i > 0: return path[:i] + '/X' return re.sub(r'\d+', 'X', path) # Segregate Appstats by runtime (python vs. python27). appstats_KEY_NAMESPACE = '__appstats_%s__' % os.getenv('APPENGINE_RUNTIME') # Django 1.2+ requires DJANGO_SETTINGS_MODULE environment variable to be set # http://code.google.com/appengine/docs/python/tools/libraries.html#Django os.environ['DJANGO_SETTINGS_MODULE'] = 'settings' # NOTE: All "main" scripts must import webapp.template before django.
Enable the Appstats Interactive Playground.
"""Configuration.""" import logging import os import re from google.appengine.ext.appstats import recording logging.info('Loading %s from %s', __name__, __file__) # Custom webapp middleware to add Appstats. def webapp_add_wsgi_middleware(app): app = recording.appstats_wsgi_middleware(app) return app # Custom Appstats path normalization. def appstats_normalize_path(path): if path.startswith('/user/'): return '/user/X' if path.startswith('/user_popup/'): return '/user_popup/X' if '/diff/' in path: return '/X/diff/...' if '/diff2/' in path: return '/X/diff2/...' if '/patch/' in path: return '/X/patch/...' if path.startswith('/rss/'): i = path.find('/', 5) if i > 0: return path[:i] + '/X' return re.sub(r'\d+', 'X', path) # Segregate Appstats by runtime (python vs. python27). appstats_KEY_NAMESPACE = '__appstats_%s__' % os.getenv('APPENGINE_RUNTIME') # Enable Interactive Playground. appstats_SHELL_OK = True # Django 1.2+ requires DJANGO_SETTINGS_MODULE environment variable to be set # http://code.google.com/appengine/docs/python/tools/libraries.html#Django os.environ['DJANGO_SETTINGS_MODULE'] = 'settings' # NOTE: All "main" scripts must import webapp.template before django.
from django.core.urlresolvers import reverse from django.test import TestCase class HTTPGetRootTestCase(TestCase): def setUp(self): pass def test_get_root_expect_http_200(self): url = reverse('microauth_authentication:index') response = self.client.get(url) self.assertEqual(200, response.status_code, 'Expect root view to load without issues.')
Make test not depend on django-pipeline
from django.conf import settings from django.core.urlresolvers import reverse from django.test import TestCase from django.test.utils import override_settings class HTTPGetRootTestCase(TestCase): def setUp(self): pass def test_get_root_expect_http_200(self): pipeline_settings = settings.PIPELINE pipeline_settings['PIPELINE_ENABLED'] = False with override_settings(PIPELINE_SETTINGS=pipeline_settings): url = reverse('microauth_authentication:index') response = self.client.get(url) self.assertEqual(200, response.status_code, 'Expect root view to load without issues.')
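override_settings in the fixed test above is used as a context manager; it also works as a class or method decorator, which avoids mutating settings.PIPELINE in place first. A sketch, assuming django-pipeline reads a PIPELINE dict as the test suggests:

from django.core.urlresolvers import reverse
from django.test import TestCase
from django.test.utils import override_settings

@override_settings(PIPELINE={'PIPELINE_ENABLED': False})
class HTTPGetRootDecoratedTestCase(TestCase):
    def test_get_root(self):
        url = reverse('microauth_authentication:index')
        self.assertEqual(200, self.client.get(url).status_code)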
""" A collection of Django extensions that add content-management facilities to Django projects. Developed by Dave Hall. <http://etianen.com/> """ VERSION = (1, 8, 5)
Update version number to 1.9
""" A collection of Django extensions that add content-management facilities to Django projects. Developed by Dave Hall. <http://etianen.com/> """ VERSION = (1, 9)
from automatron.backend.plugin import PluginManager from automatron.controller.controller import IAutomatronClientActions from automatron.core.controller import BaseController class BackendController(BaseController): def __init__(self, config_file): BaseController.__init__(self, config_file) self.plugins = None def prepareService(self): # Load plugins self.plugins = PluginManager(self) def __getattr__(self, item): def proxy(*args): self.plugins.emit(IAutomatronClientActions[item], *args) return proxy
Use functools.partial for client action proxy.
from functools import partial from automatron.backend.plugin import PluginManager from automatron.controller.controller import IAutomatronClientActions from automatron.core.controller import BaseController class BackendController(BaseController): def __init__(self, config_file): BaseController.__init__(self, config_file) self.plugins = None def prepareService(self): # Load plugins self.plugins = PluginManager(self) def __getattr__(self, item): return partial(self.plugins.emit, IAutomatronClientActions[item])
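The partial() above binds the client-action interface once per attribute lookup, exactly as the removed closure did, but without defining a new function body. A minimal demonstration of the equivalence:

from functools import partial

def emit(action, *args):
    return (action, args)

proxy = partial(emit, 'message')          # like __getattr__('message')
assert proxy('user', 'hi') == ('message', ('user', 'hi'))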
import itertools event_id_count = itertools.count() class SimEvent(object): #def __init__(self, address=None, stmt_idx=None, message=None, exception=None, traceback=None): def __init__(self, state, event_type, **kwargs): self.id = event_id_count.next() self.type = event_type self.ins_addr = state.scratch.ins_addr self.bbl_addr = state.scratch.bbl_addr self.stmt_idx = state.scratch.stmt_idx self.sim_procedure = state.scratch.sim_procedure.__class__ self.objects = dict(kwargs) def __repr__(self): return "<SimEvent %s %d, with fields %s>" % (self.type, self.id, self.objects.keys()) def _copy_event(self): c = self.__class__.__new__(self.__class__) c.id = self.id c.type = self.type c.bbl_addr = self.bbl_addr c.stmt_idx = self.stmt_idx c.sim_procedure = self.sim_procedure c.objects = dict(self.objects) return c
Set SimEvent.sim_procedure to None instead of NoneType to make pickle happy.
import itertools event_id_count = itertools.count() class SimEvent(object): #def __init__(self, address=None, stmt_idx=None, message=None, exception=None, traceback=None): def __init__(self, state, event_type, **kwargs): self.id = event_id_count.next() self.type = event_type self.ins_addr = state.scratch.ins_addr self.bbl_addr = state.scratch.bbl_addr self.stmt_idx = state.scratch.stmt_idx self.sim_procedure = None if state.scratch.sim_procedure is None else state.scratch.sim_procedure.__class__ self.objects = dict(kwargs) def __repr__(self): return "<SimEvent %s %d, with fields %s>" % (self.type, self.id, self.objects.keys()) def _copy_event(self): c = self.__class__.__new__(self.__class__) c.id = self.id c.type = self.type c.bbl_addr = self.bbl_addr c.stmt_idx = self.stmt_idx c.sim_procedure = self.sim_procedure c.objects = dict(self.objects) return c
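Why the change above helps: pickle stores classes by reference (module plus name), and on Python 2 — which this code targets, note the .next() call — type(None) is not reachable that way, so pickling it raises PicklingError. Plain None pickles fine:

import pickle

pickle.dumps(None)  # always fine

try:
    pickle.dumps(type(None))  # PicklingError on Python 2
except pickle.PicklingError:
    pass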
from ansiblelint import AnsibleLintRule class NoFormattingInWhenRule(AnsibleLintRule): id = 'CINCH0001' shortdesc = 'No Jinja2 in when' description = '"when" lines should not include Jinja2 variables' tags = ['deprecated'] def _is_valid(self, when): if not isinstance(when, (str, unicode)): return True return when.find('{{') == -1 and when.find('}}') == -1 def matchplay(self, file, play): errors = [] if isinstance(play, dict): if 'roles' not in play: return errors for role in play['roles']: if self.matchtask(file, role): errors.append(({'when': role}, 'role "when" clause has Jinja2 templates')) if isinstance(play, list): for play_item in play: sub_errors = self.matchplay(file, play_item) if sub_errors: errors = errors + sub_errors return errors def matchtask(self, file, task): return 'when' in task and not self._is_valid(task['when'])
Fix Python3 unicode test error
from ansiblelint import AnsibleLintRule try: from types import StringTypes except ImportError: # Python3 removed types.StringTypes StringTypes = str, class NoFormattingInWhenRule(AnsibleLintRule): id = 'CINCH0001' shortdesc = 'No Jinja2 in when' description = '"when" lines should not include Jinja2 variables' tags = ['deprecated'] def _is_valid(self, when): if not isinstance(when, StringTypes): return True return when.find('{{') == -1 and when.find('}}') == -1 def matchplay(self, file, play): errors = [] if isinstance(play, dict): if 'roles' not in play: return errors for role in play['roles']: if self.matchtask(file, role): errors.append(({'when': role}, 'role "when" clause has Jinja2 templates')) if isinstance(play, list): for play_item in play: sub_errors = self.matchplay(file, play_item) if sub_errors: errors = errors + sub_errors return errors def matchtask(self, file, task): return 'when' in task and not self._is_valid(task['when'])
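The try/except shim above reconstructs Python 2's types.StringTypes, which is simply (str, unicode). Projects that already depend on six get the same tuple as six.string_types — a sketch of that alternative, not what the commit uses:

import six

def _is_valid(when):
    # six.string_types is (str, unicode) on Python 2 and (str,) on 3.
    if not isinstance(when, six.string_types):
        return True
    return '{{' not in when and '}}' not in when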
try: import uio as io except ImportError: import io import sys if hasattr(sys, 'print_exception'): print_exception = sys.print_exception else: import traceback print_exception = lambda e, f: traceback.print_exception(None, e, sys.exc_info()[2], file=f) def print_exc(e): buf = io.StringIO() print_exception(e, buf) s = buf.getvalue() for l in s.split("\n"): # uPy on pyboard prints <stdin> as file, so remove filename. if l.startswith(" File "): l = l.split('"') print(l[0], l[2]) # uPy and CPy tracebacks differ in that CPy prints a source line for # each traceback entry. In this case, we know that offending line # has 4-space indent, so filter it out. elif not l.startswith(" "): print(l) # basic exception message try: 1/0 except Exception as e: print('caught') print_exc(e) # exception message with more than 1 source-code line def f(): g() def g(): 2/0 try: f() except Exception as e: print('caught') print_exc(e)
tests/misc: Add test for line number printing with large bytecode chunk.
try: import uio as io except ImportError: import io import sys if hasattr(sys, 'print_exception'): print_exception = sys.print_exception else: import traceback print_exception = lambda e, f: traceback.print_exception(None, e, sys.exc_info()[2], file=f) def print_exc(e): buf = io.StringIO() print_exception(e, buf) s = buf.getvalue() for l in s.split("\n"): # uPy on pyboard prints <stdin> as file, so remove filename. if l.startswith(" File "): l = l.split('"') print(l[0], l[2]) # uPy and CPy tracebacks differ in that CPy prints a source line for # each traceback entry. In this case, we know that offending line # has 4-space indent, so filter it out. elif not l.startswith(" "): print(l) # basic exception message try: 1/0 except Exception as e: print('caught') print_exc(e) # exception message with more than 1 source-code line def f(): g() def g(): 2/0 try: f() except Exception as e: print('caught') print_exc(e) # Here we have a function with lots of bytecode generated for a single source-line, and # there is an error right at the end of the bytecode. It should report the correct line. def f(): f([1, 2], [1, 2], [1, 2], {1:1, 1:1, 1:1, 1:1, 1:1, 1:1, 1:X}) return 1 try: f() except Exception as e: print_exc(e)
# The history of this repository has been rewritten to erase the vendor/ directory # Below is the md5sum and size of the file that was in the original commit bde0e3a3a15c9bbb8d96f4d8a370d8c7 5753
Drop the eventlet bundle back to its released state. We will work around the bug we fixed there in our own code.
# The history of this repository has been rewritten to erase the vendor/ directory # Below is the md5sum and size of the file that was in the original commit 5b7615cc9b13cf39cfa39db53e86977a 5751
#!/usr/bin/env python # -*- coding: utf8 -*- __author__ = 'eric' ''' Need to create some test data '''
Test script for generating metadata
#!/usr/bin/env python
# -*- coding: utf8 -*-

__author__ = 'eric'

'''
Need to create some test data:
an 8 gigabyte dataset
'''
"""The ox_herd package provides a way for herding tests together (like oxen). """ VERSION = '0.6.10'
Fix bug in showing form
"""The ox_herd package provides a way for herding tests together (like oxen). """ VERSION = '0.6.11'
import logging from .current_playlist import CurrentPlaylistController from .library import LibraryController, BaseLibraryProvider from .playback import PlaybackController, BasePlaybackProvider from .stored_playlists import (StoredPlaylistsController, BaseStoredPlaylistsProvider) logger = logging.getLogger('mopidy.backends.base') class Backend(object): #: The current playlist controller. An instance of #: :class:`mopidy.backends.base.CurrentPlaylistController`. current_playlist = None #: The library controller. An instance of # :class:`mopidy.backends.base.LibraryController`. library = None #: The sound mixer. An instance of :class:`mopidy.mixers.BaseMixer`. mixer = None #: The playback controller. An instance of #: :class:`mopidy.backends.base.PlaybackController`. playback = None #: The stored playlists controller. An instance of #: :class:`mopidy.backends.base.StoredPlaylistsController`. stored_playlists = None #: List of URI prefixes this backend can handle. uri_handlers = []
Remove mixer from the Backend API as it is independent
import logging from .current_playlist import CurrentPlaylistController from .library import LibraryController, BaseLibraryProvider from .playback import PlaybackController, BasePlaybackProvider from .stored_playlists import (StoredPlaylistsController, BaseStoredPlaylistsProvider) logger = logging.getLogger('mopidy.backends.base') class Backend(object): #: The current playlist controller. An instance of #: :class:`mopidy.backends.base.CurrentPlaylistController`. current_playlist = None #: The library controller. An instance of # :class:`mopidy.backends.base.LibraryController`. library = None #: The playback controller. An instance of #: :class:`mopidy.backends.base.PlaybackController`. playback = None #: The stored playlists controller. An instance of #: :class:`mopidy.backends.base.StoredPlaylistsController`. stored_playlists = None #: List of URI prefixes this backend can handle. uri_handlers = []
from .robot import Robot from .modules import *
Add null handler as default for logging.
import logging from .robot import Robot from .modules import * nh = logging.NullHandler() logging.getLogger(__name__).addHandler(nh)
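Attaching a NullHandler to the package logger is the stdlib-recommended default for libraries: it silences Python 2's "No handlers could be found" warning without forcing a logging configuration on applications. NullHandler ships with logging since Python 2.7/3.1, so the two lines above also collapse to one:

import logging

# Libraries add a no-op handler; applications still configure logging
# themselves (e.g. via logging.basicConfig).
logging.getLogger(__name__).addHandler(logging.NullHandler())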
def get_related_model(rel): # In Django 1.7 and under, the related model is accessed by doing: rel.model # This was renamed in Django 1.8 to rel.related_model. rel.model now returns # the base model. return getattr(rel, 'related_model', rel.model)
Check Django version instead of hasattr
import django def get_related_model(rel): # In Django 1.7 and under, the related model is accessed by doing: rel.model # This was renamed in Django 1.8 to rel.related_model. rel.model now returns # the base model. if django.VERSION >= (1, 8): return rel.related_model else: return rel.model
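The version check reads better than hasattr because django.VERSION is a plain tuple such as (1, 8, 4, 'final', 0), and tuple comparison covers 1.8 plus every later release:

# Tuple comparison is element-wise, and a longer tuple with an equal
# prefix still compares >=.
assert (1, 8, 4, 'final', 0) >= (1, 8)
assert (2, 0) >= (1, 8)
assert not ((1, 7, 11) >= (1, 8))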
#!/usr/bin/env python # -*- coding: utf-8 -*- class Handler(object): def __init__(self, db): # Each handlers is formatted following # the pattern : [ command, # default return value, # raised error ] self.handles = { 'GET': (db.Get, "", KeyError), 'PUT': (db.Put, "True", TypeError), 'DELETE': (db.Delete, ""), } def command(self, message): op_code = message.op_code args = message.data if op_code in self.handles: if len(self.handles[op_code]) == 2: return self.handles[op_code](*args) else: # FIXME # global except catching is a total # performance killer. Should enhance # the handles attributes to link possible # exceptions with leveldb methods. try: value = self.handles[op_code][0](*args) except self.handles[op_code][2]: return "" else: raise KeyError("op_code not handle") return value if value else self.handles[op_code][1]
Fix: DELETE command call badly formatted in handler
#!/usr/bin/env python # -*- coding: utf-8 -*- class Handler(object): def __init__(self, db): # Each handlers is formatted following # the pattern : [ command, # default return value, # raised error ] self.handles = { 'GET': (db.Get, "", KeyError), 'PUT': (db.Put, "True", TypeError), 'DELETE': (db.Delete, ""), } def command(self, message): op_code = message.op_code args = message.data if op_code in self.handles: if len(self.handles[op_code]) == 2: value = self.handles[op_code][0](*args) else: # FIXME # global except catching is a total # performance killer. Should enhance # the handles attributes to link possible # exceptions with leveldb methods. try: value = self.handles[op_code][0](*args) except self.handles[op_code][2]: return "" else: raise KeyError("op_code not handle") return value if value else self.handles[op_code][1]
import wx.stc ident = "makefile" name = "Makefile" extensions = ["Makefile", "*.mk"] lexer = wx.stc.STC_LEX_MAKEFILE indent = 8 use_tabs = True stylespecs = ( (wx.stc.STC_STYLE_DEFAULT, ""), ) keywords = ""
Match files ending in makefile or Makefile.
import wx.stc ident = "makefile" name = "Makefile" extensions = ["*Makefile", "*makefile", "*.mk"] lexer = wx.stc.STC_LEX_MAKEFILE indent = 8 use_tabs = True stylespecs = ( (wx.stc.STC_STYLE_DEFAULT, ""), ) keywords = ""
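Assuming the extensions list is matched with fnmatch-style globs (an assumption, not confirmed by the record), the leading * is what lets names like GNUmakefile match:

from fnmatch import fnmatchcase

assert fnmatchcase('GNUmakefile', '*makefile')
assert fnmatchcase('Makefile', '*Makefile')   # '*' may match empty
assert fnmatchcase('rules.mk', '*.mk')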
from django_filters import FilterSet, CharFilter, IsoDateTimeFilter, BooleanFilter, ModelChoiceFilter from falmer.events.models import Curator from . import models class EventFilterSet(FilterSet): class Meta: model = models.Event fields = ( 'title', 'venue', 'type', 'bundle', 'parent', 'brand', 'student_group', 'from_time', 'to_time', 'audience_just_for_pgs', 'audience_suitable_kids_families', 'audience_good_to_meet_people', 'is_over_18_only', 'cost', 'alcohol', 'type', 'ticket_level', 'curated_by' ) title = CharFilter(lookup_expr='icontains') brand = CharFilter(field_name='brand__slug') bundle = CharFilter(field_name='bundle__slug') to_time = IsoDateTimeFilter(field_name='start_time', lookup_expr='lte') from_time = IsoDateTimeFilter(field_name='end_time', lookup_expr='gte') uncurated = BooleanFilter(field_name='curated_by', lookup_expr='isnull') curated_by = ModelChoiceFilter(queryset=Curator.objects.all(), field_name='curated_by') # # class BrandingPeriodFilerSet(FilterSet): # class Meta: # model = BrandingPeriod
Use SG slug for event filtering
from django_filters import FilterSet, CharFilter, IsoDateTimeFilter, BooleanFilter, ModelChoiceFilter from falmer.events.models import Curator from . import models class EventFilterSet(FilterSet): class Meta: model = models.Event fields = ( 'title', 'venue', 'type', 'bundle', 'parent', 'brand', 'student_group', 'from_time', 'to_time', 'audience_just_for_pgs', 'audience_suitable_kids_families', 'audience_good_to_meet_people', 'is_over_18_only', 'cost', 'alcohol', 'type', 'ticket_level', 'curated_by' ) title = CharFilter(lookup_expr='icontains') brand = CharFilter(field_name='brand__slug') bundle = CharFilter(field_name='bundle__slug') student_group = CharFilter(field_name='student_group__slug') to_time = IsoDateTimeFilter(field_name='start_time', lookup_expr='lte') from_time = IsoDateTimeFilter(field_name='end_time', lookup_expr='gte') uncurated = BooleanFilter(field_name='curated_by', lookup_expr='isnull') curated_by = ModelChoiceFilter(queryset=Curator.objects.all(), field_name='curated_by') # # class BrandingPeriodFilerSet(FilterSet): # class Meta: # model = BrandingPeriod
class BaseStorage(object):
    """docstring for BaseStorage"""
    def __init__(self):
        super(BaseStorage, self).__init__()

    def filter(self, criteria):
        raise Exception("Not implemented Error")

    def getSummary(self, criteria):
        raise Exception("Not implemented Error")

    def insert(self, measurement):
        raise Exception("Not implemented Error")

    def delete(self, measurementId):
        raise Exception("Not implemented Error")
Add truncate method to BaseStorage class. This will provide an interface for supporting any new database, thereby making the code more robust.
class BaseStorage(object):
    """docstring for BaseStorage"""
    def __init__(self):
        super(BaseStorage, self).__init__()

    def filter(self, criteria):
        raise Exception("Not implemented Error")

    def getSummary(self, criteria):
        raise Exception("Not implemented Error")

    def insert(self, measurement):
        raise Exception("Not implemented Error")

    def delete(self, measurementId):
        raise Exception("Not implemented Error")

    def truncate(self):
        raise Exception("Not implemented Error")
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger from django.shortcuts import render from molo.core.models import ArticlePage from molo.commenting.models import MoloComment from wagtail.wagtailsearch.models import Query def search(request, results_per_page=10): search_query = request.GET.get('q', None) page = request.GET.get('p', 1) if search_query: results = ArticlePage.objects.live().search(search_query) Query.get(search_query).add_hit() else: results = ArticlePage.objects.none() paginator = Paginator(results, results_per_page) try: search_results = paginator.page(page) except PageNotAnInteger: search_results = paginator.page(1) except EmptyPage: search_results = paginator.page(paginator.num_pages) return render(request, 'search/search_results.html', { 'search_query': search_query, 'search_results': search_results, 'results': results, }) def report_response(request, comment_pk): comment = MoloComment.objects.get(pk=comment_pk) return render(request, 'comments/report_response.html', { 'article': comment.content_object, })
Add multi-language support for search
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger from django.shortcuts import render from django.utils.translation import get_language_from_request from molo.core.utils import get_locale_code from molo.core.models import ArticlePage from molo.commenting.models import MoloComment from wagtail.wagtailsearch.models import Query def search(request, results_per_page=10): search_query = request.GET.get('q', None) page = request.GET.get('p', 1) locale = get_locale_code(get_language_from_request(request)) if search_query: results = ArticlePage.objects.filter( languages__language__locale=locale).live().search(search_query) Query.get(search_query).add_hit() else: results = ArticlePage.objects.none() paginator = Paginator(results, results_per_page) try: search_results = paginator.page(page) except PageNotAnInteger: search_results = paginator.page(1) except EmptyPage: search_results = paginator.page(paginator.num_pages) return render(request, 'search/search_results.html', { 'search_query': search_query, 'search_results': search_results, 'results': results, }) def report_response(request, comment_pk): comment = MoloComment.objects.get(pk=comment_pk) return render(request, 'comments/report_response.html', { 'article': comment.content_object, })
# -*- coding: utf-8 -*- """ mackerel.host ~~~~~~~~~~~~~ Mackerel client implemented by Pyton. Ported from `mackerel-client-ruby`. <https://github.com/mackerelio/mackerel-client-ruby> :copyright: (c) 2014 Hatena, All rights reserved. :copyright: (c) 2015 Shinya Ohyanagi, All rights reserved. :license: BSD, see LICENSE for more details. """ import re class Host(object): MACKEREL_INTERFACE_NAME_PATTERN = re.compile(r'^eth\d') def __init__(self, **kwargs): self.args = kwargs self.name = kwargs.get('name') self.meta = kwargs.get('meta') self.type = kwargs.get('type') self.status = kwargs.get('status') self.memo = kwargs.get('memo') self.is_retired = kwargs.get('isRetired') self.id = kwargs.get('id') self.created_at = kwargs.get('createdAt') self.roles = kwargs.get('roles') self.interfaces = kwargs.get('interfaces') def ip_addr(self): pass def mac_addr(self): pass
Default to None when a key is missing from kwargs.
# -*- coding: utf-8 -*- """ mackerel.host ~~~~~~~~~~~~~ Mackerel client implemented by Pyton. Ported from `mackerel-client-ruby`. <https://github.com/mackerelio/mackerel-client-ruby> :copyright: (c) 2014 Hatena, All rights reserved. :copyright: (c) 2015 Shinya Ohyanagi, All rights reserved. :license: BSD, see LICENSE for more details. """ import re class Host(object): MACKEREL_INTERFACE_NAME_PATTERN = re.compile(r'^eth\d') def __init__(self, **kwargs): self.args = kwargs self.name = kwargs.get('name', None) self.meta = kwargs.get('meta', None) self.type = kwargs.get('type', None) self.status = kwargs.get('status', None) self.memo = kwargs.get('memo', None) self.is_retired = kwargs.get('isRetired', None) self.id = kwargs.get('id', None) self.created_at = kwargs.get('createdAt', None) self.roles = kwargs.get('roles', None) self.interfaces = kwargs.get('interfaces', None) def ip_addr(self): pass def mac_addr(self): pass
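dict.get already returns None when a key is missing, so the explicit default in the record above is redundant — the change documents intent rather than altering behaviour:

kwargs = {'name': 'web01'}  # hypothetical host payload

assert kwargs.get('memo') is None        # implicit default
assert kwargs.get('memo', None) is None  # same, spelled out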
import contextlib import os from libqtile.backend.wayland.core import Core from test.helpers import Backend wlr_env = { "WLR_BACKENDS": "headless", "WLR_LIBINPUT_NO_DEVICES": "1", "WLR_RENDERER_ALLOW_SOFTWARE": "1", "WLR_RENDERER": "pixman", } @contextlib.contextmanager def wayland_environment(outputs): """This backend just needs some environmental variables set""" env = wlr_env.copy() env["WLR_HEADLESS_OUTPUTS"] = str(outputs) yield env class WaylandBackend(Backend): def __init__(self, env, args=()): self.env = env self.args = args self.core = Core self.manager = None def create(self): """This is used to instantiate the Core""" os.environ.update(self.env) return self.core(*self.args) def configure(self, manager): """This backend needs to get WAYLAND_DISPLAY variable.""" success, display = manager.c.eval("self.core.display_name") assert success self.env["WAYLAND_DISPLAY"] = display def fake_click(self, x, y): """Click at the specified coordinates""" raise NotImplementedError def get_all_windows(self): """Get a list of all windows in ascending order of Z position""" raise NotImplementedError
Add Wayland Backend.fake_click and Backend.get_all_windows methods. These work by eval-ing in the test Qtile instance. It might be nicer to instead make these cmd_ methods on the `Core` if/when we expose cmd_ methods from the Core.
import contextlib import os import textwrap from libqtile.backend.wayland.core import Core from test.helpers import Backend wlr_env = { "WLR_BACKENDS": "headless", "WLR_LIBINPUT_NO_DEVICES": "1", "WLR_RENDERER_ALLOW_SOFTWARE": "1", "WLR_RENDERER": "pixman", } @contextlib.contextmanager def wayland_environment(outputs): """This backend just needs some environmental variables set""" env = wlr_env.copy() env["WLR_HEADLESS_OUTPUTS"] = str(outputs) yield env class WaylandBackend(Backend): def __init__(self, env, args=()): self.env = env self.args = args self.core = Core self.manager = None def create(self): """This is used to instantiate the Core""" os.environ.update(self.env) return self.core(*self.args) def configure(self, manager): """This backend needs to get WAYLAND_DISPLAY variable.""" success, display = manager.c.eval("self.core.display_name") assert success self.env["WAYLAND_DISPLAY"] = display def fake_click(self, x, y): """Click at the specified coordinates""" self.manager.c.eval(textwrap.dedent(""" self.core._focus_by_click() self.core._process_cursor_button(1, True) """)) def get_all_windows(self): """Get a list of all windows in ascending order of Z position""" success, result = self.manager.c.eval(textwrap.dedent(""" [win.wid for win in self.core.mapped_windows] """)) assert success return eval(result)
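get_all_windows above round-trips the list through manager.c.eval, which returns the repr of the evaluated expression; the test then eval()s it back. For plain literals, ast.literal_eval does the same parse without executing arbitrary code — a sketch of that alternative:

import ast

def parse_eval_result(result):
    # Safe for reprs of literals such as '[1, 2, 3]'.
    return ast.literal_eval(result)

assert parse_eval_result('[1, 2, 3]') == [1, 2, 3]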
""" NeuPy is the Artificial Neural Network library implemented in Python. """ __version__ = '0.2.1'
Set up version 0.3.0 dev 1.
""" NeuPy is the Artificial Neural Network library implemented in Python. """ __version__ = '0.3.0dev1'
#!/usr/bin/env python from __future__ import print_function import multyvac multyvac.config.set_key(api_key='admin', api_secret_key='12345', api_url='http://docker:8000/v1') def add(a, b): return a + b jid = multyvac.submit(add, 3, 4) result = multyvac.get(jid).get_result() print("result = {}".format(result))
Allow api_url in the script to be configurable
#!/usr/bin/env python # CLOUDPIPE_URL=http://`echo $DOCKER_HOST | cut -d ":" -f2 | tr -d "/"`:8000/v1 python2 script/sample/submitpython.py from __future__ import print_function import multyvac import os # Grab from the CLOUDPIPE_URL environment variable, otherwise assume they have # /etc/hosts configured to point to their docker api_url = os.environ.get('CLOUDPIPE_URL', 'http://docker:8000/v1') multyvac.config.set_key(api_key='admin', api_secret_key='12345', api_url=api_url) def add(a, b): return a + b jid = multyvac.submit(add, 3, 4) result = multyvac.get(jid).get_result() print("added {} and {} to get {}... in the cloud!".format(3,4,result))
# -*- coding: utf-8 -*- # This file is part of wger Workout Manager. # # wger Workout Manager is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # wger Workout Manager is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU Affero General Public License from django.utils.translation import ( pgettext, ugettext_lazy as _ ) from django.forms import ( Form, CharField, Textarea ) class EmailListForm(Form): ''' Small form to send emails ''' subject = CharField(label=pgettext('Subject', 'As in "email subject"')) body = CharField(widget=Textarea, label=pgettext('Content', 'As in "content of an email"'))
Use correct order of arguments of pgettext
# -*- coding: utf-8 -*- # This file is part of wger Workout Manager. # # wger Workout Manager is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # wger Workout Manager is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU Affero General Public License from django.utils.translation import ( pgettext, ugettext_lazy as _ ) from django.forms import ( Form, CharField, Textarea ) class EmailListForm(Form): ''' Small form to send emails ''' subject = CharField(label=pgettext('As in "email subject"', 'Subject')) body = CharField(widget=Textarea, label=pgettext('As in "content of an email"', 'Content'))
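The fix above matters because pgettext's signature is pgettext(context, message): the context is a disambiguating hint shown only to translators, while the message is the string actually rendered. The stdlib uses the same ordering (Python 3.8+):

import gettext

t = gettext.NullTranslations()
# Without a catalogue, the message itself comes back.
assert t.pgettext('As in "email subject"', 'Subject') == 'Subject'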
from django.core.urlresolvers import reverse from django.db import models from django.utils.translation import ugettext_lazy as _ class Person(models.Model): first_name = models.CharField(verbose_name=_(u"First Name"), max_length=75, blank=True) last_name = models.CharField(verbose_name=_(u"Last Name"), max_length=75, blank=True) height = models.IntegerField(blank=True) email = models.EmailField()
Use email to 'print' a person
from django.core.urlresolvers import reverse from django.db import models from django.utils.translation import ugettext_lazy as _ class Person(models.Model): first_name = models.CharField(verbose_name=_(u"First Name"), max_length=75, blank=True) last_name = models.CharField(verbose_name=_(u"Last Name"), max_length=75, blank=True) height = models.IntegerField(blank=True) email = models.EmailField() def __unicode__ (self): return self.email
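__unicode__ is only consulted on Python 2; on Python 3 the equivalent hook is __str__. On the Django 1.x line this code targets, python_2_unicode_compatible covers both at once — a sketch, not part of the original commit:

from django.db import models
from django.utils.encoding import python_2_unicode_compatible

@python_2_unicode_compatible
class Person(models.Model):
    email = models.EmailField()

    def __str__(self):  # mapped to __unicode__ on Python 2
        return self.email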