max_stars_repo_path (string, 3-269 chars) | max_stars_repo_name (string, 4-119 chars) | max_stars_count (int64, 0-191k) | id (string, 1-7 chars) | content (string, 6-1.05M chars) | score (float64, 0.23-5.13) | int_score (int64, 0-5) |
---|---|---|---|---|---|---|
labs/lab-1/exercises/lab-1.1-introduction-to-tensorflow/1.1-introduction-to-tensorflow.py | rubenandrebarreiro/fct-nova-deep-learning-labs | 1 | 12799551 | <reponame>rubenandrebarreiro/fct-nova-deep-learning-labs
"""
Lab 1.1 - Introduction to TensorFlow
Author:
- <NAME> (<EMAIL>)
- <NAME> (<EMAIL>)
"""
# Import the Libraries and Packages
# Import the Operative System Library as operative_system
import os as operative_system
# Disable all the Debugging Logs from TensorFlow Library
operative_system.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# Import the TensorFlow Library as tensorflow alias
import tensorflow as tensorflow
# Constants
LOGGING_FLAG = True
# Create the constant "a" as a float of 32 bits and
# assign to it the value 3
a = tensorflow.constant(3.0, dtype=tensorflow.float32, name="a")
# Create the constant "b" and assign to it the value 4
b = tensorflow.constant(4.0, name="b")
# Create the addition of the constants "a" and "b", as "total",
# i.e., total = a + b
total = tensorflow.add(a, b, name="total")
# If the Logging Flag is set to True
if LOGGING_FLAG:
# Print the header for the Logging
tensorflow.print("\n\nLogging of the Execution:\n")
# Print the Tensor for the constant "a"
tensorflow.print("a = ", a)
# Print the Tensor for the constant "b"
tensorflow.print("b = ", b)
# Print the Tensor for the addition of
# the constants "a" and "b", as "total"
tensorflow.print("total = a + b = ", total)
| 3.078125 | 3 |
tests/components/meteo_france/conftest.py | jasperro/core | 7 | 12799552 | """Meteo-France generic test utils."""
from unittest.mock import patch
import pytest
@pytest.fixture(autouse=True)
def patch_requests():
"""Stub out services that make requests."""
patch_client = patch("homeassistant.components.meteo_france.meteofranceClient")
patch_weather_alert = patch(
"homeassistant.components.meteo_france.VigilanceMeteoFranceProxy"
)
with patch_client, patch_weather_alert:
yield
| 2.15625 | 2 |
core/spark_utils.py | malashkovv/scientific-paper-processing | 0 | 12799553 | import os
from contextlib import contextmanager
from pyspark.sql import SparkSession
from .log import logger
@contextmanager
def spark_session(config=None):
pre_spark = SparkSession.builder \
.appName('science-papers-ml') \
.master(f"spark://{os.environ.get('SPARK_MASTER_HOST', 'spark-master')}:"
f"{os.environ.get('SPARK_MASTER_PORT', '7077')}")
if config is not None:
for key, value in config.items():
pre_spark = pre_spark.config(key, value)
spark = pre_spark.getOrCreate()
logger.info("Created Spark session")
try:
yield spark
finally:
logger.info("Stopping Spark Session")
spark.stop()
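# Usage sketch (added for illustration; not part of the original module). It assumes a
# Spark master reachable via SPARK_MASTER_HOST/SPARK_MASTER_PORT and a hypothetical
# input file "papers.json"; the config keys shown are ordinary Spark properties.
if __name__ == '__main__':
    with spark_session(config={'spark.executor.memory': '2g'}) as spark:
        df = spark.read.json('papers.json')
        df.printSchema()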
| 2.421875 | 2 |
Scripts/stackedBarChart.py | Albertios/PythonInGIS_EagleOwl | 0 | 12799554 | import matplotlib.pyplot as plt
import numpy as np
cnames = [
'#F0F8FF',
'#FAEBD7',
'#00FFFF',
'#7FFFD4',
'#F0FFFF',
'#F5F5DC',
'#FFE4C4',
'#000000',
'#FFEBCD',
'#0000FF',
'#8A2BE2',
'#A52A2A',
'#DEB887',
'#5F9EA0',
'#7FFF00',
'#D2691E',
'#FF7F50',
'#6495ED',
'#FFF8DC',
'#DC143C',
'#00FFFF',
'#00008B',
'#008B8B',
'#B8860B',
'#A9A9A9',
'#006400',
'#BDB76B',
'#8B008B',
'#556B2F',
'#FF8C00',
'#9932CC',
'#8B0000',
'#E9967A',
'#8FBC8F',
'#483D8B',
'#2F4F4F',
'#00CED1',
'#9400D3',
'#FF1493',
'#00BFFF',
'#696969',
'#1E90FF',
'#B22222',
'#FFFAF0',
'#228B22',
'#FF00FF',
'#DCDCDC',
'#F8F8FF',
'#FFD700',
'#DAA520',
'#808080',
'#008000',
'#ADFF2F',
'#F0FFF0',
'#FF69B4',
'#CD5C5C',
'#4B0082',
'#FFFFF0',
'#F0E68C',
'#E6E6FA',
'#FFF0F5',
'#7CFC00',
'#FFFACD',
'#ADD8E6',
'#F08080',
'#E0FFFF',
'#FAFAD2',
'#90EE90',
'#D3D3D3',
'#FFB6C1',
'#FFA07A',
'#20B2AA',
'#87CEFA',
'#778899',
'#B0C4DE',
'#FFFFE0',
'#00FF00',
'#32CD32',
'#FAF0E6',
'#FF00FF',
'#800000',
'#66CDAA',
'#0000CD',
'#BA55D3',
'#9370DB',
'#3CB371',
'#7B68EE',
'#00FA9A',
'#48D1CC',
'#C71585',
'#191970',
'#F5FFFA',
'#FFE4E1',
'#FFE4B5',
'#FFDEAD',
'#000080',
'#FDF5E6',
'#808000',
'#6B8E23',
'#FFA500',
'#FF4500',
'#DA70D6',
'#EEE8AA',
'#98FB98',
'#AFEEEE',
'#DB7093',
'#FFEFD5',
'#FFDAB9',
'#CD853F',
'#FFC0CB',
'#DDA0DD',
'#B0E0E6',
'#800080',
'#FF0000',
'#BC8F8F',
'#4169E1',
'#8B4513',
'#FA8072',
'#FAA460',
'#2E8B57',
'#FFF5EE',
'#A0522D',
'#C0C0C0',
'#87CEEB',
'#6A5ACD',
'#708090',
'#FFFAFA',
'#00FF7F',
'#4682B4',
'#D2B48C',
'#008080',
'#D8BFD8',
'#FF6347',
'#40E0D0',
'#EE82EE',
'#F5DEB3',
'#FFFFFF',
'#F5F5F5',
'#FFFF00',
'#9ACD32']
months = {'Jan': [],
'Feb': [],
'Mar': [],
'Apr': [],
'May': [],
'Jun': [],
'Jul': [],
'Aug': [],
'Sep': [],
'Oct': [],
'Nov': [],
'Dec': []
}
def getOwl(monthTable, ID):
result = []
for f in monthTable:
if f[0] == ID:
result.append(f)
return result
def fillNull(months):
months["Jan"].append(0)
months["Feb"].append(0)
months["Mar"].append(0)
months["Apr"].append(0)
months["May"].append(0)
months["Jun"].append(0)
months["Jul"].append(0)
months["Aug"].append(0)
months["Sep"].append(0)
months["Oct"].append(0)
months["Nov"].append(0)
months["Dec"].append(0)
return months
def fillMonths(monthTable, months):
curOwl = monthTable[0][0]
for feature in monthTable:
tempOwl = feature[0]
month = feature[2]
dist = feature[3]
owl = getOwl(monthTable, "1751")
# get all Data for one owl
# fill all month with distance
# missing data = 0 distance
months = fillNull(months)
if month == "01":
months["Jan"][len(months["Jan"])-1] = dist
if month == "02":
months["Feb"][len(months["Feb"])-1] = dist
if month == "03":
months["Mar"][len(months["Mar"])-1] = dist
if month == "04":
months["Apr"][len(months["Apr"])-1] = dist
if month == "05":
months["May"][len(months["May"])-1] = dist
if month == "06":
months["Jun"][len(months["Jun"])-1] = dist
if month == "07":
months["Jul"][len(months["Jul"])-1] = dist
if month == "08":
months["Aug"][len(months["Aug"])-1] = dist
if month == "09":
months["Sep"][len(months["Sep"])-1] = dist
if month == "10":
months["Oct"][len(months["Oct"])-1] = dist
if month == "11":
months["Nov"][len(months["Nov"])-1] = dist
if month == "12":
months["Dec"][len(months["Dec"])-1] = dist
return months
months = fillMonths(monthTable, months)
X = np.arange(12)
curOwl = [np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,]
counter = 0
tempOwl = "0"
lastOwl="none"
for feature in monthTable:
owl = feature[0]
if owl != tempOwl:
tempOwl = owl
t = getOwl(monthTable, feature[0])
for i in t:
month = i[2]
if month == "01":
curOwl[0] = i[3]
if month == "02":
curOwl[1] = i[3]
if month == "03":
curOwl[2] = i[3]
if month == "04":
curOwl[3] = i[3]
if month == "05":
curOwl[4] = i[3]
if month == "06":
curOwl[5] = i[3]
if month == "07":
curOwl[6] = i[3]
if month == "08":
curOwl[7] = i[3]
if month == "09":
curOwl[8] = i[3]
if month == "10":
curOwl[9] = i[3]
if month == "11":
curOwl[10] = i[3]
if month == "12":
curOwl[11] = i[3]
col = cnames[counter]
if lastOwl == "none":
plt.bar(X, curOwl, color = col)
else:
plt.bar(X, curOwl, color = col, bottom = lastOwl)
lastOwl = curOwl
counter = counter + 5
plt.show()
| 1.398438 | 1 |
scraper/storage_spiders/yensaophuyenvn.py | chongiadung/choinho | 0 | 12799555 | <reponame>chongiadung/choinho
# Auto generated by generator.py. Delete this line if you make modification.
from scrapy.spiders import Rule
from scrapy.linkextractors import LinkExtractor
XPATH = {
'name' : "//h1[@class='nameprod']",
'price' : "//div[@class='c2']/div[@class='imp'][1]/span[@class='price']",
'category' : "//div[@class='nav_center']/span/a/span",
'description' : "//div[@class='intro']",
'images' : "//div[@class='ui-corner-all']/div/div/a/@href",
'canonical' : "",
'base_url' : "",
'brand' : ""
}
name = '<EMAIL>'
allowed_domains = ['yensaophuyen.vn']
start_urls = ['http://yensaophuyen.vn/']
tracking_url = ''
sitemap_urls = ['']
sitemap_rules = [('', 'parse_item')]
sitemap_follow = ['']
rules = [
Rule(LinkExtractor(allow=['/yen-huyet/','/hong-yen/','/bach-yen/','/yen-gay-to/','/chan-to-yen/','/yen-tuoi/','/che-to-yen-sup-yen/','/combo-yen-sao/']), 'parse_item'),
Rule(LinkExtractor(allow=['/yen-huyet.html','/hong-yen.html','/bach-yen.html','/yen-gay-to.html','/chan-to-yen.html','/yen-tuoi.html','/che-to-yen-sup-yen.html','/combo-yen-sao.html','page=\d+\.html$']), 'parse'),
#Rule(LinkExtractor(), 'parse_item_and_links'),
]
| 1.851563 | 2 |
datasets/dataset_utils.py | SimuJenni/Correspondences | 0 | 12799556 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains utilities for downloading and converting datasets."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
import pickle
import os
def save_obj(obj, save_dir, name):
with open(os.path.join(save_dir, name + '.pkl'), 'wb') as f:
pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)
def load_obj(name, file_dir):
with open(os.path.join(file_dir, name + '.pkl'), 'rb') as f:
return pickle.load(f)
def int64_feature(values):
"""Returns a TF-Feature of int64s.
Args:
values: A scalar or list of values.
Returns:
a TF-Feature.
"""
if not isinstance(values, (tuple, list)):
values = [values]
return tf.train.Feature(int64_list=tf.train.Int64List(value=values))
def floats_feature(value):
return tf.train.Feature(float_list=tf.train.FloatList(value=value))
def bytes_feature(values):
"""Returns a TF-Feature of bytes.
Args:
values: A string.
Returns:
a TF-Feature.
"""
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[values]))
def to_tfexample(image_data, image_format, im_size, bbox, azimuth, elevation, theta):
return tf.train.Example(features=tf.train.Features(feature={
'image/encoded': bytes_feature(image_data),
'image/format': bytes_feature(image_format),
'image/height': int64_feature(im_size[0]),
'image/width': int64_feature(im_size[1]),
'image/bbox': floats_feature(bbox),
'image/viewpoint': floats_feature([azimuth, elevation, theta]),
}))
def image_to_tfexample(image_data, image_format, height, width, class_id):
return tf.train.Example(features=tf.train.Features(feature={
'image/encoded': bytes_feature(image_data),
'image/format': bytes_feature(image_format),
'image/class/label': int64_feature(class_id),
'image/height': int64_feature(height),
'image/width': int64_feature(width),
}))
class ImageCoder(object):
"""Helper class that provides TensorFlow image coding utilities."""
def __init__(self):
# Create a single Session to run all image coding calls.
self._sess = tf.Session()
# Initializes function that converts PNG to JPEG data.
self._png_data = tf.placeholder(dtype=tf.string)
image = tf.image.decode_png(self._png_data, channels=3)
self._png_to_jpeg = tf.image.encode_jpeg(image, format='rgb', quality=100)
# Initializes function that converts CMYK JPEG data to RGB JPEG data.
self._cmyk_data = tf.placeholder(dtype=tf.string)
image = tf.image.decode_jpeg(self._cmyk_data, channels=0)
self._cmyk_to_rgb = tf.image.encode_jpeg(image, format='rgb', quality=100)
# Initializes function that decodes RGB JPEG data.
self._decode_jpeg_data = tf.placeholder(dtype=tf.string)
self._decode_jpeg = tf.image.decode_jpeg(self._decode_jpeg_data, channels=3)
# Initializes function that encodes RGB JPEG data.
self._encode_image_data = tf.placeholder(dtype=tf.uint8)
self._encode_jpeg = tf.image.encode_jpeg(self._encode_image_data)
def png_to_jpeg(self, image_data):
return self._sess.run(self._png_to_jpeg,
feed_dict={self._png_data: image_data})
def cmyk_to_rgb(self, image_data):
return self._sess.run(self._cmyk_to_rgb,
feed_dict={self._cmyk_data: image_data})
def decode_jpeg(self, image_data):
image = self._sess.run(self._decode_jpeg,
feed_dict={self._decode_jpeg_data: image_data})
assert len(image.shape) == 3
assert image.shape[2] == 3
return image
def encode_jpeg(self, image_data):
image_data = image_data.astype(dtype=np.uint8)
image = self._sess.run(self._encode_jpeg,
feed_dict={self._encode_image_data: image_data})
return image
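# Usage sketch (added for illustration; not part of the original module). The JPEG path
# below is hypothetical; ImageCoder builds its decode/encode graphs once, so a single
# instance can be reused for many images.
if __name__ == '__main__':
    coder = ImageCoder()
    with open('example.jpg', 'rb') as f:
        jpeg_bytes = f.read()
    image = coder.decode_jpeg(jpeg_bytes)  # numpy array with shape (height, width, 3)
    print('decoded image shape:', image.shape)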
| 2.265625 | 2 |
main.py | diekmann/RFC3526_reproduce | 0 | 12799557 | #!/usr/bin/env python3
from decimal import *
from binascii import unhexlify
# need some precision to evaluate PI
getcontext().prec = 2000
# from the python docs
def pi():
"""Compute Pi to the current precision.
>>> print(pi())
3.141592653589793238462643383
"""
getcontext().prec += 2 # extra digits for intermediate steps
three = Decimal(3) # substitute "three=3.0" for regular floats
lasts, t, s, n, na, d, da = 0, three, 3, 1, 0, 0, 24
while s != lasts:
lasts = s
n, na = n+na, na+8
d, da = d+da, da+32
t = (t * n) / d
s += t
getcontext().prec -= 2
return +s # unary plus applies the new precision
### The DH values defined in the RFC
#2. 1536-bit MODP Group
#
# The 1536 bit MODP group has been used for the implementations for
# quite a long time, but was not defined in RFC 2409 (IKE).
# Implementations have been using group 5 to designate this group, we
# standardize that practice here.
#
# The prime is: 2^1536 - 2^1472 - 1 + 2^64 * { [2^1406 pi] + 741804 }
f = 2**1536 - 2**1472 - 1 + 2**64 * ((Decimal(2**1406) * pi()).to_integral_exact(ROUND_FLOOR) + 741804)
# Its hexadecimal value is:
#
p ="""FFFFFFFF FFFFFFFF C90FDAA2 2168C234 C4C6628B 80DC1CD1
29024E08 8A67CC74 020BBEA6 3B139B22 514A0879 8E3404DD
EF9519B3 CD3A431B 302B0A6D F25F1437 4FE1356D 6D51C245
E485B576 625E7EC6 F44C42E9 A637ED6B 0BFF5CB6 F406B7ED
EE386BFB 5A899FA5 AE9F2411 7C4B1FE6 49286651 ECE45B3D
C2007CB8 A163BF05 98DA4836 1C55D39A 69163FA8 FD24CF5F
83655D23 DCA3AD96 1C62F356 208552BB 9ED52907 7096966D
670C354E 4ABC9804 F1746C08 CA237327 FFFFFFFF FFFFFFFF"""
p = int.from_bytes(unhexlify(p.replace('\n', '').replace(' ', '')), 'big')
assert Decimal(p) - f == 0
#
# The generator is: 2.
#3. 2048-bit MODP Group
#
# This group is assigned id 14.
#
# This prime is: 2^2048 - 2^1984 - 1 + 2^64 * { [2^1918 pi] + 124476 }
f = 2**2048 - 2**1984 - 1 + 2**64 * ((Decimal(2**1918) * pi()).to_integral_exact(ROUND_FLOOR) + 124476)
#
# Its hexadecimal value is:
#
p ="""FFFFFFFF FFFFFFFF C90FDAA2 2168C234 C4C6628B 80DC1CD1
29024E08 8A67CC74 020BBEA6 3B139B22 514A0879 8E3404DD
EF9519B3 CD3A431B 302B0A6D F25F1437 4FE1356D 6D51C245
E485B576 625E7EC6 F44C42E9 A637ED6B 0BFF5CB6 F406B7ED
EE386BFB 5A899FA5 AE9F2411 7C4B1FE6 49286651 ECE45B3D
C2007CB8 A163BF05 98DA4836 1C55D39A 69163FA8 FD24CF5F
83655D23 DCA3AD96 1C62F356 208552BB 9ED52907 7096966D
670C354E 4ABC9804 F1746C08 CA18217C 32905E46 2E36CE3B
E39E772C 180E8603 9B2783A2 EC07A28F B5C55DF0 6F4C52C9
DE2BCBF6 95581718 3995497C EA956AE5 15D22618 98FA0510
15728E5A 8AACAA68 FFFFFFFF FFFFFFFF"""
p = int.from_bytes(unhexlify(p.replace('\n', '').replace(' ', '')), 'big')
assert Decimal(p) - f == 0
#
# The generator is: 2.
#4. 3072-bit MODP Group
#
# This group is assigned id 15.
#
# This prime is: 2^3072 - 2^3008 - 1 + 2^64 * { [2^2942 pi] + 1690314 }
f = 2**3072 - 2**3008 - 1 + 2**64 * ((Decimal(2**2942) * pi()).to_integral_exact(ROUND_FLOOR) + 1690314)
#
# Its hexadecimal value is:
#
p ="""FFFFFFFF FFFFFFFF C90FDAA2 2168C234 C4C6628B 80DC1CD1
29024E08 8A67CC74 020BBEA6 3B139B22 514A0879 8E3404DD
EF9519B3 CD3A431B 302B0A6D F25F1437 4FE1356D 6D51C245
E485B576 625E7EC6 F44C42E9 A637ED6B 0BFF5CB6 F406B7ED
EE386BFB 5A899FA5 AE9F2411 7C4B1FE6 49286651 ECE45B3D
C2007CB8 A163BF05 98DA4836 1C55D39A 69163FA8 FD24CF5F
83655D23 DCA3AD96 1C62F356 208552BB 9ED52907 7096966D
670C354E 4ABC9804 F1746C08 CA18217C 32905E46 2E36CE3B
E39E772C 180E8603 9B2783A2 EC07A28F B5C55DF0 6F4C52C9
DE2BCBF6 95581718 3995497C EA956AE5 15D22618 98FA0510
15728E5A 8AAAC42D AD33170D 04507A33 A85521AB DF1CBA64
ECFB8504 58DBEF0A 8AEA7157 5D060C7D B3970F85 A6E1E4C7
ABF5AE8C DB0933D7 1E8C94E0 4A25619D CEE3D226 1AD2EE6B
F12FFA06 D98A0864 D8760273 3EC86A64 521F2B18 177B200C
BBE11757 7A615D6C 770988C0 BAD946E2 08E24FA0 74E5AB31
43DB5BFC E0FD108E 4B82D120 A93AD2CA FFFFFFFF FFFFFFFF"""
p = int.from_bytes(unhexlify(p.replace('\n', '').replace(' ', '')), 'big')
assert Decimal(p) - f == 0
#
# The generator is: 2.
#5. 4096-bit MODP Group
#
# This group is assigned id 16.
#
# This prime is: 2^4096 - 2^4032 - 1 + 2^64 * { [2^3966 pi] + 240904 }
f = 2**4096 - 2**4032 - 1 + 2**64 * ((Decimal(2**3966) * pi()).to_integral_exact(ROUND_FLOOR) + 240904)
#
# Its hexadecimal value is:
#
p ="""FFFFFFFF FFFFFFFF C90FDAA2 2168C234 C4C6628B 80DC1CD1
29024E08 8A67CC74 020BBEA6 3B139B22 514A0879 8E3404DD
EF9519B3 CD3A431B 302B0A6D F25F1437 4FE1356D 6D51C245
E485B576 625E7EC6 F44C42E9 A637ED6B 0BFF5CB6 F406B7ED
EE386BFB 5A899FA5 AE9F2411 7C4B1FE6 49286651 ECE45B3D
C2007CB8 A163BF05 98DA4836 1C55D39A 69163FA8 FD24CF5F
83655D23 DCA3AD96 1C62F356 208552BB 9ED52907 7096966D
670C354E 4ABC9804 F1746C08 CA18217C 32905E46 2E36CE3B
E39E772C 180E8603 9B2783A2 EC07A28F B5C55DF0 6F4C52C9
DE2BCBF6 95581718 3995497C EA956AE5 15D22618 98FA0510
15728E5A 8AAAC42D AD33170D 04507A33 A85521AB DF1CBA64
ECFB8504 58DBEF0A 8AEA7157 5D060C7D B3970F85 A6E1E4C7
ABF5AE8C DB0933D7 1E8C94E0 4A25619D CEE3D226 1AD2EE6B
F12FFA06 D98A0864 D8760273 3EC86A64 521F2B18 177B200C
BBE11757 7A615D6C 770988C0 BAD946E2 08E24FA0 74E5AB31
43DB5BFC E0FD108E 4B82D120 A9210801 1A723C12 A787E6D7
88719A10 BDBA5B26 99C32718 6AF4E23C 1A946834 B6150BDA
2583E9CA 2AD44CE8 DBBBC2DB 04DE8EF9 2E8EFC14 1FBECAA6
287C5947 4E6BC05D 99B2964F A090C3A2 233BA186 515BE7ED
1F612970 CEE2D7AF B81BDD76 2170481C D0069127 D5B05AA9
93B4EA98 8D8FDDC1 86FFB7DC 90A6C08F 4DF435C9 34063199
FFFFFFFF FFFFFFFF"""
p = int.from_bytes(unhexlify(p.replace('\n', '').replace(' ', '')), 'big')
assert Decimal(p) - f == 0
#
# The generator is: 2.
#6. 6144-bit MODP Group
#
# This group is assigned id 17.
#
# This prime is: 2^6144 - 2^6080 - 1 + 2^64 * { [2^6014 pi] + 929484 }
f = 2**6144 - 2**6080 - 1 + 2**64 * ((Decimal(2**6014) * pi()).to_integral_exact(ROUND_FLOOR) + 929484)
#
# Its hexadecimal value is:
#
p ="""FFFFFFFF FFFFFFFF C90FDAA2 2168C234 C4C6628B 80DC1CD1 29024E08
8A67CC74 020BBEA6 3B139B22 514A0879 8E3404DD EF9519B3 CD3A431B
302B0A6D F25F1437 4FE1356D 6D51C245 E485B576 625E7EC6 F44C42E9
A637ED6B 0BFF5CB6 F406B7ED EE386BFB 5A899FA5 AE9F2411 7C4B1FE6
49286651 ECE45B3D C2007CB8 A163BF05 98DA4836 1C55D39A 69163FA8
FD24CF5F 83655D23 DCA3AD96 1C62F356 208552BB 9ED52907 7096966D
670C354E 4ABC9804 F1746C08 CA18217C 32905E46 2E36CE3B E39E772C
180E8603 9B2783A2 EC07A28F B5C55DF0 6F4C52C9 DE2BCBF6 95581718
3995497C EA956AE5 15D22618 98FA0510 15728E5A 8AAAC42D AD33170D
04507A33 A85521AB DF1CBA64 ECFB8504 58DBEF0A 8AEA7157 5D060C7D
B3970F85 A6E1E4C7 ABF5AE8C DB0933D7 1E8C94E0 4A25619D CEE3D226
1AD2EE6B F12FFA06 D98A0864 D8760273 3EC86A64 521F2B18 177B200C
BBE11757 7A615D6C 770988C0 BAD946E2 08E24FA0 74E5AB31 43DB5BFC
E0FD108E 4B82D120 A9210801 1A723C12 A787E6D7 88719A10 BDBA5B26
99C32718 6AF4E23C 1A946834 B6150BDA 2583E9CA 2AD44CE8 DBBBC2DB
04DE8EF9 2E8EFC14 1FBECAA6 287C5947 4E6BC05D 99B2964F A090C3A2
233BA186 515BE7ED 1F612970 CEE2D7AF B81BDD76 2170481C D0069127
D5B05AA9 93B4EA98 8D8FDDC1 86FFB7DC 90A6C08F 4DF435C9 34028492
36C3FAB4 D27C7026 C1D4DCB2 602646DE C9751E76 3DBA37BD F8FF9406
AD9E530E E5DB382F 413001AE B06A53ED 9027D831 179727B0 865A8918
DA3EDBEB CF9B14ED 44CE6CBA CED4BB1B DB7F1447 E6CC254B 33205151
2BD7AF42 6FB8F401 378CD2BF 5983CA01 C64B92EC F032EA15 D1721D03
F482D7CE 6E74FEF6 D55E702F 46980C82 B5A84031 900B1C9E 59E7C97F
BEC7E8F3 23A97A7E 36CC88BE 0F1D45B7 FF585AC5 4BD407B2 2B4154AA
CC8F6D7E BF48E1D8 14CC5ED2 0F8037E0 A79715EE F29BE328 06A1D58B
B7C5DA76 F550AA3D 8A1FBFF0 EB19CCB1 A313D55C DA56C9EC 2EF29632
387FE8D7 6E3C0468 043E8F66 3F4860EE 12BF2D5B 0B7474D6 E694F91E
6DCC4024 FFFFFFFF FFFFFFFF"""
p = int.from_bytes(unhexlify(p.replace('\n', '').replace(' ', '')), 'big')
assert Decimal(p) - f == 0
#
# The generator is: 2.
#7. 8192-bit MODP Group
# (needs higher precision)
getcontext().prec = 4000
#
# This group is assigned id 18.
#
# This prime is: 2^8192 - 2^8128 - 1 + 2^64 * { [2^8062 pi] + 4743158 }
f = 2**8192 - 2**8128 - 1 + 2**64 * ((Decimal(2**8062) * pi()).to_integral_exact(ROUND_FLOOR) + 4743158)
#
# Its hexadecimal value is:
#
p = """FFFFFFFF FFFFFFFF C90FDAA2 2168C234 C4C6628B 80DC1CD1
29024E08 8A67CC74 020BBEA6 3B139B22 514A0879 8E3404DD
EF9519B3 CD3A431B 302B0A6D F25F1437 4FE1356D 6D51C245
E485B576 625E7EC6 F44C42E9 A637ED6B 0BFF5CB6 F406B7ED
EE386BFB 5A899FA5 AE9F2411 7C4B1FE6 49286651 ECE45B3D
C2007CB8 A163BF05 98DA4836 1C55D39A 69163FA8 FD24CF5F
83655D23 DCA3AD96 1C62F356 208552BB 9ED52907 7096966D
670C354E 4ABC9804 F1746C08 CA18217C 32905E46 2E36CE3B
E39E772C 180E8603 9B2783A2 EC07A28F B5C55DF0 6F4C52C9
DE2BCBF6 95581718 3995497C EA956AE5 15D22618 98FA0510
15728E5A 8AAAC42D AD33170D 04507A33 A85521AB DF1CBA64
ECFB8504 58DBEF0A 8AEA7157 5D060C7D B3970F85 A6E1E4C7
ABF5AE8C DB0933D7 1E8C94E0 4A25619D CEE3D226 1AD2EE6B
F12FFA06 D98A0864 D8760273 3EC86A64 521F2B18 177B200C
BBE11757 7A615D6C 770988C0 BAD946E2 08E24FA0 74E5AB31
43DB5BFC E0FD108E 4B82D120 A9210801 1A723C12 A787E6D7
88719A10 BDBA5B26 99C32718 6AF4E23C 1A946834 B6150BDA
2583E9CA 2AD44CE8 DBBBC2DB 04DE8EF9 2E8EFC14 1FBECAA6
287C5947 4E6BC05D 99B2964F A090C3A2 233BA186 515BE7ED
1F612970 CEE2D7AF B81BDD76 2170481C D0069127 D5B05AA9
93B4EA98 8D8FDDC1 86FFB7DC 90A6C08F 4DF435C9 34028492
36C3FAB4 D27C7026 C1D4DCB2 602646DE C9751E76 3DBA37BD
F8FF9406 AD9E530E E5DB382F 413001AE B06A53ED 9027D831
179727B0 865A8918 DA3EDBEB CF9B14ED 44CE6CBA CED4BB1B
DB7F1447 E6CC254B 33205151 2BD7AF42 6FB8F401 378CD2BF
5983CA01 C64B92EC F032EA15 D1721D03 F482D7CE 6E74FEF6
D55E702F 46980C82 B5A84031 900B1C9E 59E7C97F BEC7E8F3
23A97A7E 36CC88BE 0F1D45B7 FF585AC5 4BD407B2 2B4154AA
CC8F6D7E BF48E1D8 14CC5ED2 0F8037E0 A79715EE F29BE328
06A1D58B B7C5DA76 F550AA3D 8A1FBFF0 EB19CCB1 A313D55C
DA56C9EC 2EF29632 387FE8D7 6E3C0468 043E8F66 3F4860EE
12BF2D5B 0B7474D6 E694F91E 6DBE1159 74A3926F 12FEE5E4
38777CB6 A932DF8C D8BEC4D0 73B931BA 3BC832B6 8D9DD300
741FA7BF 8AFC47ED 2576F693 6BA42466 3AAB639C 5AE4F568
3423B474 2BF1C978 238F16CB E39D652D E3FDB8BE FC848AD9
22222E04 A4037C07 13EB57A8 1A23F0C7 3473FC64 6CEA306B
4BCBC886 2F8385DD FA9D4B7F A2C087E8 79683303 ED5BDD3A
062B3CF5 B3A278A6 6D2A13F8 3F44F82D DF310EE0 74AB6A36
4597E899 A0255DC1 64F31CC5 0846851D F9AB4819 5DED7EA1
B1D510BD 7EE74D73 FAF36BC3 1ECFA268 359046F4 EB879F92
4009438B 481C6CD7 889A002E D5EE382B C9190DA6 FC026E47
9558E447 5677E9AA 9E3050E2 765694DF C81F56E8 80B96E71
60C980DD 98EDD3DF FFFFFFFF FFFFFFFF"""
p = int.from_bytes(unhexlify(p.replace('\n', '').replace(' ', '')), 'big')
print(Decimal(p) - f)
assert Decimal(p) - f == 0
#
# The generator is: 2.
| 3.515625 | 4 |
equipment_engineering/meter_reader_4_pointer/setup.py | afterloe/opencv-practice | 5 | 12799558 | <reponame>afterloe/opencv-practice
#!/usr/bin/env python3
# -*- coding=utf-8 -*-
from setuptools import setup, find_packages
PROJECT_NAME = "meter_reader_4_pointer"
VERSION = "1.2.0"
setup(
name=PROJECT_NAME,
version=VERSION,
packages=find_packages(),
include_package_data=True,
install_requires=["opencv-python", "pyyaml", "imutils"],
)
| 1.109375 | 1 |
setup.py | pingf/yadashcomp | 0 | 12799559 | <reponame>pingf/yadashcomp<filename>setup.py
from setuptools import setup
exec (open('yadashcomp/version.py').read())
setup(
name='yadashcomp',
version=__version__,
author='pingf',
packages=['yadashcomp'],
include_package_data=True,
license='MIT',
description='yet another dash components',
install_requires=[]
)
| 1.34375 | 1 |
play/botos3write.py | mpechner/Security-Camera | 0 | 12799560 | import boto3
import os
camera='1'
bucket='mikey.com-security'
path='/mnt/cameraimages/images'
s3 = boto3.resource('s3')
for dirName, subdirList, fileList in os.walk(path):
for fname in fileList:
if len(path) == len(dirName):
finame=fname
else:
finame = '%s/%s'%(dirName[len(path)+1:], fname)
print(finame)
res=s3.meta.client.upload_file(dirName + '/' + fname, bucket, finame)
print(res)
os.unlink(dirName + '/' + fname)
| 2.28125 | 2 |
lib/mags_mash/utils/mags_filter.py | kbaseapps/mags_mash | 1 | 12799561 | <reponame>kbaseapps/mags_mash
from installed_clients.WorkspaceClient import Workspace
from installed_clients.DataFileUtilClient import DataFileUtil
import numpy as np
import pandas as pd
import os
from collections import defaultdict
from scipy.cluster.hierarchy import linkage, leaves_list
def create_tree(GOLD, tree_cols, dist_compl, source_order=None):
"""Recursively group the GOLD metadata rows by the ecosystem columns in tree_cols,
building a nested tree whose leaves (one per Project / Study Name) carry each MAG's
distance, completeness and contamination, plus per-source counts when source_order is given.
"""
tree = []
if len(tree_cols) == 0:
return tree
col = tree_cols[0]
type_count = GOLD[col].value_counts().to_dict()
for t in type_count:
# if len(t) > name_max_len:
# name = t[:name_max_len] + '...'
# else:
# name = t
count = "({})".format(type_count[t])
leaf = create_tree(GOLD[GOLD[col]==t], tree_cols[1:], dist_compl, source_order=source_order)
if leaf == []:
if col == "Project / Study Name":
mag_dict = dist_compl[t]
dist = {mag:val[0] for mag, val in mag_dict.items()}
compl = {mag:val[1] for mag, val in mag_dict.items()}
cont = {mag:val[2] for mag, val in mag_dict.items()}
else:
dist, compl, cont = "", "", ""
print("-"*90)
print('project name:',t)
print("gold stuff:",GOLD[GOLD["Project / Study Name"]==t].iloc[0])
print("-"*90)
trunc_name = GOLD[GOLD["Project / Study Name"] == t].iloc[0]['IMG Genome ID ']
# is terminal node/actually a leaf
# here we change the terminal nodes to have dists as a dict
# of IMG_id -> distance,
# and we include the list of img_id's for each
tree.append({
'truncated_name': str(trunc_name),
'name' : t,
'count': "({})".format(len(dist))
})
if source_order!=None:
tree[-1]['dist'] = dist
tree[-1]['compl'] = compl
tree[-1]['cont'] = cont
else:
children = []
for key, val in dist.items():
child = {}
child['truncated_name'] = key
child['count'] = ''
child['dist'] = val
children.append(child)
tree[-1]['children'] = children
else:
tree.append({
'truncated_name':t,
'count':count,
'children':leaf
})
if source_order!=None:
sources = []
if leaf == []:
g = GOLD[GOLD[col]==t][['upa','mag_id']]
upas = g['upa'].tolist()
ss = {}
for upa in upas:
mag_ids = g[g['upa']==upa]['mag_id'].tolist()
ss[upa] = mag_ids
for i, s in enumerate(source_order):
if s in ss:
sources.append(ss[s])
# sources[i] = ss[upa]
else:
sources.append([])
else:
source_count = GOLD[GOLD[col]==t]['upa'].value_counts().to_dict()
for i, s in enumerate(source_order):
if s in source_count:
sources.append(source_count[s])
# sources[i] = source_count[s]
else:
sources.append(0)
tree[-1]['sources'] = sources
return tree
def get_location_markers(ids, source=None):
'''
For now this simply returns a fixed set of placeholder markers
around the San Francisco Bay Area. Returns a list of markers.
ids: list of ids
marker format:
{
'name': name of marker
'lat': latitude as a float
'lng': longitude as a float
'details': pop up details
}
'''
markers = [
{'name':"LBL", "lat":37.877344, "lng":-122.250694, "details":"This is Lawrence Berkeley National Laboratory."},
{'name':"Golden Gate Bridge", "lat": 37.817060, "lng": -122.478206, "details":"This is the Golden Gate Bridge."},
{'name':"SFO Airport", 'lat':37.616310, 'lng': -122.386793, 'details':"This is San Francisco International Airport."},
{'name':"<NAME>", "lat": 37.881523, "lng": -121.914325, "details":"This is <NAME>."}
]
if source!= None:
for m in markers:
m['source'] = "Input source:"+source
return markers
def unwind_tree(X, tree):
"""
"""
if tree.get('children'):
for t in tree['children']:
if 'compl' in t:
X.append(np.array([len(mag_ids) for mag_ids in t['sources']]))
else:
X.append(np.array(t['sources']))
X = unwind_tree(X, t)
return X
def remap_sources(sources, upa_order):
new_sources = {}
for j, i in enumerate(upa_order):
val = sources[i]
if val != 0 and val != []:
new_sources[j] = val
return new_sources
def rewind_tree(tree, upa_order):
"""
"""
for t_ix, t in enumerate(tree['children']):
new_sources = remap_sources(t['sources'], upa_order)
t['sources'] = new_sources
if t.get('children'):
t = rewind_tree(t, upa_order)
tree['children'][t_ix] = t
return tree
def get_source_order(tree, upa_names):
"""Cluster the input sources by their per-node count profiles (Ward linkage)
and return the source (column) order given by the dendrogram leaves.
"""
X = unwind_tree([tree['sources']], tree)
print("-"*80)
print("je suis here first:",X)
X = np.transpose(np.array(X))
print("je suis here:",X)
print('-'*80)
z = linkage(X, 'ward')
upa_order = leaves_list(z)
return upa_order
def filter_results(ws_url, cb_url, query_results, n_max_results, max_distance, min_completeness, max_contamination):
"""
Here we do a combination of getting all the relevant statistics from the data csv, filtering
the outputs according to the provided inputs, and staging some of the outputs for the templates.
"""
if len(query_results) > 1:
upa_to_name = get_upa_names(ws_url, cb_url, list(query_results.keys()))
else:
upa_to_name = {list(query_results.keys())[0]:""}
currdir = os.path.dirname(__file__)
gold_path = os.path.join(currdir,'data','GOLD-metadata.csv')
GOLD = pd.read_csv(gold_path)
upa_names = []
upas = []
dist_compl = {}
all_GOLD = []
# id_to_inputs = defaultdict(lambda:[])
stats = []
for upa in query_results:
upas.append(upa)
upa_name = upa_to_name[upa]
curr_GOLD = GOLD[GOLD['GOLD Analysis Project ID'].isin([val[2]['GOLD_Analysis_ID'] for key, val in query_results[upa].items()])]
tree_cols = ['Ecosystem','Ecosystem Category','Ecosystem Subtype',\
'Ecosystem Type','Specific Ecosystem','Project / Study Name']
print("curr gold cols 1:",curr_GOLD.columns)
curr_GOLD = curr_GOLD.fillna({col:"Unknown" for col in tree_cols})
print("curr gold cols 2:",curr_GOLD.columns)
curr_stats = get_statistics(query_results[upa], curr_GOLD, upa_name=upa_name)
curr_stats, curr_dist_compl = filter_stats(curr_stats, n_max_results, max_distance, min_completeness, max_contamination)
curr_GOLD = curr_GOLD[curr_GOLD['GOLD Analysis Project ID'].isin([s['GOLD_Analysis_ID'] for s in curr_stats])]
curr_GOLD['upa'] = upa
print("curr gold cols 3:",curr_GOLD.columns)
# We want to get a row for each mag id in curr_GOLD,
# right now we only have a row for each img id
stats += curr_stats
# group them by img_ids
curr_GOLD.set_index('IMG Genome ID ', inplace=True)
print("curr gold cols 4:",curr_GOLD.columns)
new_gold = defaultdict(lambda: [])
for i, cs in enumerate(curr_stats):
img_id = cs['IMG_Genome_ID']
mag_id = cs['mag_id']
gold_info = curr_GOLD.loc[int(img_id),:]
new_gold['mag_id'].append(mag_id)
new_gold['IMG Genome ID '].append(img_id)
for key, val in gold_info.iteritems():
new_gold[key].append(val)
new_gold = pd.DataFrame.from_dict(new_gold)
all_GOLD.append(new_gold)
for key in curr_dist_compl:
if key in dist_compl:
for mag_key in curr_dist_compl[key]:
dist_compl[key][mag_key] = curr_dist_compl[key][mag_key]
else:
dist_compl[key] = curr_dist_compl[key]
# dist_1, compl_1, cont_1 = dist_compl[key]
# dist_2, compl_2, cont_2 = curr_dist_compl[key]
# if compl_1 == compl_2 and cont_1 == cont_2:
# # check to see distance dictionary
# unincluded_keys = list(set(list(dist_2.keys())) - set(list(dist_1.keys())))
# for uinc_key in unincluded_keys:
# dist_1[uinc_key] = dist_2[uinc_key]
# dist_compl[key] = [dist_1, compl_1, cont_1]
# else:
# raise ValueError('Same project ids but contamination and/or completeness do not match')
# # id_to_inputs[key].append(upa_name)
# else:
# dist_compl[key] = curr_dist_compl[key]
upa_names.append(upa_name)
all_GOLD = pd.concat(all_GOLD, ignore_index=True)
tree_cols = ['Ecosystem','Ecosystem Category','Ecosystem Subtype',\
'Ecosystem Type','Specific Ecosystem','Project / Study Name']
if len(upas) == 1:
tree = create_tree(all_GOLD, tree_cols, dist_compl)
count = sum([ int(t['count'][1:-1]) for t in tree]) #len(query_results[upas[0]])
tree = {"truncated_name":"", "count":"({})".format(str(count)), "count_num":count, "children":tree}
else:
tree = create_tree(all_GOLD, tree_cols, dist_compl, source_order=upas)
sources = [0 for _ in range(len(upa_names))]
for i in range(len(upa_names)):
for t in tree:
sources[i]+=t['sources'][i]
total_num = sum(sources)
tree = {"truncated_name":"", "count":"({})".format(str(total_num)), 'count_num':total_num, 'sources':sources, "children":tree}
upa_order = get_source_order(tree, upa_names)
tree['sources'] = remap_sources(tree['sources'], upa_order)
tree = rewind_tree(tree, upa_order)
new_upa_names = []
for i in upa_order:
new_upa_names.append(upa_names[i])
upa_names = new_upa_names
# TEMPORARY MARKER SET UP
markers = get_location_markers(set([s['mag_id'] for s in stats]))
return stats, upa_names, tree, markers
def filter_stats(stats, n_max_results, max_distance, min_completeness, max_contamination):
if max_distance:
stats = [s for s in stats if s['dist'] <= max_distance]
if min_completeness:
stats = [s for s in stats if s['completeness'] >= min_completeness]
if max_contamination:
stats = [s for s in stats if s['contamination'] <= max_contamination]
stats = sorted(stats, key=lambda s: s['dist'])
if len(stats) > n_max_results:
stats = stats[:n_max_results]
dist_compl = {}
for s in stats:
if s['project'] not in dist_compl:
dist_compl[s['project']] = {}
dist_compl[s['project']][s['mag_id']] = [round(s['dist'], 3), round(s['completeness'],2), round(s['contamination'],2)]
# dist_compl[s['project']] = [{s['mag_id']:round(s['dist'], 3)}, round(s['completeness'],2), round(s['contamination'],2)]
else:
dist_compl[s['project']][s['mag_id']] = [round(s['dist'], 3), round(s['completeness'],2), round(s['contamination'],2)]
# print("mapping the items:",s, dist_compl[s['project']])
# if round(s['completeness'],2) == dist_compl[s['project']][1] and round(s['contamination'],2) == dist_compl[s['project']][2]:
# dist_compl[s['project']][0][s['mag_id']] = (round(s['dist'], 3))
# else:
# raise ValueError('same project ids but contamination and/or completeness do not match',\
# round(s['completeness'],2), dist_compl[s['project']][1],
# round(s['contamination'],2), dist_compl[s['project']][2])
# dist_compl = {s['project']:(round(s['dist'], 3), round(s['completeness'], 2), round(s['contamination'], 2)) for s in stats}
return stats, dist_compl
def get_upa_names(ws_url, cb_url, upas):
"""
"""
ws = Workspace(ws_url)
objs = ws.get_object_info3({
'objects': [{'ref':upa} for upa in upas]
})
upa_to_name = {'/'.join([str(info[6]), str(info[0]), str(info[4])]):info[1] for info in objs['infos']}
if len(upa_to_name)==len(upas):
return upa_to_name
missing_upas = list(set(upas) - set(list(upa_to_name.keys())))
dfu = DataFileUtil(cb_url)
objs = dfu.get_objects({'object_refs':missing_upas})['data']
if len(objs) != len(missing_upas):
raise ValueError("Could not find all input names. len upas: %s len objs: %s"%(len(upas), len(objs)), upas, [obj['info'] for obj in objs])
for obj in objs:
info = obj['info']
upa = '/'.join([str(info[6]), str(info[0]), str(info[4])])
upa_to_name[upa] = info[1]
return upa_to_name
def get_statistics(ids, GOLD, upa_name=None):
'''
get statistics from the GOLD and statistics csvs
ids:
GOLD:
'''
output = []
currdir = os.path.dirname(__file__)
stats_path = os.path.join(currdir, 'data', 'Stats-taxonomy.csv')
Stats = pd.read_csv(stats_path)
curr_stats = Stats[Stats['binid'].isin(ids.keys())]
curr_stats = curr_stats.fillna('Unknown')
for id_ in ids:
curr = {}
dist, kb_id, relatedids = ids[id_]
if upa_name != None:
curr['input_name'] = upa_name
curr['dist'] = dist
# if kb_id:
# curr['kb_id'] = kb_id
# else:
# curr['kb_id'] = ''
id_stats = curr_stats[curr_stats.binid == id_]
curr['completeness'] = id_stats.iloc[0]['completeness']
curr['contamination'] = id_stats.iloc[0]['contamination']
curr['MIMAG'] = id_stats.iloc[0]['MIMAG']
curr['mag_id'] = id_
curr['IMG_Genome_ID'] = id_.split('_')[0]
img_link = "https://img.jgi.doe.gov/cgi-bin/m/main.cgi?section=MetaDetail&page=metagenomeBinScaffolds&taxon_oid=%s&bin_name=%s"%(id_.split('_')[0], id_)
curr['IMG_link'] = img_link
if relatedids:
for key in relatedids:
if relatedids[key]:
curr[key] = relatedids[key]
else:
curr[key] = 'Unknown'
if relatedids['GOLD_Analysis_ID']:
curr['project'] = GOLD[GOLD['GOLD Analysis Project ID'] == relatedids['GOLD_Analysis_ID']].iloc[0]['Project / Study Name']
else:
curr['project'] = 'Unknown'
output.append(curr)
return output
| 2.453125 | 2 |
codewars/7kyu/dinamuh/SpeedControl/test.py | dinamuh/Training_one | 0 | 12799562 | <filename>codewars/7kyu/dinamuh/SpeedControl/test.py
from main import gps
def test_gps(benchmark):
assert benchmark(gps, 20, [0.0, 0.23, 0.46, 0.69, 0.92, 1.15, 1.38, 1.61]) == 41
assert benchmark(gps, 12, [0.0, 0.11, 0.22, 0.33, 0.44, 0.65, 1.08, 1.26, 1.68, 1.89, 2.1, 2.31, 2.52, 3.25]) == 219
assert benchmark(gps, 20,
[0.0, 0.18, 0.36, 0.54, 0.72, 1.05, 1.26, 1.47, 1.92, 2.16, 2.4, 2.64, 2.88, 3.12, 3.36, 3.6,
3.84]) == 80
assert benchmark(gps, 14, [0.0, 0.01, 0.36, 0.6, 0.84, 1.05, 1.26, 1.47, 1.68, 1.89, 2.1, 2.31, 2.52, 2.73, 2.94,
3.15]) == 90
assert benchmark(gps, 17, [0.0, 0.02, 0.36, 0.54, 0.72, 0.9, 1.08, 1.26, 1.44, 1.62, 1.8]) == 72
assert benchmark(gps, 12, [0.0, 0.24, 0.48, 0.72, 0.96, 1.2, 1.44, 1.68, 1.92, 2.16, 2.4]) == 72
assert benchmark(gps, 17, [0.0, 0.02, 0.44, 0.66, 0.88, 1.1, 1.32, 1.54, 1.76]) == 88
assert benchmark(gps, 16,
[0.0, 0.2, 0.4, 0.6, 0.8, 1.0, 1.32, 1.54, 1.76, 1.98, 2.2, 2.42, 2.76, 2.99, 3.22, 3.45]) == 76
assert benchmark(gps, 17,
[0.0, 0.01, 0.36, 0.75, 1.0, 1.25, 1.5, 1.75, 2.0, 2.25, 2.5, 2.75, 3.0, 3.25, 3.5, 3.75, 4.0,
4.25, 4.5, 4.75]) == 82
assert benchmark(gps, 19,
[0.0, 0.2, 0.4, 0.69, 0.92, 1.15, 1.38, 1.61, 1.92, 2.16, 2.4, 2.64, 2.88, 3.12, 3.36]) == 58
assert benchmark(gps, 19, []) == 0
assert benchmark(gps, 19, [0.0]) == 0
| 2.59375 | 3 |
thelper/gui/utils.py | crim-ca/thelper | 0 | 12799563 | """Graphical User Interface (GUI) utility module.
This module contains various tools and utilities used to instantiate annotators and GUI elements.
"""
import logging
import thelper.utils
logger = logging.getLogger(__name__)
def create_key_listener(callback):
"""Returns a key press listener based on pynput.keyboard (used for mocking)."""
import pynput.keyboard
return pynput.keyboard.Listener(on_press=callback)
def create_annotator(session_name, save_dir, config, datasets):
"""Instantiates a GUI annotation tool based on the type contained in the config dictionary.
The tool type is expected to be in the configuration dictionary's `annotator` field, under the `type` key. For more
information on the configuration, refer to :class:`thelper.gui.annotators.Annotator`. The instantiated type must be
compatible with the constructor signature of :class:`thelper.gui.annotators.Annotator`. The object's constructor will
be given the full config dictionary.
Args:
session_name: name of the annotation session used for printing and to create output directories.
save_dir: path to the session directory where annotations and other outputs will be saved.
config: full configuration dictionary that will be parsed for annotator parameters.
datasets: map of named dataset parsers that will provide the data to annotate.
Returns:
The fully-constructed annotator object, ready to begin annotation via its ``run()`` function.
.. seealso::
| :class:`thelper.gui.annotators.Annotator`
"""
if "annotator" not in config or not config["annotator"]:
raise AssertionError("config missing 'annotator' field")
annotator_config = config["annotator"]
if "type" not in annotator_config or not annotator_config["type"]:
raise AssertionError("annotator config missing 'type' field")
annotator_type = thelper.utils.import_class(annotator_config["type"])
return annotator_type(session_name, config, save_dir, datasets)
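# Minimal configuration sketch (added for illustration; not part of the original module).
# The "type" value below is a hypothetical annotator class -- any class compatible with
# the thelper.gui.annotators.Annotator constructor signature would do -- and "datasets"
# is assumed to be a map of named dataset parsers built elsewhere.
#
#   config = {
#       "annotator": {
#           "type": "some_package.MyImageAnnotator",
#           # ... annotator-specific parameters ...
#       },
#       # ... dataset/loader sections used elsewhere in the config ...
#   }
#   annotator = create_annotator("demo-session", "./sessions/demo", config, datasets)
#   annotator.run()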
| 2.90625 | 3 |
exts/__init__.py | rawmeat898/Quote-Finder | 0 | 12799564 | <gh_stars>0
from configparser import ConfigParser
config = ConfigParser()
config.read('data/channels.ini')
| 1.445313 | 1 |
acf_example/httpbin_client/actions/request_inspection.py | Jamim/acf | 5 | 12799565 | from .base import HttpbinAction
class GetHeadersAction(HttpbinAction):
METHOD = 'GET'
URL_COMPONENTS = ['headers']
RESULT_KEY = 'headers'
class GetIPAction(HttpbinAction):
METHOD = 'GET'
URL_COMPONENTS = ['ip']
RESULT_KEY = 'origin'
class GetUserAgentAction(HttpbinAction):
METHOD = 'GET'
URL_COMPONENTS = ['user-agent']
RESULT_KEY = 'user-agent'
| 2.21875 | 2 |
session_1/Test.py | idlaviV/intro-to-python | 0 | 12799566 | for index, entry in enumerate({"Tam", "Tim", "Tom"}):
print(f"{index}: {entry}")
for index, entry in enumerate({"Tam", "Tom", "Tim"}):
print(f"{index}: {entry}")
for index, entry in enumerate({"Tam", "Tom", "Tim"}):
print(f"{index}: {entry}")
for index, entry in enumerate({"Tam", "Tom", "Tim"}):
print(f"{index}: {entry}")
| 3.9375 | 4 |
rllib/agent/on_policy/expected_actor_critic_agent.py | shenao-zhang/DCPU | 8 | 12799567 | """Implementation of Expected-Actor Critic Agent."""
from rllib.algorithms.eac import ExpectedActorCritic
from .actor_critic_agent import ActorCriticAgent
class ExpectedActorCriticAgent(ActorCriticAgent):
"""Implementation of the Advantage-Actor Critic.
TODO: build compatible function approximation.
References
----------
<NAME>., & <NAME>. (2018).
Expected policy gradients. AAAI.
"""
def __init__(self, *args, **kwargs):
super().__init__(algorithm_=ExpectedActorCritic, *args, **kwargs)
| 2.328125 | 2 |
Keras_Code/model/nets.py | trungvdhp/mpsnet | 0 | 12799568 | from model.adacos import AdaCos
from model.blocks import NetBlock
from tensorflow.keras.layers import Input, Reshape, Conv2D, Activation, Flatten, Dropout, GlobalAveragePooling2D, add
from tensorflow.keras.models import Model
import tensorflow.keras.backend as K
class Net:
def __init__(self, config):
self.model_name = config.model_name
self.start_fine_tune_layer_id = config.start_fine_tune_layer_id
self.end_fine_tune_layer_id = config.end_fine_tune_layer_id
self.embedding_dim = config.embedding_dim
self.embedding_layer_name = config.embedding_layer_name
self.dropout = config.dropout
self.net_blocks = NetBlock(config)
def build_mpsnet_backbone(self, input_shape):
c = [32, 32, 64, 64, 128]
t = [1, 2, 2, 3, 2]
s = [2, 2, 2, 2, 1]
n = [1, 2, 2, 3, 2]
activation='relu'
I = Input(shape = input_shape)
M0 = self.net_blocks.conv_block(I, c[0], 3, s[0], activation=activation)
M1 = self.net_blocks.inverted_residual_block(M0, c=c[1], ks=3, t=t[1], s=s[1], n=n[1], activation=activation)
M0 = self.net_blocks.separable_conv_block(M0, c[1], 3, s[1], activation=None)
A1 = add([M0, M1])
M2 = self.net_blocks.inverted_residual_block(A1, c=c[2], ks=3, t=t[2], s=s[2], n=n[2], activation=activation)
A1 = self.net_blocks.separable_conv_block(A1, c[2], 3, s[2], activation=None)
A2 = add([A1, M2])
M3 = self.net_blocks.inverted_residual_block(A2, c=c[3], ks=3, t=t[3], s=s[3], n=n[3], activation=activation)
A2 = self.net_blocks.separable_conv_block(A2, c[3], 3, s[3], activation=None)
A3 = add([A2, M3])
M4 = self.net_blocks.inverted_residual_block(A3, c=c[4], ks=3, t=t[4], s=s[4], n=n[4], activation=activation)
A3 = self.net_blocks.separable_conv_block(A3, c[4], 3, s[4], activation=None)
A4 = add([A3, M4])
M = self.net_blocks.spp_block(A4, pool_list=[1, 2, 4])
self.backbone = Model(inputs=I, outputs=M, name=self.model_name)
def build_mobilenet_v1_backbone(self, input_shape, alpha=1.0):
I = Input(shape = input_shape)
activation = 'relu'
c = int(32 * alpha)
x = self.net_blocks.conv_block(I, 32, 3, 2, activation=activation)
x = self.net_blocks.bottleneck_v1(x, 64 , 3, s=1, alpha=alpha, activation=activation)
x = self.net_blocks.bottleneck_v1(x, 128, 3, s=2, alpha=alpha, activation=activation)
x = self.net_blocks.bottleneck_v1(x, 128, 3, s=1, alpha=alpha, activation=activation)
x = self.net_blocks.bottleneck_v1(x, 256, 3, s=2, alpha=alpha, activation=activation)
x = self.net_blocks.bottleneck_v1(x, 256, 3, s=1, alpha=alpha, activation=activation)
x = self.net_blocks.bottleneck_v1(x, 512, 3, s=2, alpha=alpha, activation=activation)
x = self.net_blocks.bottleneck_v1(x, 512, 3, s=1, alpha=alpha, activation=activation)
x = self.net_blocks.bottleneck_v1(x, 512, 3, s=1, alpha=alpha, activation=activation)
x = self.net_blocks.bottleneck_v1(x, 512, 3, s=1, alpha=alpha, activation=activation)
x = self.net_blocks.bottleneck_v1(x, 512, 3, s=1, alpha=alpha, activation=activation)
x = self.net_blocks.bottleneck_v1(x, 512, 3, s=1, alpha=alpha, activation=activation)
x = self.net_blocks.bottleneck_v1(x, 1024, 3, s=2, alpha=alpha, activation=activation)
x = self.net_blocks.bottleneck_v1(x, 1024, 3, s=1, alpha=alpha, activation=activation)
x = GlobalAveragePooling2D()(x)
self.backbone = Model(inputs=I, outputs=x, name=self.model_name)
def build_mobilenet_v2_backbone(self, input_shape, alpha=1.0):
c = [32, 16, 24, 32, 64, 96, 160, 320, 1280]
t = [1, 1, 6, 6, 6, 6, 6, 6, 1]
s = [2, 1, 2, 2, 2, 1, 2, 1, 1]
n = [1, 1, 2, 3, 4, 3, 3, 1, 1]
activation = 'relu6'
I = Input(shape = input_shape)
n_filters = self.net_blocks.make_divisible(c[0] * alpha, 8)
x = self.net_blocks.conv_block(I, n_filters, 3, s[0], activation=activation) # (64, 64, 32)
x = self.net_blocks.inverted_residual_block(x, c=c[1], ks=3, t=t[1], s=s[1], n=n[1], alpha=alpha, activation=activation)
x = self.net_blocks.inverted_residual_block(x, c=c[2], ks=3, t=t[2], s=s[2], n=n[2], alpha=alpha, activation=activation)
x = self.net_blocks.inverted_residual_block(x, c=c[3], ks=3, t=t[3], s=s[3], n=n[3], alpha=alpha, activation=activation)
x = self.net_blocks.inverted_residual_block(x, c=c[4], ks=3, t=t[4], s=s[4], n=n[4], alpha=alpha, activation=activation)
x = self.net_blocks.inverted_residual_block(x, c=c[5], ks=3, t=t[5], s=s[5], n=n[5], alpha=alpha, activation=activation)
x = self.net_blocks.inverted_residual_block(x, c=c[6], ks=3, t=t[6], s=s[6], n=n[6], alpha=alpha, activation=activation)
x = self.net_blocks.inverted_residual_block(x, c=c[7], ks=3, t=t[7], s=s[7], n=n[7], alpha=alpha, activation=activation)
if alpha > 1.0:
last_filters = self.net_blocks.make_divisible(c[8] * alpha, 8)
else:
last_filters = c[8]
x = self.net_blocks.conv_block(x, last_filters, 1, 1, activation=activation)
x = GlobalAveragePooling2D()(x)
self.backbone = Model(inputs=I, outputs=x, name=self.model_name)
def build_mobilenet_v3_backbone(self, input_shape, alpha=1.0):
I = Input(shape = input_shape)
x = self.net_blocks.conv_block(I, 16, 3 , 2, activation='hard_swish')
x = self.net_blocks.bottleneck_v3(x, 16 , 3, e=16 , s=1, alpha=alpha, squeeze=False, activation='relu6')
x = self.net_blocks.bottleneck_v3(x, 24 , 3, e=64 , s=2, alpha=alpha, squeeze=False, activation='relu6')
x = self.net_blocks.bottleneck_v3(x, 24 , 3, e=72 , s=1, alpha=alpha, squeeze=False, activation='relu6')
x = self.net_blocks.bottleneck_v3(x, 40 , 5, e=72 , s=2, alpha=alpha, squeeze=True, activation='relu6')
x = self.net_blocks.bottleneck_v3(x, 40 , 5, e=120, s=1, alpha=alpha, squeeze=True, activation='relu6')
x = self.net_blocks.bottleneck_v3(x, 40 , 5, e=120, s=1, alpha=alpha, squeeze=True, activation='relu6')
x = self.net_blocks.bottleneck_v3(x, 80 , 3, e=240, s=2, alpha=alpha, squeeze=False, activation='hard_swish')
x = self.net_blocks.bottleneck_v3(x, 80 , 3, e=200, s=1, alpha=alpha, squeeze=False, activation='hard_swish')
x = self.net_blocks.bottleneck_v3(x, 80 , 3, e=184, s=1, alpha=alpha, squeeze=False, activation='hard_swish')
x = self.net_blocks.bottleneck_v3(x, 80 , 3, e=184, s=1, alpha=alpha, squeeze=False, activation='hard_swish')
x = self.net_blocks.bottleneck_v3(x, 112, 3, e=480, s=1, alpha=alpha, squeeze=True, activation='hard_swish')
x = self.net_blocks.bottleneck_v3(x, 112, 3, e=672, s=1, alpha=alpha, squeeze=True, activation='hard_swish')
x = self.net_blocks.bottleneck_v3(x, 160, 5, e=672, s=2, alpha=alpha, squeeze=True, activation='hard_swish')
x = self.net_blocks.bottleneck_v3(x, 160, 5, e=960, s=1, alpha=alpha, squeeze=True, activation='hard_swish')
x = self.net_blocks.bottleneck_v3(x, 160, 5, e=960, s=1, alpha=alpha, squeeze=True, activation='hard_swish')
x = self.net_blocks.conv_block(x, 960, 1, 1, activation='hard_swish')
x = GlobalAveragePooling2D()(x)
self.backbone = Model(inputs=I, outputs=x, name=self.model_name)
def build_mobilefacenet_backbone(self, input_shape, alpha=1.0):
c = [64, 64, 64, 128, 128, 128, 128]
t = [1, 1, 2, 4, 2, 4, 2]
s = [2, 1, 2, 2, 1, 2, 1]
n = [1, 1, 5, 1, 6, 1, 2]
activation='prelu'
I = Input(shape = input_shape)
x = self.net_blocks.conv_block(I, c[0], 3, s[0], activation=activation)
x = self.net_blocks.separable_conv_block(x, c[1], 3, s[1], activation=activation)
x = self.net_blocks.inverted_residual_block(x, c=c[2], ks=3, t=t[2], s=s[2], n=n[2], activation=activation)
x = self.net_blocks.inverted_residual_block(x, c=c[3], ks=3, t=t[3], s=s[3], n=n[3], activation=activation)
x = self.net_blocks.inverted_residual_block(x, c=c[4], ks=3, t=t[4], s=s[4], n=n[4], activation=activation)
x = self.net_blocks.inverted_residual_block(x, c=c[5], ks=3, t=t[5], s=s[5], n=n[5], activation=activation)
x = self.net_blocks.inverted_residual_block(x, c=c[6], ks=3, t=t[6], s=s[6], n=n[6], activation=activation)
x = self.net_blocks.conv_block(x, 512, 1, 1, 'valid', activation=activation)
ks = K.int_shape(x)[2]
x = self.net_blocks.depthwise_conv_block(x, ks, 1, padding='valid', activation=None)
self.backbone = Model(inputs=I, outputs=x, name=self.model_name)
def build_softmax_model(self, n_classes):
I=self.backbone.inputs
x=self.backbone.outputs[0]
if(len(x.shape)==2):
c = K.int_shape(x)[self.net_blocks.channel_axis]
x = Reshape((1, 1, c))(x)
x = self.net_blocks.conv_block(x, self.embedding_dim, 1, 1, 'valid', activation=None)
if(self.dropout>0):
x = Dropout(rate=self.dropout)(x)
x = self.net_blocks.conv_block(x, n_classes, 1, 1, activation='softmax', norm=None)
x = Reshape((n_classes,))(x)
self.softmax_model = Model(inputs=I, outputs=x, name=self.model_name)
def build_adacos_model(self):
label = Input(shape=(1,), name='label_input')
softmax = self.softmax_model.outputs[0]
n_classes = K.int_shape(softmax)[-1]
inputs = self.softmax_model.inputs[0]
x = self.softmax_model.layers[self.end_fine_tune_layer_id].output
if(self.dropout>0):
x = Dropout(rate=self.dropout)(x)
x = Flatten(name=self.embedding_layer_name)(x)
break_point = len(self.softmax_model.layers) + self.start_fine_tune_layer_id
for layer in self.softmax_model.layers[:break_point]:
layer.trainable=False
outputs = AdaCos(n_classes, initializer=self.net_blocks.kernel_initializer, regularizer=self.net_blocks.kernel_regularizer, name='adacos')([x, label])
self.adacos_model = Model(inputs = (inputs, label), outputs = outputs, name=self.model_name)
| 2.328125 | 2 |
colour.py | joshuawise610/PythonColour | 1 | 12799569 | """A wrapper for the colorama module."""
"""
Copyright 2019 - 2020 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from colorama import init, Fore, Back, Style
import os
def printcol(text, fore_col=None, back_col=None, shade=None, end=None):
"""A function which prints the text in the specified colour on the specified background.
Arguments:
text - The text to print to the screen in the required format.
fore_col - The colour of the text to print the text in. Default: white; can be any of: red, light red, magenta,
light magenta, yellow, light yellow, green, light green, blue, light blue, cyan, light cyan, black or white.
back_col - The colour to print the text onto. Default: black; same colour options as fore_col.
shade - The shade of the colour to use. Default: normal; can be any of: dim, normal or bright.
end - What character to end the print line with. By default this is the newline character. This can be set to an
empty string to change the colour of the text being printed out.
"""
# Handle the keyword arguments so that they still work correctly when the terminal is used, this allows any not
# defined to be set to the default. E.G. It is possible to run printcol("Some text") and still get some output,
# This will be white text using the normal shade on a black background, this is normal print for cmd, but may be
# different for other terminals.
if fore_col is None:
fore_col = "white"
if back_col is None:
back_col = "black"
if shade is None:
shade = "normal"
if end is None:
end = "\n"
# Convert the inputs into lowercase names to be checked
fore_col = fore_col.lower()
back_col = back_col.lower()
shade = shade.lower()
# Check if running from pycharm
is_running_pycharm = "PYCHARM_HOSTED" in os.environ
if is_running_pycharm:
convert = False
strip = False
else:
convert = None
strip = None
init(autoreset=True, convert=convert, strip=strip) # Make sure the next print statement runs correctly
# Define values for each style and colour
shades = {"dim": Style.DIM, "bright": Style.BRIGHT, "normal": Style.NORMAL} # When underline is available add Style.UNDERLINED
fore_cols = {"red": Fore.RED, "light red": Fore.LIGHTRED_EX, "magenta": Fore.MAGENTA, "light magenta": Fore.LIGHTMAGENTA_EX, "yellow": Fore.YELLOW, "light yellow": Fore.LIGHTYELLOW_EX, "green": Fore.GREEN, "light green": Fore.LIGHTGREEN_EX, "blue": Fore.BLUE, "light blue": Fore.LIGHTBLUE_EX, "cyan": Fore.CYAN, "light cyan": Fore.LIGHTCYAN_EX, "black": Fore.BLACK}
back_cols = {"red": Back.RED, "light red": Back.LIGHTRED_EX, "magenta": Back.MAGENTA, "light magenta": Back.LIGHTMAGENTA_EX, "yellow": Back.YELLOW, "light yellow": Back.LIGHTYELLOW_EX, "green": Back.GREEN, "light green": Back.LIGHTGREEN_EX, "blue": Back.BLUE, "light blue": Back.LIGHTBLUE_EX, "cyan": Back.CYAN, "light cyan": Back.LIGHTCYAN_EX, "white": Back.WHITE}
# Check the shade of colour to use
if shade in shades:
shade = shades[shade]
else:
shade = Style.NORMAL
# Check the foreground colour to use
if fore_col in fore_cols:
fore_col = fore_cols[fore_col]
else:
fore_col = Fore.WHITE
# Check the background colour to use
if back_col in back_cols:
back_col = back_cols[back_col]
else:
back_col = Back.BLACK
# Then print the text to the screen
print(shade + fore_col + back_col + text, end=end)
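# Usage sketch (added for illustration; not part of the original module):
#
#   printcol("All tests passed", fore_col="light green", shade="bright")
#   printcol("Warning: low disk space", fore_col="yellow", back_col="blue")
#   printcol("Loading... ", fore_col="cyan", end="")  # keep the cursor on the same line
#
# Unrecognised colour or shade names simply fall back to the defaults handled above.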
def printcollist(list_to_print, fore_col=None, back_col=None, shade=None, end=None):
"""A Function which takes a list and iterates through the list and prints it out in coloured text. The
colours and shade to use can be provided as a list or as a sting.
Arguments:
list_to_print - A iterable list of strings or numbers to print out.
fore_col - A list of strings or a single string to use as the text colour for the strings being printed.
Default White, colours same as printcol
back_col - A list of strings or a single string to use as the background text colour for the strings being
printed. Default Black, colours same as printcol
shade - A list of strings or a single string to use as the shade of the text colour for the string.
Default Normal, options same as printcol
end - A list of strings or a single string to use as the separator between the strings being printed.
Default Newline, this list must be passed for the system to work correctly
"""
# Check the keyword arguments are None and then set the defaults.
if fore_col is None:
fore_col = "white"
if back_col is None:
back_col = "black"
if shade is None:
shade = "normal"
if end is None:
end = "\n"
# Check the lists are of the correct length before attempting the iteration
if len(list_to_print) == len(fore_col) == len(back_col) == len(shade) == len(end):
# Then print out each item as required in its colour
for item, foreground, background, shade, ending in zip(list_to_print, fore_col, back_col, shade, end):
# Print the item
printcol(item, fore_col=foreground, back_col=background, shade=shade, end=ending)
else:
# The lists are not of all equal length so print an error message in red.
printcol("Please use lists of equal length.", fore_col="red")
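# Usage sketch (added for illustration; not part of the original module). All five
# arguments must be lists of the same length for the equal-length check above to pass:
#
#   printcollist(["PASS", "FAIL"],
#                fore_col=["green", "red"],
#                back_col=["black", "black"],
#                shade=["bright", "bright"],
#                end=["\n", "\n"])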
def inputcolour(text, prompt_fore_col=None, prompt_back_col=None, prompt_shade=None, input_fore_col=None, input_back_col=None, input_shade=None):
"""Returns input from a coloured input prompt.
Arguments:
text - The text to prompt the user for the desired input.
prompt_fore_col - The colour of the prompt text. Default: white; can be any of: red, light red, magenta,
light magenta, yellow, light yellow, green, light green, blue, light blue, cyan, light cyan, black or white.
prompt_back_col - The colour to print the prompt text onto. Default: black; same colour options as prompt_fore_col.
prompt_shade - The shade of the colour to use for the input prompt. Default: normal; can be any of: dim, normal or bright.
input_fore_col - The colour of the text entered by the user. Default: white; same colour options as prompt_fore_col.
input_back_col - The colour to print the user input onto. Default: black; same colour options as prompt_fore_col.
input_shade - The shade of the colour to use for the text entered by the user. Default: normal; can be any of:
dim, normal or bright."""
# Handle None keywords
if prompt_fore_col is None:
prompt_fore_col = "white"
if prompt_back_col is None:
prompt_back_col = "black"
if prompt_shade is None:
prompt_shade = "normal"
if input_fore_col is None:
input_fore_col = "white"
if input_back_col is None:
input_back_col = "black"
if input_shade is None:
input_shade = "normal"
# Convert the inputs into lowercase names to be checked
prompt_fore_col = prompt_fore_col.lower()
prompt_back_col = prompt_back_col.lower()
prompt_shade = prompt_shade.lower()
input_fore_col = input_fore_col.lower()
input_back_col = input_back_col.lower()
input_shade = input_shade.lower()
# Check if running from pycharm
is_running_pycharm = "PYCHARM_HOSTED" in os.environ
if is_running_pycharm:
convert = False
strip = False
else:
convert = None
strip = None
init(autoreset=False, convert=convert, strip=strip) # Disable autoreset to colour the prompt correctly
# Define values for each style and colour
shades = {"dim": Style.DIM, "bright": Style.BRIGHT, "normal": Style.NORMAL} # When underline is available add Style.UNDERLINED
fore_cols = {"red": Fore.RED, "light red": Fore.LIGHTRED_EX, "magenta": Fore.MAGENTA, "light magenta": Fore.LIGHTMAGENTA_EX, "yellow": Fore.YELLOW, "light yellow": Fore.LIGHTYELLOW_EX, "green": Fore.GREEN, "light green": Fore.LIGHTGREEN_EX, "blue": Fore.BLUE, "light blue": Fore.LIGHTBLUE_EX, "cyan": Fore.CYAN, "light cyan": Fore.LIGHTCYAN_EX, "black": Fore.BLACK}
back_cols = {"red": Back.RED, "light red": Back.LIGHTRED_EX, "magenta": Back.MAGENTA, "light magenta": Back.LIGHTMAGENTA_EX, "yellow": Back.YELLOW, "light yellow": Back.LIGHTYELLOW_EX, "green": Back.GREEN, "light green": Back.LIGHTGREEN_EX, "blue": Back.BLUE, "light blue": Back.LIGHTBLUE_EX, "cyan": Back.CYAN, "light cyan": Back.LIGHTCYAN_EX, "white": Back.WHITE}
# Check which shade of colour to use for the input prompt and the user input.
if prompt_shade in shades:
prompt_shade = shades[prompt_shade]
else:
prompt_shade = Style.NORMAL
if input_shade in shades:
input_shade = shades[input_shade]
else:
input_shade = Style.NORMAL
# Check each foreground colour to use
if prompt_fore_col in fore_cols:
prompt_fore_col = fore_cols[prompt_fore_col]
else:
prompt_fore_col = Fore.WHITE
if input_fore_col in fore_cols:
input_fore_col = fore_cols[input_fore_col]
else:
input_fore_col = Fore.WHITE
# Check each background colour to use
if prompt_back_col in back_cols:
prompt_back_col = back_cols[prompt_back_col]
else:
prompt_back_col = Back.BLACK
if input_back_col in back_cols:
input_back_col = back_cols[input_back_col]
else:
input_back_col = Back.BLACK
print(prompt_shade + prompt_fore_col + prompt_back_col, end='')
show_text = str(text) + " " + Style.RESET_ALL # Force the text to string and add a space for styling
    show_text += input_shade + input_fore_col + input_back_col  # style the text the user types
return_text = input(show_text) # Show the text
print(Style.RESET_ALL) # Reset for normal
return return_text
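# Illustrative sketch of inputcolour: style the prompt and the user's typing separately.
# The prompt text and colour choices are examples; any colour name accepted above works.
def _example_coloured_prompt():
    """Ask for a name with a bright cyan prompt and green user input."""
    return inputcolour("What is your name?",
                       prompt_fore_col="cyan", prompt_shade="bright",
                       input_fore_col="green", input_back_col="black")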
def testcolour(use_string=None):
"""A function which is used to test the colour printing of the shell by printing a string in different colours onto
different backgrounds.
Arguments:
use_string - The string to use for testing the console prints text correctly in all colours. Default:
'Hello World'."""
if use_string is None:
use_string = "Hello World"
printcol(use_string, "red", "black", "dim")
printcol(use_string, "red", "black", "normal")
printcol(use_string, "red", "black", "bright")
printcol(use_string, "magenta", "black", "dim")
printcol(use_string, "magenta", "black", "normal")
printcol(use_string, "magenta", "black", "bright")
printcol(use_string, "yellow", "black", "dim")
printcol(use_string, "yellow", "black", "normal")
printcol(use_string, "yellow", "black", "bright")
printcol(use_string, "green", "black", "dim")
printcol(use_string, "green", "black", "normal")
printcol(use_string, "green", "black", "bright")
printcol(use_string, "cyan", "black", "dim")
printcol(use_string, "cyan", "black", "normal")
printcol(use_string, "cyan", "black", "bright")
printcol(use_string, "blue", "black", "dim")
printcol(use_string, "blue", "black", "normal")
printcol(use_string, "blue", "black", "bright")
printcol(use_string, "white", "black", "dim")
printcol(use_string, "white", "black", "normal")
printcol(use_string, "white", "black", "bright")
printcol(use_string, "black", "white", "dim")
printcol(use_string, "black", "white", "normal")
printcol(use_string, "black", "white", "bright")
| 3.46875 | 3 |
Cryptography/MorseCode/morsecode.py | AoWangDrexel/PiApproximationExperiments | 0 | 12799570 | <gh_stars>0
"""
Author: <NAME>
Date: 08/12/19
Description: Morse code encryper and decryter
"""
# The function retrieves the morse code from a text file and cleans it up so the letters
# can be stored in the keys and code into the values
def loadMorseTable():
codes = ""
with open("morseTable.txt", "r") as code:
codes = code.read()
    # split on whitespace (newlines included) into alternating letters and codes
    codes = codes.split()
keys = []
values = []
for i in range(len(codes)):
if(i % 2 == 0):
keys.append(codes[i])
else:
values.append(codes[i])
    # creates the morse code dictionary at module level so that
    # encrypt() and decrypt() below can look letters up in it
    global morse_dict
    morse_dict = {}
    for i in range(len(keys)):
        morse_dict[keys[i]] = values[i]
# The function encrypts the plaintext into ciphertext and returns the string
def encrypt(plain_text):
word = ""
plain_text = plain_text.upper()
for letter in plain_text:
if(letter in morse_dict):
# each letter is separated by a space
word += morse_dict.get(letter) + " "
else:
            # each word is separated by 7 spaces
word += " "
return word
# The function returns the key by inputting a value of the key
# if values not in dictionary, return -1
def values_by_keys(dictionary, value):
for keys, values in dictionary.items():
if(value == values):
return keys
return -1
# The function decrypts the ciphertext
def decrypt(cipher_text):
decoded = ""
# creates a list by seperating each letter by the space
cipher_text = cipher_text.split(" ")
for code in cipher_text:
# if a space is in the elements of the list, add a space to the
# decoded message and remove the space from the element, so it can be identified
if " " in code:
decoded += " " + values_by_keys(morse_dict, code.strip())
decoded += str(values_by_keys(morse_dict, code))
# the space in the code is not a morse symbol, so the values_by_keys method returns -1
# therefore replaces the 7 spaces of between the words that became 7 -1's to become a single space
decoded = decoded.replace("-1-1-1-1-1-1-1"," ")
return decoded
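# Round-trip sketch, assuming morseTable.txt maps letters to standard Morse code:
#   encrypt("HI") returns ".... .. " (each letter's code followed by one space)
#   decrypt(".... ..") returns "HI"
# Words in the plaintext become seven consecutive spaces in the ciphertext.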
# intro
def main():
loadMorseTable()
print("Welcome to the Morse Code Encoding and Decoder!")
print("CAUTION: Please seperate each morse symbol with a space and each word by seven spaces\n")
choice = input("Would you like to encrypt (e) or decrypt (d)? ")
print()
if(choice == "e"):
text = input("What would you like to encrypt? ")
print()
print("Plaintext: " + text)
print("Ciphertext: " + encrypt(text))
elif(choice == "d"):
text = input("What would you like to decrypt? ")
print()
print("Ciphertext: " + text)
print("Plaintext: "+ decrypt(text))
else:
print("There must have been a problem! Please try again")
if __name__ == "__main__":
main()
| 4.125 | 4 |
ares/simulations/MultiPhaseMedium.py | astrojhgu/ares | 1 | 12799571 | <filename>ares/simulations/MultiPhaseMedium.py
"""
MultiPhaseMedium.py
Author: <NAME>
Affiliation: University of Colorado at Boulder
Created on: Mon Feb 16 12:46:28 MST 2015
Description:
"""
import numpy as np
from .GasParcel import GasParcel
from ..util import ParameterFile, ProgressBar
from ..util.ReadData import _sort_history, _load_inits
from .MetaGalacticBackground import MetaGalacticBackground
from ..util.SetDefaultParameterValues import MultiPhaseParameters
_mpm_defs = MultiPhaseParameters()
class MultiPhaseMedium(object):
def __init__(self, **kwargs):
"""
Initialize a MultiPhaseMedium object.
By default, this is a two-zone model, consisting of a "bulk IGM"
grid patch and an "HII regions" grid patch, dubbed "igm" and "cgm",
respectively. To perform a single-zone calculation, simply set
``include_cgm=False`` or ``include_igm=False``.
"""
if 'load_ics' not in kwargs:
kwargs['load_ics'] = True
self.kwargs = kwargs
@property
def pf(self):
if not hasattr(self, '_pf'):
inits = self.inits
self._pf = ParameterFile(**self.kwargs)
return self._pf
@property
def inits(self):
if not hasattr(self, '_inits'):
self._inits = inits = _load_inits()
zi = self.pf['initial_redshift']
if not np.all(np.diff(inits['z']) > 0):
raise ValueError('Redshifts in ICs must be in ascending order!')
Ti = np.interp(zi, inits['z'], inits['Tk'])
xi = np.interp(zi, inits['z'], inits['xe'])
#if self.pf['include_He']:
new = {'igm_initial_temperature': Ti,
'initial_ionization': [1. - xi, xi, 1.-xi-1e-10, xi, 1e-10]}
self.kwargs.update(new)
#else:
# new_pars = {'cosmological_ics': False,
# 'igm_initial_temperature': Ti,
# 'igm_initial_ionization': [1. - xi, xi]}
#
#self.kwargs.update(new_pars)
return self._inits
@property
def field(self):
if not hasattr(self, '_field'):
if self.pf['include_igm']:
self._field = MetaGalacticBackground(grid=self.parcel_igm.grid,
**self.kwargs)
else:
self._field = MetaGalacticBackground(grid=self.parcel_cgm.grid,
**self.kwargs)
return self._field
@property
def pops(self):
return self.field.pops
@property
def grid(self):
return self.field.grid
@property
def parcels(self):
if not hasattr(self, '_parcels'):
self._initialize_zones()
return self._parcels
@property
def parcel_igm(self):
if not hasattr(self, '_parcel_igm'):
self._parcel_igm = self.parcels[0]
return self._parcel_igm
@property
def parcel_cgm(self):
if not hasattr(self, '_parcel_cgm'):
if self.pf['include_igm']:
self._parcel_cgm = self.parcels[1]
else:
self._parcel_cgm = self.parcels[0]
return self._parcel_cgm
def rates_no_RT(self, grid):
_rates_no_RT = \
{'k_ion': np.zeros((grid.dims, grid.N_absorbers)),
'k_heat': np.zeros((grid.dims, grid.N_absorbers)),
'k_ion2': np.zeros((grid.dims, grid.N_absorbers, grid.N_absorbers)),
}
return _rates_no_RT
@property
def tf(self):
if not hasattr(self, '_tf'):
z = self.pf['initial_redshift']
zf = self.pf['final_redshift']
self._tf = self.default_parcel.grid.cosm.LookbackTime(zf, z)
self.pf['stop_time'] = self._tf / self.pf['time_units']
return self._tf
def _initialize_zones(self):
"""
Initialize (up to two) GasParcels.
"""
# Reset stop time based on final redshift.
z = self.pf['initial_redshift']
zf = self.pf['final_redshift']
self._parcels = []
for zone in ['igm', 'cgm']:
if not self.pf['include_%s' % zone]:
continue
kw = self.pf.copy()
# Loop over defaults, pull out the ones for this zone
for key in _mpm_defs:
if key[0:4] != '%s_' % zone:
continue
# Have to rename variables so Grid class will know them
grid_key = key.replace('%s_' % zone, '')
if key in self.kwargs:
kw[grid_key] = self.kwargs[key]
else:
kw[grid_key] = _mpm_defs[key]
if zone == 'igm':
self.kw_igm = kw.copy()
parcel_igm = GasParcel(**self.kw_igm)
self.gen_igm = parcel_igm.step()
# Set initial values for rate coefficients
parcel_igm.update_rate_coefficients(parcel_igm.grid.data,
**self.rates_no_RT(parcel_igm.grid))
self._parcels.append(parcel_igm)
else:
self.kw_cgm = kw.copy()
parcel_cgm = GasParcel(**self.kw_cgm)
parcel_cgm.grid.set_recombination_rate(True)
parcel_cgm._set_chemistry()
self.gen_cgm = parcel_cgm.step()
parcel_cgm.chem.chemnet.monotonic_EoR = \
self.pf['monotonic_EoR']
parcel_cgm.update_rate_coefficients(parcel_cgm.grid.data,
**self.rates_no_RT(parcel_cgm.grid))
self._parcels.append(parcel_cgm)
self._parcels[-1].pf['stop_time'] = self.tf / self.pf['time_units']
@property
def zones(self):
if not hasattr(self, '_zones'):
self._zones = int(self.pf['include_igm']) \
+ int(self.pf['include_cgm'])
return self._zones
@property
def default_parcel(self):
if not hasattr(self, '_default_parcel'):
self._default_parcel = self.parcel_igm if self.pf['include_igm'] \
else self.parcel_cgm
return self._default_parcel
@property
def dynamic_tau(self):
return self.pf['tau_dynamic']
def update_optical_depth(self):
"""
Dynamically update optical depth as simulation runs.
"""
# Recall that self.field.tau is a list with as many elements as there
# are distinct populations
tau = []
for i in range(self.field.Npops):
pass
self.field.tau = tau
def subcycle(self):
"""
See if we need to re-do the previous timestep.
This mean:
(1) Re-compute the IGM optical depth.
(2)
"""
return False
# Check IGM ionization state between last two steps.
# Converged to desired tolerance?
#self.
def _stop_criteria_met(self):
pass
def run(self):
"""
Run simulation from start to finish.
Returns
-------
Nothing: sets `history` attribute.
"""
self._insert_inits()
pb = ProgressBar(self.tf, use=self.pf['progress_bar'])
pb.start()
# Evolve in time
for t, z, data_igm, data_cgm, RC_igm, RC_cgm in self.step():
pb.update(t)
# Save data
self.all_z.append(z)
self.all_t.append(t)
if self.pf['include_cgm']:
self.all_data_cgm.append(data_cgm.copy())
if self.pf['include_igm']:
self.all_data_igm.append(data_igm.copy())
if self.pf['save_rate_coefficients']:
if self.pf['include_cgm']:
self.all_RCs_cgm.append(RC_cgm.copy())
if self.pf['include_igm']:
self.all_RCs_igm.append(RC_igm.copy())
pb.finish()
# Sort everything by time
if self.pf['include_igm']:
self.history_igm = \
_sort_history(self.all_data_igm, prefix='igm_', squeeze=True)
self.history = self.history_igm.copy()
else:
self.history = {}
if self.pf['include_cgm']:
self.history_cgm = \
_sort_history(self.all_data_cgm, prefix='cgm_', squeeze=True)
self.history.update(self.history_cgm)
# Save rate coefficients [optional]
if self.pf['save_rate_coefficients']:
if self.pf['include_igm']:
self.rates_igm = \
_sort_history(self.all_RCs_igm, prefix='igm_', squeeze=True)
self.history.update(self.rates_igm)
if self.pf['include_cgm']:
self.rates_cgm = \
_sort_history(self.all_RCs_cgm, prefix='cgm_', squeeze=True)
self.history.update(self.rates_cgm)
self.history['t'] = np.array(self.all_t)
self.history['z'] = np.array(self.all_z)
def step(self):
"""
Generator for a two-phase intergalactic medium.
Returns
-------
Tuple containing the current time, redshift, and dictionaries for the
IGM and CGM data at a single snapshot.
"""
t = 0.0
z = self.pf['initial_redshift']
dt = self.pf['time_units'] * self.pf['initial_timestep']
zf = self.pf['final_redshift']
# Read initial conditions
if self.pf['include_igm']:
data_igm = self.parcel_igm.grid.data.copy()
if self.pf['include_cgm']:
data_cgm = self.parcel_cgm.grid.data.copy()
# Evolve in time!
while z > zf:
# Increment time / redshift
dtdz = self.default_parcel.grid.cosm.dtdz(z)
t += dt
z -= dt / dtdz
# The (potential) generators need this
self.field.update_redshift(z)
# IGM rate coefficients
if self.pf['include_igm']:
done = False
if self.pf['stop_igm_h_2'] is not None:
if data_igm['h_2'] > self.pf['stop_igm_h_2']:
data_igm = data_igm_pre.copy()
dt1 = 1e50
done = True
if not done:
RC_igm = self.field.update_rate_coefficients(z,
zone='igm', return_rc=True, igm_h_1=data_igm['h_1'])
# Now, update IGM parcel
                    t1, dt1, data_igm = next(self.gen_igm)
# Pass rate coefficients off to the IGM parcel
self.parcel_igm.update_rate_coefficients(data_igm, **RC_igm)
else:
dt1 = 1e50
RC_igm = data_igm = None
data_igm = {'h_1': 1.0}
if self.pf['include_cgm']:
done = False
if self.pf['stop_cgm_h_2'] is not None:
if data_cgm['h_2'] > self.pf['stop_cgm_h_2']:
data_cgm = data_cgm_pre.copy()
dt2 = 1e50
done = True
if not done:
# CGM rate coefficients
RC_cgm = self.field.update_rate_coefficients(z,
zone='cgm', return_rc=True, cgm_h_1=data_cgm['h_1'])
# Pass rate coefficients off to the CGM parcel
self.parcel_cgm.update_rate_coefficients(data_cgm, **RC_cgm)
# Now, update CGM parcel
                    t2, dt2, data_cgm = next(self.gen_cgm)
else:
dt2 = 1e50
RC_cgm = data_cgm = None
# Must update timesteps in unison
dt_pre = dt * 1.
dt = min(dt1, dt2)
dt = min(dt, self.pf['max_timestep'] * self.pf['time_units'])
# Might need these...
if self.pf['include_igm']:
data_igm_pre = data_igm.copy()
if self.pf['include_cgm']:
data_cgm_pre = data_cgm.copy()
# If we're computing the IGM optical depth dynamically, we may
# need to "re-do" this step to ensure convergence.
redo = self.subcycle()
if not redo:
# Changing attribute! A little scary, but we must make sure
# these parcels are evolved in unison
if self.pf['include_igm']:
self.parcel_igm.dt = dt
if self.pf['include_cgm']:
self.parcel_cgm.dt = dt
yield t, z, data_igm, data_cgm, RC_igm, RC_cgm
continue
# If we've made it here, we need to trick our generators a bit
# "undo" this time-step
t -= dt_pre
z += dt_pre / dtdz
self.update_optical_depth()
def _insert_inits(self):
"""
Prepend provided initial conditions to the data storage lists.
"""
if not self.pf['load_ics']:
self.all_t, self.all_z, self.all_data_igm, self.all_data_cgm = \
[], [], [], []
if self.pf['save_rate_coefficients']:
self.all_RCs_igm, self.all_RCs_cgm = [], []
if not self.pf['include_cgm']:
del self.all_RCs_cgm, self.all_data_cgm
return
# Flip to descending order (in redshift)
z_inits = self.inits['z'][-1::-1]
Tk_inits = self.inits['Tk'][-1::-1]
xe_inits = self.inits['xe'][-1::-1]
inits_all = {'z': z_inits, 'Tk': Tk_inits, 'xe': xe_inits}
# Stop pre-pending once we hit the first light redshift
i_trunc = np.argmin(np.abs(z_inits - self.pf['initial_redshift']))
if z_inits[i_trunc] <= self.pf['initial_redshift']:
i_trunc += 1
self.all_t = []
self.all_data_igm = []
self.all_z = list(z_inits[0:i_trunc])
self.all_RCs_igm = [self.rates_no_RT(self.parcel_igm.grid)] * len(self.all_z)
self.all_RCs_cgm = [self.rates_no_RT(self.parcel_igm.grid)] * len(self.all_z)
# Don't mess with the CGM (much)
if self.pf['include_cgm']:
tmp = self.parcel_cgm.grid.data
self.all_data_cgm = [tmp.copy() for i in range(len(self.all_z))]
for i, cgm_data in enumerate(self.all_data_cgm):
self.all_data_cgm[i]['rho'] = \
self.parcel_cgm.grid.cosm.MeanBaryonDensity(self.all_z[i])
self.all_data_cgm[i]['n'] = \
self.parcel_cgm.grid.particle_density(cgm_data, self.all_z[i])
if not self.pf['include_igm']:
return
# Loop over redshift and derive things for the IGM
for i, red in enumerate(self.all_z):
snapshot = {}
for key in self.parcel_igm.grid.data.keys():
if key in self.inits.keys():
snapshot[key] = inits_all[key][i]
continue
# Electron fraction
snapshot['e'] = inits_all['xe'][i]
# Hydrogen neutral fraction
xe = inits_all['xe'][i]
if 2 not in self.parcel_igm.grid.Z:
xe = min(xe, 1.0)
xi = xe / (1. + self.parcel_igm.grid.cosm.y)
snapshot['h_1'] = 1. - xi
snapshot['h_2'] = xi
# Add helium, assuming xHeII = xHII, and xHeIII << 1
if self.parcel_igm.pf['include_He']:
snapshot['he_1'] = 1. - xi
snapshot['he_2'] = xi
snapshot['he_3'] = 1e-10
snapshot['rho'] = self.parcel_igm.grid.cosm.MeanBaryonDensity(red)
snapshot['n'] = \
self.parcel_igm.grid.particle_density(snapshot.copy(), red)
self.all_t.append(0.0)
self.all_data_igm.append(snapshot.copy())
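# Minimal usage sketch. The redshift values are illustrative; the keyword names follow the
# parameter-file keys used throughout this module and are passed through to ParameterFile.
def _example_two_zone_run():
    """Evolve a two-zone (igm + cgm) medium and return the redshift history."""
    sim = MultiPhaseMedium(initial_redshift=40., final_redshift=6.)
    sim.run()
    return sim.history['z']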
| 2.25 | 2 |
web/migrations/0042_auto_20180808_1500.py | ocwc/oerweekapi | 0 | 12799572 | <filename>web/migrations/0042_auto_20180808_1500.py
# -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2018-08-08 15:00
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('web', '0041_auto_20180808_1450'),
]
operations = [
migrations.AlterField(
model_name='resource',
name='post_id',
field=models.IntegerField(default=0),
),
]
| 1.234375 | 1 |
cata/plotters/unified_plotter.py | seblee97/student_teacher_catastrophic | 2 | 12799573 | import os
from typing import Dict
from typing import Optional
from typing import Union
import pandas as pd
from cata import constants
from cata.plotters import base_plotter
class UnifiedPlotter(base_plotter.BasePlotter):
"""Class for plotting generalisation errors, overlaps etc.
For case when logging is done in 'unified' fashion i.e. all into one dataframe.
"""
def __init__(
self,
save_folder: str,
num_steps: int,
log_overlaps: bool,
ode_log_path: str,
network_log_path: str,
):
"""
Class constructor.
Args:
save_folder: path to folder for saving plots.
num_steps: total number of steps in the training run (used for scaling axes).
log_overlaps: whether or not to plot overlaps (or just errors).
            ode_log_path: path to the csv of ODE results, or None to skip ODE plots.
            network_log_path: path to the csv of network results, or None to skip network plots.
"""
self._ode_logger_path = ode_log_path
self._network_logger_path = network_log_path
super().__init__(
save_folder=save_folder, num_steps=num_steps, log_overlaps=log_overlaps
)
def _setup_data(self):
"""Setup data from relevant dataframes.
Here, in the unified case, full dataset is loaded into memory.
"""
if self._ode_logger_path is not None:
self._ode_logger = pd.read_csv(self._ode_logger_path, index_col=0)
if self._network_logger_path is not None:
self._network_logger = pd.read_csv(self._network_logger_path)
def make_plots(self) -> None:
"""Orchestration method for plotting ode logs, network logs, or both."""
if self._ode_logger_path is not None:
self._make_plot(
data={constants.ODE: self._ode_logger},
save_path=os.path.join(self._save_folder, constants.ODE_PDF),
)
if self._network_logger_path is not None:
self._make_plot(
data={constants.SIM: self._network_logger},
save_path=os.path.join(self._save_folder, constants.NETWORK_PDF),
)
if self._ode_logger_path is not None and self._network_logger_path is not None:
self._make_plot(
data={
constants.ODE: self._ode_logger,
constants.SIM: self._network_logger,
},
save_path=os.path.join(self._save_folder, constants.OVERLAY_PDF),
)
def _make_plot(
self,
data: Dict[str, Union[pd.DataFrame, Dict[str, pd.DataFrame]]],
save_path: str,
) -> None:
"""Make plots for a set of results (e.g. ode or network or both).
Args:
data: mapping from type of results (ode, network etc.)
to dataframes with results.
save_path: path to save the plot.
"""
# can use arbitrary dataframe since columns will be the same.
tag_groups = self._collate_tags(tags=list(list(data.values())[0].keys()))
# e.g. [error, overlap, ...]
group_names = list(tag_groups.keys())
# e.g. [[error_1, error_2, ...], [overlap_1, overlap_2, ...], ...]
        group_key_names = list(tag_groups.values())
num_graphs = len(tag_groups)
num_rows = self.GRAPH_LAYOUT[0]
num_columns = self.GRAPH_LAYOUT[1]
fig, spec = self._get_figure_skeleton(
height=4, width=5, num_columns=num_columns, num_rows=num_rows
)
for row in range(num_rows):
for col in range(num_columns):
graph_index = (row) * num_columns + col
if graph_index < num_graphs:
print("Plotting graph {}/{}".format(graph_index + 1, num_graphs))
group_name = group_names[graph_index]
keys = group_key_names[graph_index]
data_collection = {
data_type: {key: data[data_type][key].dropna() for key in keys}
for data_type in data.keys()
}
fig = self._plot_scalar(
fig=fig,
spec=spec,
row=row,
col=col,
tag_group_name=group_name,
data_collection=data_collection,
)
fig.savefig(save_path, dpi=100)
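# Illustrative sketch (the paths are placeholders for the csv logs written during a run):
# produce the ode, network and overlay pdf summaries in one call.
def _example_plotting_run():
    plotter = UnifiedPlotter(
        save_folder="results/run_0",
        num_steps=500_000,
        log_overlaps=True,
        ode_log_path="results/run_0/ode_log.csv",
        network_log_path="results/run_0/network_log.csv",
    )
    plotter.make_plots()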
| 2.578125 | 3 |
Zad_Decorator/Potwierdzenie.py | Paarzivall/Wzorce-Projektowe | 0 | 12799574 | from Komponent import Komponent
class Potwierdzenie(Komponent):
def drukuj(self):
print("POTWIERDZENIE")
| 1.890625 | 2 |
helpers.py | janosh/auto-repo-config | 0 | 12799575 | import os
from typing import Any
import requests
import yaml
if os.path.exists("gh_token.py"):
from gh_token import GH_TOKEN
else:
GH_TOKEN = os.environ["GH_TOKEN"]
headers = {"Authorization": f"token {GH_TOKEN}"}
def query_gh_gpl_api(query: str) -> dict:
"""Query the GitHub GraphQL API.
Args:
query (str): Multi-line query string. Use triple-quotes. Minimal example:
'''
{
viewer {
login
}
}
'''
Raises:
Exception: If the query returned an error message.
Returns:
dict: The data returned by the API.
"""
response = requests.post(
"https://api.github.com/graphql", json={"query": query}, headers=headers
).json()
if "errors" in response:
err = response["errors"][0]["message"]
raise Exception(f"Request failed with error '{err}'.")
else:
return response["data"]
def pretty_print(dic: dict) -> None:
"""Pretty print a dictionary in YAML format.
Useful for development and debugging.
"""
print(yaml.dump(dic))
def get_gql_query(settings: str, affil: str = "OWNER") -> str:
"""Construct GraphQL query from settings list.
Args:
settings (str): Names of repo settings according to the GraphQL API,
separated by new lines. Use '\n'.join(settings_list).
affil (str, optional): Comma-separated string of author affiliations to their
repos. One or several of OWNER, COLLABORATOR, ORGANIZATION_MEMBER.
Defaults to "OWNER".
Returns:
str: GraphQL query.
"""
return """{
viewer {
repositories(first: 100, affiliations: [{affil}]) {
nodes {
name
nameWithOwner
isArchived
isFork
{settings}
}
}
organizations(first: 100) {
nodes {
login
repositories(first: 100) {
nodes {
name
nameWithOwner
isArchived
isFork
{settings}
}
}
}
}
}
}""".replace(
"{settings}", settings
).replace(
"{affil}", affil
)
def load_config(config_path: str = None) -> tuple[dict[str, Any], list[str], bool]:
"""Load .repo-config.(yml|yaml).
Returns:
tuple[dict[str, Any], list[str], bool]:
- Dictionary of GitHub settings to apply to all your repos
- list of additional logins of your GitHub organizations to query for repos
            - boolean whether or not to apply settings to repos you forked as well
"""
config = {}
if config_path and not os.path.exists(config_path):
raise FileNotFoundError(
f"Path to config file was set as '{config_path}' but no such file exists."
)
elif config_path:
with open(config_path) as file:
config = yaml.safe_load(file.read())
for path in (".repo-config.yml", ".repo-config.yaml"):
if os.path.exists(path):
with open(path) as file:
config = yaml.safe_load(file.read())
if config == {}:
raise ValueError(
"No config file could be found. See https://git.io/JWa5o for an example "
"config file. All fields except 'settings' are optional."
)
settings = config["settings"]
orgs = config["orgs"] or []
skipForks = config["skipForks"] or True
return settings, orgs, skipForks
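# Putting the helpers together (sketch only; it assumes the keys of the settings mapping
# returned by load_config are the GraphQL repository field names expected by get_gql_query):
def _example_fetch_repos(config_path: str = None) -> dict:
    settings, _orgs, _skip_forks = load_config(config_path)
    query = get_gql_query("\n".join(settings))  # iterating the dict yields the field names
    return query_gh_gpl_api(query)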
| 2.765625 | 3 |
kosudoku/montecarlo.py | buzbarstow/collectionmc | 0 | 12799576 | # ------------------------------------------------------------------------------------------------ #
def ImportEssentialityData(fileName):
# Not yet ready for prime time
# Import a defined format essentiality data file
# Assumes that data is in the format: locus tag, gene name, essentiality
from .utils import ParseCSVLine
fileHandle = open(fileName, 'r')
data = fileHandle.readlines()
dataDict = {}
i = 0
while i < len(data):
# Ignore comment lines
if data[i][0] != '#':
dataLine = ParseCSVLine(data[i])
dataDict[dataLine[0]] = [dataLine[1], dataLine[2]]
i += 1
return dataDict
# ------------------------------------------------------------------------------------------------ #
# ------------------------------------------------------------------------------------------------ #
def BuildEssentialityDictThatIsKeyedByLocusTag(dataArray):
# Not yet ready for prime time
# Build essentiality data dict that is keyed by locus tag
essentialityDict = {}
locusTags = []
headersWithoutSysName = []
i = 0
while i < len(headers):
if headers[i] != 'sysName':
headersWithoutSysName.append(headers[i])
i += 1
dataDict = {}
for line in dataArray:
dataDict[line['sysName']] = {}
for header in headersWithoutSysName:
dataDict[line['sysName']][header] = line[header]
return dataDict
# ------------------------------------------------------------------------------------------------ #
# ------------------------------------------------------------------------------------------------ #
def BuildCDSDictThatIsKeyedByLocusTag(cdsFeatures):
# Not yet ready for prime time
i = 0
cdsDict = {}
while i < len(cdsFeatures):
locusTag = cdsFeatures[i].tagDict['locus_tag'][0]
cdsDict[locusTag] = cdsFeatures[i]
i += 1
return cdsDict
# ------------------------------------------------------------------------------------------------ #
# ------------------------------------------------------------------------------------------------ #
def SimulatePicking(hittableFeatures, notHittableFeatures, hittableTransposonCoords, \
transposonCoordToFeatureDict, maxMutants):
from numpy.random import choice
import pdb
nonEssentialGeneCount = len(hittableFeatures)
featureHitCountDict = {}
for feature in hittableFeatures:
featureHitCountDict[feature] = 0
featuresHitAtLeastOnce = 0
featuresHitAtLeastOnceVersusMutant = []
i = 1
while i <= maxMutants:
randomCoord = int(choice(hittableTransposonCoords))
featuresToBeHit = transposonCoordToFeatureDict[randomCoord]
isAnyFeatureIncludingThisCoordNotHittable = False
for featureToBeHit in featuresToBeHit:
if featureToBeHit in notHittableFeatures:
isAnyFeatureIncludingThisCoordNotHittable = True
if isAnyFeatureIncludingThisCoordNotHittable == False:
for featureToBeHit in featuresToBeHit:
try:
featureHitCountDict[featureToBeHit] += 1
except:
pdb.set_trace()
if featureHitCountDict[featureToBeHit] == 1:
featuresHitAtLeastOnce += 1
featuresHitAtLeastOnceVersusMutant.append(featuresHitAtLeastOnce)
i += 1
return featuresHitAtLeastOnceVersusMutant
# ------------------------------------------------------------------------------------------------ #
# ------------------------------------------------------------------------------------------------ #
def SimulateMultiplePickings(transposonCoordToFeatureDictFile, numberOfTrials, maxMutants):
    from numpy import unique, intersect1d, mean, std, arange
import xml.etree.ElementTree as ET
import pdb
transposonCoordToFeatureDictFileHandle = open(transposonCoordToFeatureDictFile, 'r')
transposonCoordToFeatureDict = {}
hittableFeatures = []
hittableTransposonCoords = []
notHittableTransposonCoords = []
notHittableFeatures = []
otherFeatures = []
tree = ET.parse(transposonCoordToFeatureDictFile)
root = tree.getroot()
importedCoordsList = root.findall('coord')
for coord in importedCoordsList:
coordinate = int(coord.attrib['coord'])
loci = coord.findall('locus')
importedCoordsKeys = transposonCoordToFeatureDict.keys()
if coordinate not in importedCoordsKeys:
transposonCoordToFeatureDict[coordinate] = []
for locus in loci:
locusName = locus.attrib['locus']
essentiality = locus.attrib['essentiality']
transposonCoordToFeatureDict[coordinate].append(locusName)
if essentiality == 'Dispensable':
hittableTransposonCoords.append(coordinate)
hittableFeatures.append(locusName)
elif essentiality == 'Essential':
notHittableFeatures.append(locusName)
notHittableTransposonCoords.append(coordinate)
else:
otherFeatures.append(locusName)
print(locusName)
hittableFeatures = unique(hittableFeatures)
hittableTransposonCoords = unique(hittableTransposonCoords)
notHittableFeatures = unique(notHittableFeatures)
otherFeatures = unique(otherFeatures)
intersection = intersect1d(hittableFeatures, notHittableFeatures)
# Simulate a number of picking runs
featuresHitAtLeastOnceTrialsArray = []
i = 0
while i < numberOfTrials:
featuresHitAtLeastOnceVersusMutant = \
SimulatePicking(hittableFeatures, notHittableFeatures, hittableTransposonCoords, \
transposonCoordToFeatureDict, maxMutants)
featuresHitAtLeastOnceTrialsArray.append(featuresHitAtLeastOnceVersusMutant)
i += 1
    # Collect together the data from the picking runs for calculation of mean and standard
# deviation of number of hits picked
i = 0
collectedFeatureHitCountArray = []
while i < len(featuresHitAtLeastOnceTrialsArray[0]):
collectedFeatureHitCountArray.append([])
i += 1
i = 0
while i < len(collectedFeatureHitCountArray):
j = 0
while j < len(featuresHitAtLeastOnceTrialsArray):
collectedFeatureHitCountArray[i].append(featuresHitAtLeastOnceTrialsArray[j][i])
j += 1
i += 1
averageFeatureHitCount = []
sdFeatureHitCount = []
featureHitCountUpperBound = []
featureHitCountLowerBound = []
# Calculate the mean and standard deviation of the number of unique features hit at each pick
# from the trials
i = 0
while i < len(collectedFeatureHitCountArray):
averageFeatureHitCount.append(mean(collectedFeatureHitCountArray[i]))
sdFeatureHitCount.append(std(collectedFeatureHitCountArray[i]))
featureHitCountUpperBound.append(averageFeatureHitCount[i] + sdFeatureHitCount[i])
featureHitCountLowerBound.append(averageFeatureHitCount[i] - sdFeatureHitCount[i])
i += 1
# Prepare an x axis (the number of mutants picked) for the output
iAxis = arange(1, maxMutants+1, 1)
noUniqHittableFeatures = len(hittableFeatures)
return [iAxis, averageFeatureHitCount, sdFeatureHitCount, featureHitCountUpperBound, \
featureHitCountLowerBound, noUniqHittableFeatures ]
# ------------------------------------------------------------------------------------------------ #
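# Why the closed form in the next function works: if n mutants are picked uniformly at
# random over N equally hittable genes, the hit count per gene is approximately Poisson
# with mean n/N, so the probability a given gene is never hit is exp(-n/N) and the
# expected number of distinct genes hit is N * (1 - exp(-n/N)). This assumes every
# dispensable gene is equally likely to receive an insertion, ignoring differences in
# the number of AT/TA sites per gene.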
# ------------------------------------------------------------------------------------------------ #
def PoissonEstimateOfGenesHit(iAxis, noUniqHittableFeatures):
    from numpy import exp, array
uniqueGenesHit = []
i = 0
while i < len(iAxis):
ans = noUniqHittableFeatures*(1-exp(-iAxis[i]/noUniqHittableFeatures))
uniqueGenesHit.append(ans)
i += 1
uniqueGenesHit = array(uniqueGenesHit, float)
return uniqueGenesHit
# ------------------------------------------------------------------------------------------------ #
# ------------------------------------------------------------------------------------------------ #
def FindATandTAPositions2(genomeFile, format='genbank'):
# Does the same thing as FindATandTAPositions but can work with a GenBank or a Fasta file, \
# so you only need one file format
import re
from pdb import set_trace
if format == 'genbank':
sequence = ImportGenBankSequence(genomeFile)
elif format == 'fasta':
sequence = ImportFastaSequence(genomeFile)
ATandTAPositions = []
atRegex = re.compile('(at|ta)', re.IGNORECASE)
# set_trace()
i = 0
while i < len(sequence) - 1:
atMatch = atRegex.match(sequence[i:i+2])
if atMatch != None:
ATandTAPositions.append(i+1)
i += 1
return [ATandTAPositions, sequence]
# ------------------------------------------------------------------------------------------------ #
| 2.859375 | 3 |
tests/test_actions.py | pauloromeira/onegram | 150 | 12799577 | import pytest
from onegram.exceptions import NotSupportedError
from onegram import follow, unfollow
from onegram import like, unlike
from onegram import comment, uncomment
from onegram import save, unsave
def test_follow(logged, user, cassette):
if logged:
response = follow(user)
assert response == {'result': 'following',
'status': 'ok',
'user_id': user['id']}
response = unfollow(user)
assert response == {'status': 'ok', 'user_id': user['id']}
else:
with pytest.raises(NotSupportedError):
follow(user)
with pytest.raises(NotSupportedError):
unfollow(user)
def test_like(logged, post, cassette):
if logged:
response = like(post)
assert response == {'status': 'ok', 'post_id': post['id']}
response = unlike(post)
assert response == {'status': 'ok', 'post_id': post['id']}
else:
with pytest.raises(NotSupportedError):
like(post)
with pytest.raises(NotSupportedError):
unlike(post)
def test_comment(logged, post, cassette):
text = 'awesome!'
if logged:
commentary = comment(text, post)
assert commentary['id']
assert commentary['text'] == text
assert commentary['status'] == 'ok'
assert commentary['post_id'] == post['id']
response = uncomment(commentary)
assert response == {'status': 'ok', 'post_id': post['id']}
else:
with pytest.raises(NotSupportedError):
comment(text, post)
with pytest.raises(NotSupportedError):
fake_comment = {'id': '1', 'post_id': '2'}
uncomment(fake_comment)
def test_save(logged, post, cassette):
if logged:
response = save(post)
assert response == {'status': 'ok', 'post_id': post['id']}
response = unsave(post)
assert response == {'status': 'ok', 'post_id': post['id']}
else:
with pytest.raises(NotSupportedError):
save(post)
with pytest.raises(NotSupportedError):
unsave(post)
| 2.375 | 2 |
doc/example.py | ericbulloch/authorize | 1 | 12799578 | <filename>doc/example.py<gh_stars>1-10
from authorize import cim
from pprint import pprint
# Note that you need to specify a delimiter and an encapsulator
# for your account (either in your account dashboard or through
# the constructor of any of the API objects
cim_api = cim.Api(u'LOGIN', u'TRANS_KEY', is_test=True,
delimiter=u",", encapsulator=u"")
# We create a profile for one of our users.
tree = cim_api.create_profile(
card_number=u"4111111111111111",
expiration_date=u"2008-07",
customer_id=u"test_account")
# Store the profile id somewhere so that we can later retrieve it.
# CIM doesn't have a listing or search functionality so you'll
# have to keep this somewhere safe and associated with the user.
profile_id = tree.customer_profile_id.text_
# Retrieve again the profile we just created using the profile_id
tree = cim_api.get_profile(customer_profile_id=profile_id)
pprint(tree)
# And let's now try to create a transaction on that profile.
resp = cim_api.create_profile_transaction(
customer_profile_id=profile_id,
amount=50.0
)
pprint(resp)
# We did what we needed, we can remove the profile for this example.
pprint(cim_api.delete_profile(customer_profile_id=profile_id))
| 2.6875 | 3 |
advent_2020/report_repair.py | SutterButter4/AdventOfCode | 0 | 12799579 | <reponame>SutterButter4/AdventOfCode
from typing import List, Tuple, Optional
def report_repair(input_data_filepath: str) -> Tuple[int, int]:
report_data = parse_input(input_data_filepath)
report_data.sort()
pair = find_pair(report_data, 2020)
(a, b) = (-1, -1)
if pair:
(a, b) = pair
triple = find_triple(report_data, 2020)
(c, d, e) = (-1, -1, -1)
if triple:
(c, d, e) = triple
return a * b, c * d * e
def parse_input(input_data_filepath: str) -> List[int]:
with open(input_data_filepath) as file:
input_str = file.read()
return [int(x) for x in input_str.split("\n")]
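# find_pair below walks a low pointer and a high pointer inward over the sorted report:
# if the current pair sums below the target only advancing the low pointer can help, and
# if it sums above only retreating the high pointer can, so each step safely discards one
# candidate. The scan is O(n) after the O(n log n) sort done in report_repair.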
def find_pair(sorted_report: List[int], target_num: int) -> Optional[Tuple[int, int]]:
front_index = 0
end_index = len(sorted_report) - 1
while front_index != end_index:
a = sorted_report[front_index]
b = sorted_report[end_index]
if a + b == target_num:
return a, b
elif a + b < target_num:
front_index += 1
        else:  # a + b > target_num
end_index -= 1
# given valid input, should never be reached.
return None
def find_triple(sorted_report: List[int], target: int) -> Optional[Tuple[int, int, int]]:
for i in sorted_report:
pair = find_pair(sorted_report, target-i)
if pair:
a, b = pair
return a, b, i
return None
| 3.53125 | 4 |
aws/ec2util.py | skinheadbob/trinity | 2 | 12799580 | import logging
import math
import os
import time
import boto3
from pyspark.sql import SparkSession
def get_asg_inservice_instance_count(asg_name: str, region_name: str) -> int:
client = boto3.client('autoscaling', region_name=region_name)
asg = client.describe_auto_scaling_groups(AutoScalingGroupNames=[asg_name])['AutoScalingGroups'][0]
return sum([1 for inst in asg['Instances'] if inst['LifecycleState'] == 'InService'])
def adjust_ec2_asg(asg_name: str, region_name: str, desired_capacity: int,
check_interval_sec: int = 15,
ten_inst_timeout_sec: int = 30):
aws_client = boto3.client('autoscaling', region_name=region_name)
aws_client.update_auto_scaling_group(AutoScalingGroupName=asg_name,
DesiredCapacity=desired_capacity,
MinSize=0,
MaxSize=desired_capacity)
current_capacity = get_asg_inservice_instance_count(asg_name, region_name)
adjust_capacity = abs(desired_capacity - current_capacity)
timeout_sec = ten_inst_timeout_sec * math.ceil(abs(adjust_capacity) / 10)
max_trial = int(math.ceil(timeout_sec / check_interval_sec))
for trial in range(0, max_trial + 1):
inservice_instance_count = get_asg_inservice_instance_count(asg_name, region_name)
if inservice_instance_count != desired_capacity:
time.sleep(check_interval_sec)
else:
return
logging.warning('Failed to adjust the capacity of asg "%(g)s" from %(f)d to %(t)d in %(s)d seconds'
% {'g': asg_name, 'f': current_capacity, 't': desired_capacity, 's': timeout_sec})
def wait_on_nodes_to_join_spark_cluster(spark, desired_cluster_size: int,
timeout_sec: int, check_interval_sec: int = 3):
max_trials = int(math.ceil(timeout_sec / check_interval_sec))
for trial in range(0, max_trials + 1):
current_size = get_spark_worker_node_count(spark)
if current_size != desired_cluster_size:
time.sleep(check_interval_sec)
else:
return
logging.warning('Failed to bring %(d)d nodes to Spark cluster in %(s)d seconds, current cluster size: %(c)d'
% {'d': desired_cluster_size, 's': timeout_sec, 'c': get_spark_worker_node_count(spark)})
def get_spark_worker_node_count(spark):
# noinspection PyProtectedMember
return spark.sparkContext._jsc.sc().getExecutorMemoryStatus().size() - 1
def setup_spark_session() -> SparkSession:
from envconfig.env import env
asg_name = env.aws_asg_name
region_name = env.aws_region_name
cluster_size = env.aws_cluster_size
adjust_ec2_asg(asg_name, region_name, cluster_size)
os.environ['PYSPARK_PYTHON'] = env.spark_pyspark_python
os.environ['PYTHONPATH'] = env.spark_pythonpath
spark = SparkSession.builder \
.master(env.spark_master) \
.appName('Trinity %(e)s' % {'e': env.env}) \
.config('spark.executor.uri', env.spark_executor_uri) \
.config('spark.sql.shuffle.partitions', env.spark_sql_shuffle_partitions) \
.config('spark.driver.memory', env.spark_driver_memory) \
.config('spark.executor.memory', env.spark_executor_memory) \
.getOrCreate()
wait_on_nodes_to_join_spark_cluster(spark, env.aws_cluster_size, env.aws_cluster_size * 10)
return spark
def prep_notebook(spark, aws_cluster_size: int = None, aws_asg_name: str = None, aws_region_name: str = None,
setup_spark_cluster_timeout_sec: int = None):
from envconfig.env import env
if aws_cluster_size is None:
aws_cluster_size = env.aws_cluster_size
if aws_asg_name is None:
aws_asg_name = env.aws_asg_name
if aws_region_name is None:
aws_region_name = env.aws_region_name
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)s %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
logging.info('Adjusting AWS autoscaling group "%(g)s" capacity to %(c)d ...'
% {'g': aws_asg_name, 'c': aws_cluster_size})
adjust_ec2_asg(aws_asg_name, aws_region_name, aws_cluster_size)
logging.info('Waiting for workers to join Spark cluster ...')
if setup_spark_cluster_timeout_sec is None:
setup_spark_cluster_timeout_sec = aws_cluster_size * 20
wait_on_nodes_to_join_spark_cluster(spark, aws_cluster_size, setup_spark_cluster_timeout_sec)
logging.info('Notebook and Spark cluster are standing by')
def shutdown_notebook(aws_asg_name: str = None, aws_region_name: str = None):
from envconfig.env import env
if aws_asg_name is None:
aws_asg_name = env.aws_asg_name
if aws_region_name is None:
aws_region_name = env.aws_region_name
logging.info('Shutting down AWS autoscaling group "%(g)s" by adjusting capacity to 0'
% {'g': aws_asg_name})
adjust_ec2_asg(aws_asg_name, aws_region_name, 0)
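# Notebook-side sketch (assumes the envconfig defaults describe your autoscaling group,
# region and Spark master): scale the cluster up before heavy work and always scale it
# back down afterwards.
def _example_notebook_session():
    spark = setup_spark_session()  # scales the ASG up and waits for the workers to join
    try:
        pass  # ... run Spark jobs with `spark` here ...
    finally:
        shutdown_notebook()  # scale the autoscaling group back down to zero instances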
| 2.296875 | 2 |
account/forms.py | nvvu99/news-aggregator | 0 | 12799581 | from django import forms
from .models import User
from news.models import Category
from django.utils.translation import gettext, gettext_lazy as _
from django.contrib.auth import authenticate, forms as auth_forms
from django.db.models import Q
class LoginForm(forms.Form):
username = forms.CharField(
label=_('Username'),
)
password = forms.CharField(
label=_("<PASSWORD>"),
strip=False,
widget=forms.PasswordInput(attrs={'autocomplete': 'current-password'}),
)
error_messages = {
'invalid_login': _("Username hoặc mật khẩu không đúng."),
}
def __init__(self, *args, **kwargs):
self.user_cache = None
return super().__init__(*args, **kwargs)
def get_initial_for_field(self, field, field_name):
return ''
def clean(self):
username = self.cleaned_data.get('username')
password = self.cleaned_data.get('password')
if username is not None and password is not None:
self.user_cache = authenticate(
username=username, password=password)
if self.user_cache is None:
raise forms.ValidationError(
self.error_messages['invalid_login'],
code='invalid_login',
)
return self.cleaned_data
def get_user(self):
return self.user_cache
class RegisterForm(forms.ModelForm):
error_messages = {
'password_mismatch': _('Mật khẩu không khớp.'),
}
re_password = forms.CharField(
label='<PASSWORD>',
widget=forms.PasswordInput,
)
class Meta:
model = User
fields = ('username', 'first_name', 'last_name', 'email', 'password',)
labels = {
'email': 'Email',
'password': '<PASSWORD>',
'first_name': 'Tên',
'last_name': 'Họ',
}
def get_initial_for_field(self, field, field_name):
return ''
def clean_re_password(self):
password = self.cleaned_data.get('password')
re_password = self.cleaned_data.get('re_password')
if password and re_password and password != re_password:
raise forms.ValidationError(
self.error_messages['password_mismatch'],
code='password_mismatch'
)
return re_password
def save(self, commit=True):
user = super().save(commit=False)
user.set_password(self.cleaned_data["password"])
if commit:
user.save()
return user
class SetPasswordForm(auth_forms.SetPasswordForm):
error_messages = {
'password_mismatch': _('Mật khẩu không khớp.'),
}
new_password1 = forms.CharField(
label=_("Mật khẩu"),
widget=forms.PasswordInput(attrs={'autocomplete': 'new-password'}),
strip=False,
)
new_password2 = forms.CharField(
label=_("Nhập lại mật khẩu"),
strip=False,
widget=forms.PasswordInput(attrs={'autocomplete': 'new-password'}),
)
class PasswordChangeForm(SetPasswordForm):
error_messages = {
**SetPasswordForm.error_messages,
'password_incorrect': _("Mật khẩu cũ bạn vừa nhập không đúng."),
}
old_password = forms.CharField(
label=_("M<PASSWORD>"),
strip=False,
widget=forms.PasswordInput(
attrs={'autocomplete': 'current-password', 'autofocus': True}),
)
field_order = ['old_password', 'new_password1', 'new_<PASSWORD>']
def get_initial_for_field(self, field, field_name):
return ''
def clean_old_password(self):
old_password = self.cleaned_data.get('old_password')
if not self.user.check_password(old_password):
raise forms.ValidationError(
self.error_messages['password_incorrect'],
code='password_incorrect',
)
return old_password
def clean(self):
new_password = self.cleaned_data.get('<PASSWORD>_<PASSWORD>')
self.user.set_password(new_password)
self.user.save()
return self.cleaned_data
class TopicOrganizeForm(forms.ModelForm):
def __init__(self, user, *args, **kwargs):
super().__init__(*args, **kwargs)
self.following_categories = user.following_categories.all()
self.fields['following_categories'].queryset = self.following_categories
class Meta:
model = User
fields = ('following_categories', )
widgets = {
'following_categories': forms.CheckboxSelectMultiple(),
}
class TopicAddForm(TopicOrganizeForm):
adding_categories = forms.ModelMultipleChoiceField(queryset=None)
def __init__(self, user, *args, **kwargs):
super().__init__(user, *args, **kwargs)
following_query = Q()
for category in self.following_categories:
following_query |= Q(pk=category.pk)
self.fields['adding_categories'].queryset = Category.objects.exclude(following_query)
def clean(self):
cleaned_data = self.cleaned_data
cleaned_data['following_categories'] = self.following_categories.union(cleaned_data.get('adding_categories'))
return cleaned_data
class Meta(TopicOrganizeForm.Meta):
fields = ('following_categories', 'adding_categories', )
widgets = {
'adding_categories': forms.CheckboxSelectMultiple()
}
class UserUpdateForm(forms.ModelForm):
def __init__(self, user, *args, **kwargs):
self.user = user
return super().__init__(*args, **kwargs)
class Meta:
model = User
fields = ('last_name', 'first_name', 'avatar')
labels = {
'last_name': 'Họ',
'first_name': 'Tên',
'avatar': 'Avatar',
}
| 2.203125 | 2 |
falcon_rest/session.py | boomletsgo/falcon-rest | 0 | 12799582 | <reponame>boomletsgo/falcon-rest
from contextlib import contextmanager
from sqlalchemy.orm import sessionmaker
@contextmanager
def make(db_engine, **kwargs):
"""Scope a db_session for a context, and close it afterwards"""
db_session = sessionmaker(bind=db_engine, **kwargs)()
try:
yield db_session
finally:
db_session.close()
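# Usage sketch (the SQLite URL is a placeholder): the context manager yields a session
# bound to the given engine and guarantees it is closed, even if the block raises.
def _example_usage(db_url="sqlite:///:memory:"):
    """Illustrative only: open a scoped session against a throwaway in-memory database."""
    from sqlalchemy import create_engine
    engine = create_engine(db_url)
    with make(engine) as db_session:
        # run queries through db_session here; it is closed automatically on exit
        return db_session.is_active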
| 2.1875 | 2 |
parabox/behaviour/movable/movable.py | DenisMinich/parabox | 0 | 12799583 | from kivy.properties import (
NumericProperty, ReferenceListProperty, BooleanProperty)
from kivy.vector import Vector
from parabox.base_object import BaseObject
class Movable(BaseObject):
"""Mixins for movable classes"""
velocity_x = NumericProperty(0)
velocity_y = NumericProperty(0)
velocity = ReferenceListProperty(velocity_x, velocity_y)
acceleration_x = NumericProperty(0)
acceleration_y = NumericProperty(0)
acceleration = ReferenceListProperty(acceleration_x, acceleration_y)
in_move = BooleanProperty(False)
def __init__(self, *args, velocity=(0, 0), speed_limit=10., **kwargs):
"""Movable constructor
:param velocity: velocity vector
:type velocity: kivy.vector.Vector
:param speed_limit: speed limit for object. 10 by default
:type speed_limit: float
"""
super(Movable, self).__init__(*args, **kwargs)
self.speed_limit = speed_limit
self.velocity = velocity
self.add_to_collections(["movable"])
self.register_event_type('on_move')
self.register_event_type('on_move_x')
self.register_event_type('on_move_y')
self.register_event_type('on_stop')
self.register_event_type('on_stop_x')
self.register_event_type('on_stop_y')
self.bind(on_update=self.move)
def _update_velocity(self):
"""Change velocity because of acceleration"""
self.velocity = Vector(*self.velocity) + Vector(*self.acceleration)
velocity_vector = Vector(self.velocity)
if velocity_vector.length() > self.speed_limit:
self.velocity = (velocity_vector * self.speed_limit /
velocity_vector.length())
def move(self, instance):
"""Move object
:param instance: self analog
:type instance: kivy.uix.widget.Widget
"""
self._update_velocity()
self._change_position()
self._reset_acceleration()
def _change_position(self):
"""Change objects position"""
self.x += self.velocity_x
if self.velocity_x:
self.dispatch("on_move_x")
self.y += self.velocity_y
if self.velocity_y:
self.dispatch("on_move_y")
if self.velocity_y or self.velocity_x:
self.dispatch("on_move")
def _reset_acceleration(self):
"""Set acceleration to zero"""
self.acceleration_x = self.acceleration_y = 0
def move_stop_x(self):
"""Stop in x direction"""
self.velocity_x = 0
def move_stop_y(self):
"""Stop in y direction"""
self.velocity_y = 0
def move_stop(self):
"""Stop object"""
self.move_stop_x()
self.move_stop_y()
def on_velocity_x(self, instance, value):
"""Dispatch event on x move"""
if not value and self.in_move:
self.dispatch("on_stop_x")
if not self.velocity_y:
self.dispatch("on_stop")
def on_velocity_y(self, instance, value):
"""Dispatch event on y move"""
if not value and self.in_move:
self.dispatch("on_stop_y")
if not self.velocity_x:
self.dispatch("on_stop")
def on_move(self):
"""On move event"""
self.in_move = True
def on_move_x(self):
"""On move x event"""
pass
def on_move_y(self):
"""On move y event"""
pass
def on_stop(self):
"""On stop event"""
self.in_move = False
def on_stop_x(self):
"""On stop x event"""
pass
def on_stop_y(self):
"""On stop y event"""
pass
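# Sketch of how the mixin is intended to be driven (assuming BaseObject dispatches
# "on_update" once per frame, which this mixin binds to move()). The subclass and the
# numbers are illustrative only:
#
#   class Bullet(Movable):
#       pass
#
#   bullet = Bullet(velocity=(4, 0), speed_limit=12.)
#   bullet.acceleration = (0, 1)  # integrated into velocity on the next update, then reset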
| 2.921875 | 3 |
app/core/settings.py | oxfn/owtest | 0 | 12799584 | from functools import lru_cache
from pydantic import BaseSettings
class Settings(BaseSettings):
"""Settings model."""
secret_key: str = ""
mongo_url: str = ""
testing: bool = False
@lru_cache(typed=False)
def get_settings() -> Settings:
"""Initialize settings."""
return Settings()
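# Usage sketch: values come from the environment (pydantic maps secret_key -> SECRET_KEY,
# mongo_url -> MONGO_URL, testing -> TESTING) or fall back to the defaults above, and the
# lru_cache means every caller shares a single Settings instance per process.
def _example_cached_access() -> bool:
    """Illustrative only: repeated calls return the very same cached Settings object."""
    first = get_settings()
    second = get_settings()
    return first is second  # True: the environment is read only once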
| 2.53125 | 3 |
tests/unittest/test_autograd.py | yuhonghong66/minpy | 1,271 | 12799585 | from __future__ import print_function
import minpy.numpy as mp
import numpy as np
import minpy.dispatch.policy as policy
from minpy.core import convert_args, return_numpy, grad_and_loss, grad, minpy_to_numpy as mn, numpy_to_minpy as nm
import time
# mp.set_policy(policy.OnlyNumPyPolicy())
def test_autograd():
@convert_args
def minpy_rnn_step_forward(x, prev_h, Wx, Wh, b):
next_h = mp.tanh(x.dot(Wx) + prev_h.dot(Wh) + b)
return next_h
def rel_error(x, y):
""" returns relative error """
return np.max(np.abs(x - y) / (np.maximum(1e-8, np.abs(x) + np.abs(y))))
def rnn_step_forward(x, prev_h, Wx, Wh, b):
next_h = np.tanh(prev_h.dot(Wh) + x.dot(Wx) + b)
cache = next_h, prev_h, x, Wx, Wh
return next_h, cache
def rnn_step_backward(dnext_h, cache):
dx, dprev_h, dWx, dWh, db = None, None, None, None, None
# Load values from rnn_step_forward
next_h, prev_h, x, Wx, Wh = cache
# Gradients of loss wrt tanh
dtanh = dnext_h * (1 - next_h * next_h) # (N, H)
# Gradients of loss wrt x
dx = dtanh.dot(Wx.T)
# Gradients of loss wrt prev_h
dprev_h = dtanh.dot(Wh.T)
# Gradients of loss wrt Wx
dWx = x.T.dot(dtanh) # (D, H)
# Gradients of loss wrt Wh
dWh = prev_h.T.dot(dtanh)
# Gradients of loss wrt b. Note we broadcast b in practice. Thus result of
# matrix ops are just sum over columns
db = dtanh.sum(axis=0) # == np.ones([N, 1]).T.dot(dtanh)[0, :]
return dx, dprev_h, dWx, dWh, db
# preparation
N, D, H = 4, 5, 6
x = np.random.randn(N, D)
h = np.random.randn(N, H)
Wx = np.random.randn(D, H)
Wh = np.random.randn(H, H)
b = np.random.randn(H)
out, cache = rnn_step_forward(x, h, Wx, Wh, b)
dnext_h = np.random.randn(*out.shape)
# test MinPy
start = time.time()
rnn_step_forward_loss = lambda x, h, Wx, Wh, b, dnext_h: minpy_rnn_step_forward(x, h, Wx, Wh, b) * nm(dnext_h)
grad_loss_function = return_numpy(grad_and_loss(rnn_step_forward_loss, range(5)))
grad_arrays = grad_loss_function(x, h, Wx, Wh, b, dnext_h)[0]
end = time.time()
print("MinPy total time elapsed:", end - start)
# test NumPy
start = time.time()
out, cache = rnn_step_forward(x, h, Wx, Wh, b)
dx, dprev_h, dWx, dWh, db = rnn_step_backward(dnext_h, cache)
out *= dnext_h # to agree with MinPy calculation
end = time.time()
print("NumPy total time elapsed:", end - start)
print()
print("Result Check:")
print('dx error: ', rel_error(dx, grad_arrays[0]))
print('dprev_h error: ', rel_error(dprev_h, grad_arrays[1]))
print('dWx error: ', rel_error(dWx, grad_arrays[2]))
print('dWh error: ', rel_error(dWh, grad_arrays[3]))
print('db error: ', rel_error(db, grad_arrays[4]))
def test_zero_input_grad():
def foo1(x):
return 1
bar1 = grad(foo1)
assert bar1(0) == 0.0
def test_reduction():
def test_sum():
x_np = np.array([[1, 2], [3, 4], [5, 6]])
x_grad = np.array([[1, 1], [1, 1], [1, 1]])
def red1(x):
return mp.sum(x)
def red2(x):
return mp.sum(x, axis=0)
def red3(x):
return mp.sum(x, axis=0, keepdims=True)
grad1 = grad(red1)
assert np.all(grad1(x_np).asnumpy() == x_grad)
grad2 = grad(red2)
assert np.all(grad2(x_np).asnumpy() == x_grad)
grad3 = grad(red3)
assert np.all(grad3(x_np).asnumpy() == x_grad)
def test_max():
x_np = np.array([[1, 2], [2, 1], [0, 0]])
x_grad1 = np.array([[0, 1], [1, 0], [0, 0]])
x_grad2 = np.array([[0, 1], [1, 0], [1, 1]])
x_grad3 = np.array([[0, 1], [1, 0], [0, 0]])
def red1(x):
return mp.max(x)
def red2(x):
return mp.max(x, axis=1)
def red3(x):
return mp.max(x, axis=1, keepdims=True)
def red4(x):
return mp.max(x, axis=0)
def red5(x):
return mp.max(x, axis=0, keepdims=True)
grad1 = grad(red1)
assert np.all(grad1(x_np).asnumpy() == x_grad1)
grad2 = grad(red2)
assert np.all(grad2(x_np).asnumpy() == x_grad2)
grad3 = grad(red3)
assert np.all(grad3(x_np).asnumpy() == x_grad2)
grad4 = grad(red4)
assert np.all(grad4(x_np).asnumpy() == x_grad3)
grad5 = grad(red5)
assert np.all(grad5(x_np).asnumpy() == x_grad3)
def test_min():
x_np = np.array([[1, 2], [2, 1], [0, 0]])
x_grad1 = np.array([[0, 0], [0, 0], [1, 1]])
x_grad2 = np.array([[1, 0], [0, 1], [1, 1]])
x_grad3 = np.array([[0, 0], [0, 0], [1, 1]])
def red1(x):
return mp.min(x)
def red2(x):
return mp.min(x, axis=1)
def red3(x):
return mp.min(x, axis=1, keepdims=True)
def red4(x):
return mp.min(x, axis=0)
def red5(x):
return mp.min(x, axis=0, keepdims=True)
grad1 = grad(red1)
assert np.all(grad1(x_np).asnumpy() == x_grad1)
grad2 = grad(red2)
assert np.all(grad2(x_np).asnumpy() == x_grad2)
grad3 = grad(red3)
assert np.all(grad3(x_np).asnumpy() == x_grad2)
grad4 = grad(red4)
assert np.all(grad4(x_np).asnumpy() == x_grad3)
grad5 = grad(red5)
assert np.all(grad5(x_np).asnumpy() == x_grad3)
test_sum()
test_max()
test_min()
if __name__ == "__main__":
test_autograd()
test_zero_input_grad()
test_reduction()
| 2.453125 | 2 |
geometry.py | Justcoderguy/python_training | 0 | 12799586 | from point import Point
__author__ = 'pzqa'
l1 = list(map(lambda i: Point(i, i*i), range(-5, 6)))
l2 = list(filter(lambda el: el.x % 2 == 0, l1))
print(l1)
print(l2)
| 2.65625 | 3 |
Python/python_basic_project.py | Bozmenn/Patika | 0 | 12799587 | #SORU 1_______________________________________________________
l_r = []  # module-level accumulator; note that it persists between calls to flatten()
def flatten(l):
for j in l:
if type(j) == list:
flatten(j)
else:
l_r.append(j)
return l_r
x= [[1,"a",["cat"],2],[[[3]],"dog"],4,5]
print(flatten(x))
# QUESTION 2_________________________________________________________
def rev(l):
for j in l:
if type(j) == list:
j.reverse()
l.reverse()
return l
x=[[1, 2],[3, 4],[5, 6, 7]]
print(rev(x))
| 3.78125 | 4 |
main/academy/migrations/0001_initial.py | UsamaKashif/studentutor | 7 | 12799588 | <reponame>UsamaKashif/studentutor<gh_stars>1-10
# Generated by Django 3.0.4 on 2020-08-31 17:24
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Academy',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('textArea', models.CharField(default='', max_length=300, null=True)),
('username', models.CharField(default='username', max_length=100)),
('name', models.CharField(max_length=200)),
('email', models.EmailField(max_length=254, unique=True)),
('city', models.CharField(max_length=100)),
('total_ads', models.IntegerField(default=0)),
('ads_deleted', models.IntegerField(default=0)),
('phone', models.CharField(max_length=11)),
('profile_complete', models.BooleanField(default=False)),
('invitations_sent', models.IntegerField(default=0)),
('invitations_sent_accepted', models.IntegerField(default=0)),
('invitations_sent_rejected', models.IntegerField(default=0)),
('invitations_recieved', models.IntegerField(default=0)),
('invitations_recieved_accepted', models.IntegerField(default=0)),
('invitations_recieved_rejected', models.IntegerField(default=0)),
('ad_post_count', models.IntegerField(default=0)),
('user_image', models.ImageField(default='user_profile_default.jpg', upload_to='profile_pics_stds')),
('academy', models.OneToOneField(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| 1.648438 | 2 |
regaudio.py | janhradek/regaudio | 0 | 12799589 | <filename>regaudio.py
#!/usr/bin/env python
'''
Created on Mar 8, 2012
@author: <NAME>, <EMAIL>
Requirements
- Python 3
- PyQt4
- SQLAlchemy
- SQLite
- numpy (diff)
Features
- filter tracks case insensitive, advanced filetring: !a:artist !n:name !r:rating
- groups, group selector, favorites (first in the selector)
- filter with groups
- advanced sorting - resorting sorted (remember previous sort and apply it after the new sort)
- context menu on tracks (the same as tracks menu)
    - delete tracks (or group tracks)
- group management - add group (manually), rename group, remove group
- merge tracks (and retain links to groups)
- NOTE: merging tracks in the same group leads to duplicate grouptracks that share the same underlying track; in short, multiple tracks with the same name
- add track to group
- import group name
- import context menu: delete, reset, select existing, as new, sk -> a,n, a,n -> sk
- import delete row(s)
- import search key -> artist & name (and vice versa)
- import real searchkey
- import mm
- import should have some status containing number of tracks that are not yet selected or as new
- import window shouldnt continue after enter is pressed (a problem with the groupname)
- ok button must be disabled if there are any tracks that arent selected or marked as new
- column sizes for importwindow
    - import cue (group)
- time units
- cuesheet has frames, everywhere else are seconds which themselves are inaccurate
- it would be nice to have exact data type before moving on, datetime.time looks promising (immutable though)
- datetime.time sucks for our purposes, store ms and convert them to and from string as needed (done?)
- implement for import too
- import directory (group)
- stars for rating (it should support tracks in groups too)
- star for new (star vs circle?, there must be visible difference with rating)
- add new tracks to group
- delete tracks from group (not just grouptracks)
    - icon
- configuration file ~/.regaudio : currently only one configuration value: location of the db
- total number of pages
- import window - disable enter and esc to close the window
- automatically change groupname of import or provide an option to do so
    - stripping certain parts from names like "original mix"
- support flac, m4a (basic)
- tidy up the menus and shortcuts
- (any replacement to cfg)
v0.1.1:
+ import: nameCutToSearchKey - make new search key from track name, but remove any parenthesis and their contents (shortcut ")
+ import: found tracks now marks verbatim matches with a star **, ++ if search strings match
- import: fixed, cue files are expected to be in utf-8 but if they are not (on UnicodeDecodeError) latin1 is used
- merge: fixed, now the links between groups and tracks dont disappear
v0.1.2:
- import: fixed importing mp3 without track number
+ addtogroup: now supported for grouptracks (group mode) too
+ import: tracks column now displays best match indicators
+ track rating in tooltip is now displayed as a set of stars, plus and dashes
+ track and import tables now both have smaller row height and alternating row colors
+ import: it is now possible to directly select multiple cuesheets
+ import: select best ('B' key)
+ import: quick resolve (select best track for **, ++, create new for no match, the rest is left as is)
    - tables: columns are now resized properly thanks to wordWrap=False
- both tables: removed grid
+ filter: now doesn't reset automatically, theres a button and menu entry for it
+ filter: a history of last 20 filter rules is now available
+ filter: new advanced filter option !g: allow query groups (by names), note that its also possible to use it with groups and therefore it is possible to query and intersection of groups
+ import: * + _ indicators now work by parts, artist and name are evaluated separately
- both tables: fixed context menu position
v0.1.3:
+ all tracks: pressing all tracks again will return to previous group
- delete: fixed the dialog message when deleting a track (not a group track)
- group filter: enabling group filter now focuses the filter editline
- in groups menu: groups are now listed alphabetically
- id3v1: id3v1 reader fixed
+ merge: now allowed in groupmode (maybe it's confusing)
+ group delete: deleting a group doesn't reset the group filter
+ sort: sorting grouptracks by number is remembered between all tracks/groups switches (so switching back and forth between groups and all tracks doesn't screw up the ordering)
+ status: reworked the status line, now it provides some basic statistics about the tracks (counts and ratings)
    - adding tracks to a group with the same tracks already present didn't work (it was impossible to make duplicates except through import)
+ detachcopy: make a copy of a track in a group (grouptrack) and replace the current link in the group with it
+ relink: change the grouptrack to link to some other similar track
v0.1.4:
- migration to PyQt5
FIXME:
- import: import file/directory selection crashes the application
- import: saying no to name cleanup, cleans up the name anyway
TODO:
- (artist translator to help fix some issues with names)
- (import should do some things automaticaly - cleanup group name, cleanup track names)
- safe delete (introduce a new flag and set it instead of deletion, filter by it)
'''
import sys
import os
from PyQt5 import QtWidgets
from ui import mainWindow
import model.config # this will read the config
VERSION = 0.1.4
def main():
app = QtWidgets.QApplication(sys.argv)
window=mainWindow.MainWindow()
window.show()
sys.exit(app.exec_())
if __name__ == '__main__':
main()
| 2.234375 | 2 |
analysis & data processing/generate_frequency_from_entity.py | jackwong95/TDS3751_SocialMediaCampaign | 0 | 12799590 | entity = {}
with open("../output/analysis twitter output/entity.txt", "r", encoding="utf-8") as f:
for line in f:
word = line.replace('\n','')
if word not in entity:
entity[word] = 0
entity[word] = entity[word] + 1
with open("../output/analysis output/entity.txt", "r", encoding="utf-8") as f:
for line in f:
word = line.replace('\n','')
if word not in entity:
entity[word] = 0
entity[word] = entity[word] + 1
entity_freq = open("../output/entity_frequency.csv", "w", encoding="utf-8")
entity_freq.write('entity,frequency\n')
for key in entity:
entity_freq.write(key+','+str(entity[key])+'\n')
entity_freq.close()
print(entity) | 2.703125 | 3 |
src/processor.py | OlegDurandin/AuthorStyle | 3 | 12799591 | from abc import ABC, abstractmethod
import os
import spacy_udpipe
from .utils import load_pickled_file
from .settings import PATH_TO_RUS_UDPIPE_MODEL
import spacy
def processTag(tag_representation):
res = {}
if len(tag_representation.split('|')) > 0:
for one_subtag in tag_representation.split('|'):
if len(one_subtag.split('=')) > 1:
key = one_subtag.split('=')[0]
value = one_subtag.split('=')[1]
res[key] = value
return res
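# A quick illustration of the parser above: a CoNLL-U style feature string
# such as "Case=Nom|Number=Sing" comes back as {'Case': 'Nom', 'Number': 'Sing'};
# sub-tags that contain no '=' are simply skipped.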
class SyntaxVectorizer(ABC):
def setup_rules(self):
pass
def text_structures_initializer(self):
pass
def calculate_morpho_tags(self, current_token):
pass
def normalize_morpho_tags(self):
pass
if __name__ == "__main__":
# Just checking
# Please, pay attention, that this class imported
t = load_pickled_file(os.path.join('ProcessedData', 'Андреев_Ангелочек.pkl'))
print('Pickle loaded')
current_nlp_module = spacy_udpipe.load_from_path('ru-syntagrus', PATH_TO_RUS_UDPIPE_MODEL)
print('Model loaded')
hj = SyntaxVectorizerRU(current_nlp_module)
hj.convert_to_attributes(t['Trees'])
resAttribs = hj.get_res_attributes()
print('Thats all') | 2.40625 | 2 |
krikos/nn/network.py | ShubhangDesai/nn-micro-framework | 1 | 12799592 | import numpy as np
from krikos.nn.layer import BatchNorm, BatchNorm2d, Dropout
class Network(object):
def __init__(self):
super(Network, self).__init__()
self.diff = (BatchNorm, BatchNorm2d, Dropout)
def train(self, input, target):
raise NotImplementedError
def eval(self, input):
raise NotImplementedError
class Sequential(Network):
def __init__(self, layers, loss, lr, regularization=None):
super(Sequential, self).__init__()
self.layers = layers
self.loss = loss
self.lr = lr
self.regularization = regularization
def train(self, input, target):
layers = self.layers
loss = self.loss
regularization = self.regularization
l = 0
for layer in layers:
if isinstance(layer, self.diff):
layer.mode = "train"
input = layer.forward(input)
if regularization is not None:
for _, param in layer.params.items():
l += regularization.forward(param)
l += loss.forward(input, target)
dout = loss.backward()
for layer in reversed(layers):
dout = layer.backward(dout)
for param, grad in layer.grads.items():
if regularization is not None:
grad += regularization.backward(layer.params[param])
layer.params[param] -= self.lr * grad
return np.argmax(input, axis=1), l
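    # Usage sketch (hypothetical; the concrete layer and loss classes live in
    # krikos.nn and their exact names are not shown in this file):
    #   net = Sequential(layers=[...], loss=SomeLoss(), lr=1e-3)
    #   preds, loss_value = net.train(batch, labels)   # one update step
    #   preds = net.eval(batch)                        # inference only
    # train() returns the argmax predictions together with the scalar loss.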
def eval(self, input):
layers = self.layers
for layer in layers:
if isinstance(layer, self.diff):
layer.mode = "test"
input = layer.forward(input)
return np.argmax(input, axis=1) | 2.828125 | 3 |
nonebot-plugin-pixivbot/mongo_helper.py | umimori13/mai-bot | 0 | 12799593 | import getpass
from pymongo import MongoClient
def main():
hostname = input("MongoDB Hostname (Default: localhost): ")
if not hostname:
hostname = "localhost"
port = input("MongoDB Port (Default: 27017): ")
if not port:
port = "27017"
username = input("MongoDB Username: ")
password = getpass.getpass("MongoDB Password: ")
database_name = input("MongoDB Database Name: ")
url = f"mongodb://{username}:{password}@{hostname}:{port}"
client = MongoClient(url)
db = client[database_name]
option = input("1: Create Indexes\n"
"2: Drop TTL Indexes\n"
"3: Drop Common Indexes\n"
"4: Drop Database\n"
"Option: ")
if option == "1":
db['download_cache'].create_index([("illust_id", 1)], unique=True)
db['illust_detail_cache'].create_index([("illust.id", 1)], unique=True)
db['illust_ranking_cache'].create_index([("mode", 1)], unique=True)
db['search_illust_cache'].create_index([("word", 1)], unique=True)
db['search_user_cache'].create_index([("word", 1)], unique=True)
db['user_illusts_cache'].create_index([("user_id", 1)], unique=True)
db['other_cache'].create_index([("type", 1)], unique=True)
create_ttl_indexes = input("Create TTL Indexes? [y/N] ")
if create_ttl_indexes == 'y' or create_ttl_indexes == 'Y':
download_cache_expires_in = int(input("Download cache expires in (sec): "))
db['download_cache'].create_index([("update_time", 1)], expireAfterSeconds=download_cache_expires_in)
illust_detail_cache_expires_in = int(input("Illust detail cache expires in (sec): "))
db['illust_detail_cache'].create_index([("update_time", 1)],
expireAfterSeconds=illust_detail_cache_expires_in)
illust_ranking_cache_expires_in = int(input("Illust ranking cache expires in (sec): "))
db['illust_ranking_cache'].create_index([("update_time", 1)],
expireAfterSeconds=illust_ranking_cache_expires_in)
search_illust_cache_expires_in = int(input("Search illust cache expires in (sec): "))
db['search_illust_cache'].create_index([("update_time", 1)],
expireAfterSeconds=search_illust_cache_expires_in)
search_user_cache_expires_in = int(input("Search user cache expires in (sec): "))
db['search_user_cache'].create_index([("update_time", 1)], expireAfterSeconds=search_user_cache_expires_in)
user_illusts_cache_expires_in = int(input("User illusts cache expires in (sec): "))
db['user_illusts_cache'].create_index([("update_time", 1)],
expireAfterSeconds=user_illusts_cache_expires_in)
other_cache_expires_in = int(input("User bookmarks and recommended illusts cache expire in (sec): "))
db['other_cache'].create_index([("update_time", 1)], expireAfterSeconds=other_cache_expires_in)
elif option == "2":
db['download_cache'].drop_index([("update_time", 1)])
db['illust_detail_cache'].drop_index([("update_time", 1)])
db['illust_ranking_cache'].drop_index([("update_time", 1)])
db['search_illust_cache'].drop_index([("update_time", 1)])
db['search_user_cache'].drop_index([("update_time", 1)])
db['user_illusts_cache'].drop_index([("update_time", 1)])
db['other_cache'].drop_index([("update_time", 1)])
elif option == "3":
db['download_cache'].drop_index([("illust_id", 1)])
db['illust_detail_cache'].drop_index([("illust_id", 1)])
db['illust_ranking_cache'].drop_index([("mode", 1)])
db['search_illust_cache'].drop_index([("word", 1)])
db['search_user_cache'].drop_index([("word", 1)])
db['user_illusts_cache'].drop_index([("user_id", 1)])
db['other_cache'].drop_index([("type", 1)])
elif option == "4":
        confirm = input("Sure? [y/N]")
        if confirm == 'y' or confirm == 'Y':
client.drop_database(database_name)
else:
print("Invalid Option.")
if __name__ == '__main__':
main()
| 2.828125 | 3 |
setup.py | renatoliveira/monta | 0 | 12799594 | <filename>setup.py<gh_stars>0
#!/usr/bin/env python3
from setuptools import setup
setup(
name='monta',
version='1.2',
description='Disk mounting shortcut for use with dmenu.',
author='<NAME>',
url='https://github.com/renatoliveira/monta',
include_package_data=True,
package_data={
'': [
'monta',
'license.txt'
]
},
scripts=[
'monta/scripts/monta',
'monta/scripts/desmonta',
'monta/montautils'
]
)
| 1.34375 | 1 |
udacity/l02c01_celsius_to_fahrenheit.py | mrdvince/tensorflowdevcert | 1 | 12799595 | <filename>udacity/l02c01_celsius_to_fahrenheit.py<gh_stars>1-10
import tensorflow as tf
import numpy as np
import logging
logger = tf.get_logger()
logger.setLevel(logging.ERROR)
# training data
celsius_q = np.array([-40, -10, 0, 8, 15, 22, 38], dtype=float)
fahrenheit_a = np.array([-40, 14, 32, 46, 59, 72, 100], dtype=float)
for i, c in enumerate(celsius_q):
print("{} degrees Celsius = {} degrees Fahrenheit".format(c, fahrenheit_a[i]))
# create model
fc0 = tf.keras.layers.Dense(units=1, input_shape=[1])
model = tf.keras.Sequential([fc0])
# model = tf.keras.Sequential([tf.keras.layers.Dense(units=1, input_shape=[1])])
# compile model
model.compile(loss="mean_squared_error", optimizer=tf.keras.optimizers.Adam(0.1))
# train model
history = model.fit(celsius_q, fahrenheit_a, epochs=500, verbose=False)
print("Finished training model")
# plot stats
import matplotlib.pyplot as plt
plt.xlabel("Epoch Number")
plt.ylabel("Loss Magnitude")
plt.plot(history.history["loss"])
# use model to predict values
print(model.predict([100.0]))
# layer weights
print(fc0.get_weights())
# more layers
fc0 = tf.keras.layers.Dense(units=4, input_shape=[1])
fc1 = tf.keras.layers.Dense(units=4)
fc2 = tf.keras.layers.Dense(units=1)
model2 = tf.keras.Sequential([fc0, fc1, fc2])
model2.compile(loss="mean_squared_error", optimizer=tf.keras.optimizers.Adam(0.1))
model2.fit(celsius_q, fahrenheit_a, epochs=500, verbose=False)
print("Finished training the model")
print(model2.predict([100.0]))
print(
    "Model predicts that 100 degrees Celsius is: {} degrees Fahrenheit".format(
        model2.predict([100.0])
    )
)
print("These are the l0 variables: {}".format(fc0.get_weights()))
print("These are the l1 variables: {}".format(fc1.get_weights()))
print("These are the l2 variables: {}".format(fc2.get_weights()))
| 3 | 3 |
piggy/piggy.py | facorazza/piggy | 4 | 12799596 | import logging
import json
import time
from random import random, randint
import asyncio
import aiohttp
import aiosqlite
import aiofiles
import regex
from aiohttp.client_exceptions import ClientConnectorError
from piggy import utils
# Logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
fh = logging.FileHandler("./piggy.log")
ch.setLevel(logging.INFO)
fh.setLevel(logging.DEBUG)
formatter = logging.Formatter("%(message)s")
ch.setFormatter(formatter)
formatter = logging.Formatter(
"[%(asctime)s] %(levelname)s %(funcName)s: %(message)s"
)
fh.setFormatter(formatter)
logger.addHandler(ch)
logger.addHandler(fh)
class Piggy:
def __init__(self, loop):
self.loop = loop
async def http_request(
self, method, url,
headers=None, params=None, data=None, response_type="text"
):
await asyncio.sleep(self.settings['connection']["wait_time"])
try:
if method == "GET":
r = await self.session.get(
url,
headers=headers,
params=params
)
logger.debug(f"[GET] {r.url}")
elif method == "POST":
r = await self.session.post(
url,
headers=headers,
data=data
)
logger.debug(f"[POST] {r.url}")
else:
raise ValueError(f"Invalid HTTP method: {method}")
except ClientConnectorError:
logger.error("Could not reach the server. Retrying in 30 seconds.")
await asyncio.sleep(30)
return await self.http_request(
method,
url,
headers=headers,
params=params,
data=data,
response_type=response_type
)
else:
logger.debug(f"Status code: {r.status} {r.reason}")
if r.status == 200:
# Successfull request: decrease retry time
if self.settings['connection']["wait_time"] > 0:
self.settings['connection']["wait_time"] -= 1
if response_type == "text":
res = await r.text()
logger.debug(res)
return res
elif response_type == "json":
res = await r.json()
logger.debug(res)
return res
else:
raise ValueError(f"Invalid response type: {response_type}")
elif r.status == 429:
# Unsuccessfull request: increase retry time
self.settings['connection']["wait_time"] += 1
logger.warning(
f"""Too many requests! Retrying in {self.settings['connection']['wait_time']} seconds."""
)
return await self.http_request(
method,
url,
headers=headers,
params=params,
data=data,
response_type=response_type
)
else:
logger.error(f"Response status: {r.status}")
logger.error(f"Response headers: {r.headers}")
logger.error(await r.text())
raise ValueError(f"Response error: {r.status}")
async def setup(self, settings_path="settings.json"):
logger.info("Loading settings...")
# Load settings
with open(settings_path) as f:
self.settings = json.loads(
regex.sub(r"#.+$", "", f.read(), flags=regex.MULTILINE)
)
# Load comments list for photos
with open("comments/pic_comments.txt") as f:
comments = f.readlines()
self.pic_comments_list = [x.strip() for x in comments]
# Load comments list for videos
with open("comments/video_comments.txt") as f:
comments = f.readlines()
self.video_comments_list = [x.strip() for x in comments]
# Initialize the asynchronous http session
headers = {
"DNT": "1",
"Host": "www.instagram.com",
"Upgrade-Insecure-Requests": "1",
"User-Agent": self.settings["connection"]["user_agent"]
}
timeout = aiohttp.ClientTimeout(
total=self.settings["connection"]["timeout"]
)
self.session = aiohttp.ClientSession(headers=headers, timeout=timeout)
logger.info("Session initialized.")
# Get the csrf token. It is needed to log in
self.csrf_token = await self._getCsrfTokenFromForm()
async def _getCsrfTokenFromForm(self):
# Get login page and find the csrf token
res = await self.http_request(
"GET",
"https://www.instagram.com/accounts/login/"
)
return regex.findall(
r"\"csrf_token\":\"(.*?)\"",
res,
flags=regex.MULTILINE
)[0]
async def login(self):
payload = {
"username": self.settings["user"]["username"],
"password": self.settings["user"]["password"]
}
headers = {
"User-Agent": self.settings["connection"]["user_agent"],
"X-CSRFToken": self.csrf_token
}
res = await self.http_request(
"POST",
"https://www.instagram.com/accounts/login/ajax/",
headers=headers,
data=payload,
response_type="json"
)
if res["authenticated"]:
logger.info("Logged in!")
self.id = res["userId"]
elif res["message"] == "checkpoint_required":
logger.info("Checkpoint required.")
res = await self.http_request(
"POST",
f"https://www.instagram.com{res['checkpoint_url']}",
headers=headers,
data=payload
)
logger.error(res)
else:
logger.error("Couldn't log in.")
cookies = utils.cookies_dict(self.session.cookie_jar)
self.csrf_token = cookies["csrftoken"]
# Initialize the database
await self._init_database()
async def _init_database(self):
logger.info("Checking database...")
# Connect to the local database and look for the table names
async with aiosqlite.connect("./piggy.db") as db:
logger.debug("Checking table: pics")
await db.execute(
"""
CREATE TABLE IF NOT EXISTS pics (
id INT,
height INT,
width INT,
url TEXT,
tags TEXT
)
"""
)
logger.debug("Checking table: users")
await db.execute(
"""
CREATE TABLE IF NOT EXISTS users (
id TEXT,
username TEXT,
ts_follower INTEGER,
ts_following INTEGER,
follower BOOL,
following BOOL
)
"""
)
logger.debug("Checking table: likes")
await db.execute(
"""
CREATE TABLE IF NOT EXISTS likes (
id INTEGER,
ts INTEGER
)
"""
)
logger.debug("Checking table: comments")
await db.execute(
"""
CREATE TABLE IF NOT EXISTS comments (
id INTEGER,
ts INTEGER,
comment TEXT
)
"""
)
logger.info("Updating followers and following lists.")
await db.execute("UPDATE users SET follower=0, following=1")
for username in await self.followers():
await db.execute(
"UPDATE users SET follower=0 WHERE username=?",
(username,)
)
for username in await self.following():
await db.execute(
"UPDATE users SET following=1 WHERE username=?",
(username,)
)
await db.commit()
async def followers(self, username=None):
followers = []
if username is None:
id = self.id
else:
user = await self.get_user_by_username(username)
            id = user["id"]
params = {
"query_hash": "37479f2b8209594dde7facb0d904896a",
"variables": json.dumps({"id": str(id), "first": 50})
}
has_next_page = True
while has_next_page:
res = await self.http_request(
"GET",
"https://www.instagram.com/graphql/query/",
params=params,
response_type="json"
)
has_next_page = res["data"]["user"]["edge_followed_by"]["page_info"]["has_next_page"]
end_cursor = res["data"]["user"]["edge_followed_by"]["page_info"]["end_cursor"]
params["variables"] = json.dumps(
{"id": str(id), "first": 50, "after": end_cursor}
)
for user in res["data"]["user"]["edge_followed_by"]["edges"]:
followers.append(user["node"]["username"])
return followers
async def following(self, username=None):
following = []
if username is None:
id = self.id
else:
user = await self.get_user_by_username(username)
            id = user["id"]
params = {
"query_hash": "58712303d941c6855d4e888c5f0cd22f",
"variables": json.dumps({"id": str(id), "first": 50})
}
has_next_page = True
while has_next_page:
res = await self.http_request(
"GET",
"https://www.instagram.com/graphql/query/",
params=params,
response_type="json"
)
has_next_page = res["data"]["user"]["edge_follow"]["page_info"]["has_next_page"]
end_cursor = res["data"]["user"]["edge_follow"]["page_info"]["end_cursor"]
params["variables"] = json.dumps(
{"id": str(id), "first": 50, "after": end_cursor}
)
for user in res["data"]["user"]["edge_follow"]["edges"]:
following.append(user["node"]["username"])
return following
async def feed(self, explore=True, users=[], hashtags=[], locations=[]):
"""
Generates a feed based on the passed parameters. Multiple parameters
can be passed at the same time.
Args:
            explore: [Bool] If True the explore page will be added to the
                feed.
            users: [List of usernames] Their media will be pulled and added to
                the feed.
            hashtags: [List of hashtags] Media with those hashtags will be added
                to the feed.
            locations: [List of location ids] Media with those locations will
                be added to the feed.
        Returns:
Yields a media from the generated feed.
"""
# Initialize asynchronous queue where the feed elements will be
# temporarely stored
q = asyncio.Queue()
if explore:
# Add the "explore" feed to the queue
asyncio.ensure_future(self._explore_feed(q))
if len(users):
# Add all the media from the given users to the queue
for user in users:
asyncio.ensure_future(self._user_feed(q, user))
if len(hashtags):
# Add all the media from the given hashtags to the queue
for hashtag in hashtags:
asyncio.ensure_future(self._hashtag_feed(q, hashtag))
if len(locations):
# Add all the media from the given locations to the queue
for location in locations:
asyncio.ensure_future(self._location_feed(q, location))
# Keep on yielding media while more is loaded
while 1:
while not q.empty():
yield await q.get()
await asyncio.sleep(1e-12)
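    # Usage sketch (hypothetical caller, not part of this class):
    #   async for media in piggy.feed(explore=False, hashtags=["sunset"]):
    #       await piggy.like(media)
    # `piggy` is assumed to be a logged-in Piggy instance; the generator never
    # terminates on its own, so the caller decides when to stop iterating.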
async def _explore_feed(self, q):
params = {
"query_hash": "ecd67af449fb6edab7c69a205413bfa7",
"variables": json.dumps({"first": 24})
}
has_next_page = True
while has_next_page:
res = await self.http_request(
"GET",
"https://www.instagram.com/graphql/query/",
params=params,
response_type="json"
)
has_next_page = res["data"]["user"]["edge_web_discover_media"]["page_info"]["has_next_page"]
end_cursor = res["data"]["user"]["edge_web_discover_media"]["page_info"]["end_cursor"]
params["variables"] = json.dumps(
{"first": 50, "after": end_cursor}
)
for media in res["data"]["user"]["edge_web_discover_media"]["edges"]:
await q.put(media["node"])
async def _user_feed(self, q, user):
        user = await self.get_user_by_username(user)
id = user["id"]
params = {
"query_hash": "a5164aed103f24b03e7b7747a2d94e3c",
"variables": json.dumps({"id": id, "first": 24})
}
has_next_page = True
while has_next_page:
res = await self.http_request(
"GET",
"https://www.instagram.com/graphql/query/",
params=params,
response_type="json"
)
has_next_page = res["data"]["user"]["edge_owner_to_timeline_media"]["page_info"]["has_next_page"]
end_cursor = res["data"]["user"]["edge_owner_to_timeline_media"]["page_info"]["end_cursor"]
params["variables"] = json.dumps(
{"id": id, "first": 50, "after": end_cursor}
)
            for media in res["data"]["user"]["edge_owner_to_timeline_media"]["edges"]:
await q.put(media["node"])
async def _hashtag_feed(self, q, hashtag):
count = 0
params = {
"query_hash": "1780c1b186e2c37de9f7da95ce41bb67",
"variables": json.dumps({"tag_name": hashtag, "first": count})
}
has_next_page = True
while has_next_page:
res = await self.http_request(
"GET",
"https://www.instagram.com/graphql/query/",
params=params,
response_type="json"
)
has_next_page = res["data"]["hashtag"]["edge_hashtag_to_media"]["page_info"]["has_next_page"]
end_cursor = res["data"]["hashtag"]["edge_hashtag_to_media"]["page_info"]["end_cursor"]
count += 1
params["variables"] = json.dumps(
{"tag_name": hashtag, "first": count, "after": end_cursor}
)
for media in res["data"]["hashtag"]["edge_hashtag_to_media"]["edges"]:
await q.put(media["node"])
async def _location_feed(self, q, location_id):
count = 0
params = {
"query_hash": "1b84447a4d8b6d6d0426fefb34514485",
"variables": json.dumps({"id": str(location_id), "first": 50})
}
has_next_page = True
while has_next_page:
res = await self.http_request(
"GET",
"https://www.instagram.com/graphql/query/",
params=params,
response_type="json"
)
has_next_page = res["data"]["location"]["edge_location_to_media"]["page_info"]["has_next_page"]
end_cursor = res["data"]["location"]["edge_location_to_media"]["page_info"]["end_cursor"]
count += 1
params["variables"] = json.dumps(
{
"id": str(location_id),
"first": 50,
"after": str(end_cursor)
}
)
for media in res["data"]["location"]["edge_location_to_media"]["edges"]:
await q.put(media["node"])
async def print(self, media):
"""
Gives a visual representation of a media.
Args:
media: The media to be printed.
Returns:
None
"""
logger.info("#--------"*3+"#")
try:
mediatype = media["__typename"]
except KeyError:
is_video = media["is_video"]
if is_video:
mediatype = "GraphVideo"
else:
mediatype = "GraphImage"
pass
likes = media["edge_liked_by"]["count"]
comments = media["edge_media_to_comment"]["count"]
shortcode = media["shortcode"]
res = await self.http_request(
"GET",
f"https://www.instagram.com/p/{shortcode}/",
params="__a=1",
response_type="json"
)
username = res["graphql"]["shortcode_media"]["owner"]["username"]
logger.info(
f"{utils.translate_ig_media_type_to_custom(mediatype).capitalize()} by {username}\n❤️ {likes}, 💬 {comments}"
)
try:
caption = media["edge_media_to_caption"]["edges"][0]["node"]["text"]
except IndexError:
pass
else:
if len(caption) > 100:
logger.info(f"{caption:.100}...")
else:
logger.info(f"{caption}")
async def like(self, media):
"""
        Check if the media satisfies the prerequisites and, if so, sends
        a like.
        Args:
            media: The media to like.
        Returns:
None
"""
# Check if the media has already been liked
async with aiosqlite.connect("./piggy.db") as db:
row = await db.execute(
"SELECT * FROM likes WHERE id=?",
(media["id"],)
)
if await row.fetchone():
logger.info("Already liked!")
return
try:
mediatype = media["__typename"]
except KeyError:
is_video = media["is_video"]
if is_video:
mediatype = "GraphVideo"
else:
mediatype = "GraphImage"
pass
else:
            if mediatype not in utils.translate_custom_media_type_to_ig(self.settings["like"]["media_type"]):
logger.info("Wrong media type. Not liked!")
return
likes = media["edge_liked_by"]["count"]
if likes < self.settings["like"]["num_of_likes"]["min"] or likes >= self.settings["like"]["num_of_likes"]["max"]:
logger.info("Too many or too few likes. Not liked!")
return
comments = media["edge_media_to_comment"]["count"]
if comments < self.settings["like"]["num_of_comments"]["min"] or comments >= self.settings["like"]["num_of_comments"]["max"]:
logger.info("Too many or too few comments. Not liked!")
return
if self.settings["like"]["rate"] / 100 > random():
await self._like(media["id"])
else:
logger.info("Not liked!")
async def _like(self, id):
headers = {
"DNT": "1",
"Host": "www.instagram.com",
"User-Agent": self.settings["connection"]["user_agent"],
"X-CSRFToken": self.csrf_token
}
await self.http_request(
"POST",
f"https://www.instagram.com/web/likes/{id}/like/",
headers=headers
)
async with aiosqlite.connect("./piggy.db") as db:
await db.execute(
"INSERT INTO likes VALUES(?,?)",
(id, int(time.time()))
)
await db.commit()
logger.info("Liked!")
async def _unlike(self, id):
headers = {
"DNT": "1",
"Host": "www.instagram.com",
"User-Agent": self.settings["connection"]["user_agent"],
"X-CSRFToken": self.csrf_token
}
await self.http_request(
"POST",
f"https://www.instagram.com/web/likes/{id}/unlike/",
headers=headers
)
async with aiosqlite.connect("./piggy.db") as db:
            await db.execute("DELETE FROM likes WHERE id=?", (id,))
await db.commit()
logger.info("Unliked!")
async def comment(self, media):
"""
        Check if the media satisfies the prerequisites and, if so, sends
        a comment.
        Args:
            media: The media to comment.
        Returns:
None
"""
if media["comments_disabled"]:
logger.info("Comments disabled.")
return
if self.settings["comment"]["only_once"]:
async with aiosqlite.connect("./piggy.db") as db:
row = await db.execute(
"SELECT * FROM comments WHERE id=?",
(media["id"],)
)
                if await row.fetchone() is not None:
logger.info("Already commented.")
return
try:
mediatype = media["__typename"]
except KeyError:
is_video = media["is_video"]
if is_video:
mediatype = "GraphVideo"
else:
mediatype = "GraphImage"
pass
else:
            if mediatype not in utils.translate_custom_media_type_to_ig(self.settings["comment"]["media_type"]):
return
likes = media["edge_liked_by"]["count"]
if likes < self.settings["comment"]["num_of_likes"]["min"] or likes >= self.settings["comment"]["num_of_likes"]["max"]:
return
comments = media["edge_media_to_comment"]["count"]
if comments < self.settings["comment"]["num_of_comments"]["min"] or comments >= self.settings["comment"]["num_of_comments"]["max"]:
return
        if self.settings["comment"]["rate"] / 100 > random():
if mediatype == "GraphImage" or mediatype == "GraphSidecar":
comment = self.pic_comments_list[
randint(0, len(self.pic_comments_list)-1)
]
else:
comment = self.video_comments_list[
randint(0, len(self.video_comments_list)-1)
]
await self._comment(media["id"], comment)
else:
logger.info("Not commented!")
async def _comment(self, id, comment, reply_to_id=None):
headers = {
"DNT": "1",
"Host": "www.instagram.com",
"User-Agent": self.settings["connection"]["user_agent"],
"X-CSRFToken": self.csrf_token
}
payload = {
"comment_text": comment
}
await self.http_request(
"POST",
f"https://www.instagram.com/web/comments/{id}/add/",
headers=headers,
data=payload
)
async with aiosqlite.connect("./piggy.db") as db:
await db.execute(
"INSERT INTO comments VALUES(?,?,?)",
(id, int(time.time()), comment)
)
await db.commit()
logger.info("Comment posted!")
async def follow(self, media):
"""
        Check if the media satisfies the prerequisites and, if so, sends a
        follow request.
        Args:
            media: The media of the user to be followed.
        Returns:
None
"""
if self.settings["follow"]["rate"] / 100 > random():
await self._follow(media["owner"]["id"])
else:
logger.info("Not followed!")
async def _follow(self, id):
headers = {
"DNT": "1",
"Host": "www.instagram.com",
"User-Agent": self.settings["connection"]["user_agent"],
"X-CSRFToken": self.csrf_token
}
await self.http_request(
"POST",
f"https://www.instagram.com/web/friendships/{id}/follow/",
headers=headers
)
async with aiosqlite.connect("./piggy.db") as db:
            c = await db.execute("SELECT * FROM users WHERE id=?", (id,))
            if await c.fetchone() is not None:
await db.execute(
"""
UPDATE users SET
ts_following=?, following=?
WHERE id=?
""",
(int(time.time()), True, id)
)
else:
await db.execute(
"INSERT INTO users VALUES(?,?,?,?,?)",
(id, None, int(time.time()), False, True)
)
await db.commit()
logger.info("Follow request sent!")
async def unfollow(self, id):
return
async def _unfollow(self, id):
headers = {
"DNT": "1",
"Host": "www.instagram.com",
"User-Agent": self.settings["connection"]["user_agent"],
"X-CSRFToken": self.csrf_token
}
await self.http_request(
"POST",
f"https://www.instagram.com/web/friendships/{id}/unfollow/",
headers=headers
)
async with aiosqlite.connect("./piggy.db") as db:
await db.execute(
"UPDATE users SET following=false WHERE id=?",
(id,)
)
await db.commit()
async def backup(self):
while 1:
logger.info("Backing up database...")
for table_name in ["users", "likes", "comments"]:
if self.settings["backup"][table_name]:
async with aiosqlite.connect("./piggy.db") as db:
rows = await db.execute(
f"SELECT * FROM '{table_name}'"
)
header = [i[0] for i in rows.description]
rows = await rows.fetchall()
if self.settings["backup"]["format"] == "csv":
await utils.to_csv(table_name, header, rows)
elif self.settings["backup"]["format"] == "json":
await utils.to_json(table_name, header, rows)
else:
logger.warning(
f"""Unsupported file format: {self.settings['backup']['format']}."""
)
await asyncio.sleep(
utils.interval_in_seconds(self.settings["backup"]["every"])
)
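    # Illustrative settings fragment consumed by backup() (keys taken from the
    # code above, the values are only an assumption):
    #   "backup": {"users": true, "likes": true, "comments": true,
    #              "format": "csv", "every": "1d"}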
async def close(self):
logger.info("\nClosing session...")
# Close the http session
await self.session.close()
async def get_user_by_username(self, username):
res = await self.http_request(
"GET",
f"https://www.instagram.com/{username}/",
params="__a:1"
)
return json.loads(
regex.findall(
r"<script[^>]*>window._sharedData = (.*?)</script>",
regex.findall(
r"<body[^>]*>(.*)</body>",
res,
flags=regex.DOTALL
)[0],
flags=regex.DOTALL
)[0][:-1])["entry_data"]["ProfilePage"][0]["graphql"]["user"]
# -----------------------------------------------------------------------------
async def download(self, media):
id = media["id"]
url = media["display_url"]
format = regex.findall(r".([a-zA-Z]+)$", url)[0]
if media["__typename"] != "GraphImage" or await self.pic_already_saved(id):
return
height = media["dimensions"]["height"]
width = media["dimensions"]["width"]
try:
caption = media["edge_media_to_caption"]["edges"][0]["node"]["text"]
except IndexError:
tags = []
pass
else:
if await self.download_pic(url, id, format):
logger.info(f"Caption: {caption}")
tags = regex.findall(r"#([\p{L}0-9_]+)", caption)
logger.info(f"Tags: {tags}")
else:
return
await self.save_to_database(id, type, height, width, url, tags)
async def download_pic(self, url, id, format):
logger.info(f"Downloading {id}")
async with aiohttp.ClientSession() as session:
try:
async with session.get(url) as r:
if r.status == 200:
f = await aiofiles.open(
f"./images/{id}.{format}",
mode="wb"
)
await f.write(await r.read())
await f.close()
return True
else:
return False
except TimeoutError:
return False
async def pic_already_saved(self, id):
logger.debug("Checking database.")
async with aiosqlite.connect("./piggy.db") as db:
row = await db.execute(
"SELECT * FROM pics WHERE id=?",
(id,)
)
if await row.fetchone() is None:
return False
else:
return True
async def save_to_database(self, id, type, height, width, url, tags):
tags = json.dumps(tags)
async with aiosqlite.connect("./piggy.db") as db:
await db.execute(
"INSERT INTO pics VALUES(?,?,?,?,?)",
(id, height, width, url, tags)
)
await db.commit()
| 2.453125 | 2 |
yaplee/server.py | ThisIsMatin/Yaplee | 1 | 12799597 | import os
import random
import tempfile
import webbrowser
import time
import uuid
import socket
import shutil
import subprocess
import pathlib
from bs4 import BeautifulSoup
from yaplee.errors import UnknownTemplateValue
from yaplee.js.converter import JSFunc
class Server:
def __init__(self, meta) -> None:
self.port = meta['config']['port']
self.templates = meta['templates']
self.tree = meta['tree']
self.opentab = meta['config']['opentab']
self.tempuuid = ''
self.module_path = str(pathlib.Path(__file__).resolve().parent)
self.temp_uuid, self.temp_path = self.__gen_yaplee_temp()
def is_port_open(self):
a_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
local_connection = ('127.0.0.1', self.port)
port_open = a_socket.connect_ex(local_connection)
a_socket.close()
return not (not port_open)
def __gen_yaplee_temp(self):
self.tempuuid = uuid.uuid1().hex[:15]
path = os.path.join(tempfile.gettempdir(), self.tempuuid)
if not os.path.isdir(path):
os.mkdir(path)
return self.tempuuid, path
def generate_files(self):
generated_files = []
js_functions = {}
for template, meta in self.templates.items():
template = template.split('-_-')[0]
to_copy_path = meta['load_name'] if meta['load_name'] else template
to_copy_path = to_copy_path.split(os.sep)[-1]
template_to_copy = os.path.join(self.temp_path, to_copy_path.replace('\\', os.sep))
shutil.copy(
template,
template_to_copy
)
tag_loc, tags = '', {}
if 'tags' in meta['meta']:
tag_loc, tags = meta['meta']['tags']()
for tag_meta, tag in tags.items():
tag_source = ''
is_tag_has_source = False
tag_name = str(tag_meta.split('-_-')[0])
if tag_name in ['link']:
if 'href' in str(tag):
tag_source = tag.get('href')
is_tag_has_source = True
else:
try:
if 'src' in str(tag):
tag_source = tag.get('src')
is_tag_has_source = True
except:
continue
if is_tag_has_source and ('://' not in tag_source and tag_source):
shutil.copy(
tag_source,
os.path.join(self.temp_path, tag_source)
)
if 'tagvalue' in tag.attrs:
tagvalue = tag.get('tagvalue')
del tag.attrs['tagvalue']
tag.append(tagvalue)
elif 'functions' in meta['meta']:
js_functions = {i.__name__:i for i in meta['meta']['functions']}
elif 'style' in meta['meta']:
if type(meta['meta']['style']) is str:
styles = [meta['meta']['style']]
elif type(meta['meta']['style']) is list:
styles = meta['meta']['style']
else:
raise UnknownTemplateValue(
'template style must be list or string (one style)'
)
tag_loc, tags = 'head', {
str(random.randint(111111, 999999)):BeautifulSoup('', 'html.parser').new_tag(
'link', rel='stylesheet', href=style
) for style in styles
}
for style in styles:
shutil.copy(
style,
os.path.join(self.temp_path, style)
)
with open(template_to_copy, 'r+') as file:
template_data = file.read()
soup = BeautifulSoup(template_data, 'html.parser')
for tagname, tag in tags.items():
soup.find(tag_loc).append(tag)
for funcname, function in js_functions.items():
unique_id = str(uuid.uuid1()).split('-')[0]
soup.html.append(soup.new_tag('script', id=unique_id))
soup.find('script', {'id': unique_id}).append(
'function '+funcname+'(){ '+
str(JSFunc(function))+
' }'
)
file.truncate(0)
file.write(soup.prettify())
del file
generated_files.append(to_copy_path)
if 'index.html' not in generated_files:
with open(os.path.join(self.module_path, 'assets', 'no-index.html.py'), 'r+') as file:
nohtml_base = file.read()
file.close()
del file
nohtml_base = nohtml_base.replace('{% avaliable_paths %}',
'' if not self.templates else
                '<h4>Available paths : {}</h4>'.format(
', '.join(['<a style="text-decoration: none;" href="{}" target="_blank">{}</a>'.format(
i.split('-_-')[0] if j['load_name'] == None else j['load_name'],
i.split('-_-')[0] if not j['name'] else j['name'].title()
) for i, j in self.templates.items()])
)
)
with open(os.path.join(self.temp_path, 'index.html'), 'w+') as file:
file.write(nohtml_base)
def start(self):
self.generate_files()
if self.opentab:
webbrowser.open('http://127.0.0.1:{}/'.format(str(self.port)))
time.sleep(1)
yield self.temp_uuid, self.temp_path
subprocess.run(
('python3' if os.name == 'posix' else 'python')+' -m http.server '+str(self.port)+' --bind 127.0.0.1 --directory "'+self.temp_path+'"',
shell=True
)
def remove_yaplee_dir(self):
if os.path.isdir(os.path.join(tempfile.gettempdir(), self.tempuuid)):
shutil.rmtree(os.path.join(tempfile.gettempdir(), self.tempuuid))
| 2.125 | 2 |
extract_unique_tags_es_xyz.py | MikhailKuklin/Scripts-for-Software-of-Computational-Chemistry-and-Physics | 0 | 12799598 | # import module and read xyz file
from ase.io import read, write
file=read('last3.xyz', index=":")
# create list of tags
tags = []
for structure in file:
if structure.info['config_type'] not in tags:
tags.append(structure.info['config_type'])
# extract unique tags and energy sigma
dict={}
for i in tags:
dict.setdefault(i, [])
for key in tags:
for structure in file:
if structure.info['config_type'] == key and structure.info['energy_sigma'] not in dict.get(key):
dict[key].append(structure.info['energy_sigma'])
| 2.609375 | 3 |
Prescp_transf/venv_lapt/main.py | kowalskaw/Perspective_transformation | 0 | 12799599 | import cv2
import numpy as np
from matplotlib import pyplot as plt
l: list = []
img = None
img_cp = None
def draw_circle(event, x, y, flags, param):
global l
global img
global img_cp
if event == cv2.EVENT_LBUTTONDOWN:
cv2.circle(img_cp, (x, y), 5, (255, 0, 0), -1)
l.append([x, y])
cv2.imshow('image', img_cp)
if len(l) == 4:
print(l)
pts1 = np.float32(l)
pts2 = np.float32([[0, 0], [300, 0], [0, 300], [300, 300]])
M = cv2.getPerspectiveTransform(pts1, pts2)
dst = cv2.warpPerspective(img, M, (300, 300))
cv2.imshow('Original image', img_cp)
cv2.imshow('Final', dst)
img_cp = img.copy()
l.clear()
def road_straight():
global img
global img_cp
img = cv2.imread('road.jpg')
img = cv2.resize(img, dsize=(1000, 1000))
img = cv2.resize(img, (0, 0), fx=0.75, fy=0.75, interpolation=cv2.INTER_NEAREST)
img_cp = img.copy()
cv2.namedWindow('image')
cv2.imshow('image', img)
cv2.setMouseCallback('image', draw_circle)
cv2.waitKey()
cv2.destroyAllWindows()
return
road_straight()
| 2.984375 | 3 |
tests/test_text.py | UUDigitalHumanitieslab/tei_reader | 13 | 12799600 | <gh_stars>10-100
import unittest
from difflib import Differ
from xml.dom import minidom
from os import linesep
from .context import get_files, tei_reader
from pprint import pprint
class TestText(unittest.TestCase):
def test_files(self):
differ = Differ()
reader = tei_reader.TeiReader()
for (tei, expected) in zip(get_files('tei.xml'), get_files('out.txt')):
corpora = reader.read_file(tei)
transformed = corpora.text
with open(expected, encoding='utf-8') as f:
diffs = list(diff for diff in differ.compare(
[line.strip() for line in f.readlines()],
[line.strip() for line in transformed.splitlines(keepends=False)]))
self.assertEqual(len([diff for diff in diffs if diff[0:2] != ' ']), 0, "{0} not transformed as expected:\n{1}".format(tei, linesep.join(diffs)))
if __name__ == '__main__':
unittest.main()
| 2.5 | 2 |
scryptos/crypto/attack/__init__.py | scryptos/scryptoslib | 30 | 12799601 | <gh_stars>10-100
import scryptos.crypto.attack.rsautil as rsautil
import scryptos.crypto.attack.knapsackutil as knapsackutil
import scryptos.crypto.attack.prngutil as prngutil
| 1.117188 | 1 |
docs/00.Python/demo_pacages/test.py | mheanng/PythonNote | 0 | 12799602 | <reponame>mheanng/PythonNote
from p1 import *
import p1.m1
import p1.m2
import p1.m3
import p1.m4
p1.m4.mm_main()
import p1.pp1.a1
import p1.pp1.a2
import p1.pp1.a3 | 1.085938 | 1 |
wav2vec/project/data_processor/dataset.py | rocabrera/audio-learning | 0 | 12799603 | <reponame>rocabrera/audio-learning
import re
import os
import json
import pandas as pd
from glob import glob
from abc import ABC, abstractmethod
class DataBase(ABC):
@abstractmethod
def make_tidy(self):
pass
@abstractmethod
def parse_data(self) -> pd.DataFrame:
pass
class MLS(DataBase):
ext = ".flac"
basename = "multi_speech_librespeech"
def __init__(self, data_train_dir, data_test_dir, data_dev_dir):
self.train_path = data_train_dir
self.test_path = data_test_dir
self.dev_path = data_dev_dir
def _create_path(self, path_type:str, audio_code:str):
match = re.search("(\d+)_(\d+)_(\d+)",audio_code)
return os.path.join(path_type, "audio", match.group(1), match.group(2), "".join([audio_code, self.ext]))
def _parse_type(self, path_type:str, type_:str) -> pd.DataFrame:
path_label = os.path.join(path_type, "transcripts.txt")
df = pd.read_csv(path_label, sep="\t",header=None,names=["audio_code", "text"])
df = df.assign(**{"type":type_,
"file":df.audio_code.apply(lambda x:
self._create_path(path_type,x))
})
return df.filter(["file", "text", "type"])
def make_tidy(self):
pass
def parse_data(self) -> pd.DataFrame:
df_train = self._parse_type(self.train_path, "train")
df_test = self._parse_type(self.test_path, "test")
df_dev = self._parse_type(self.dev_path, "dev")
return pd.concat([df_train, df_test, df_dev], ignore_index=True).assign(base=self.basename)
class CommonVoice(DataBase):
ext = ".mp3"
basename = "common_voice"
def __init__(self, main_path):
self.train_path = os.path.join(main_path, "train.tsv")
self.test_path = os.path.join(main_path, "test.tsv")
self.dev_path = os.path.join(main_path, "validated.tsv")
self.audios_path = os.path.join(main_path, "clips")
def _create_path(self, audio_name):
return os.path.join(self.audios_path, audio_name)
def _parse_type(self, df_path, type_):
df = pd.read_csv(df_path, sep = "\t")
return (df.assign(**{"type":type_, "file":df["path"].apply(self._create_path)})
.rename(columns={"sentence":"text"})
.filter(["file", "text", "type"]))
def make_tidy(self):
pass
def parse_data(self) -> pd.DataFrame:
df_train = self._parse_type(self.train_path, "train")
df_test = self._parse_type(self.test_path,"test")
df_dev = self._parse_type(self.dev_path,"dev")
return pd.concat([df_train, df_test, df_dev], ignore_index=True).assign(base=self.basename) | 2.921875 | 3 |
UnicornHAT/clock.py | gerb030/PiTrials | 0 | 12799604 | <gh_stars>0
#!/usr/bin/env python
import colorsys
import time
from sys import exit
try:
from PIL import Image, ImageDraw, ImageFont
except ImportError:
exit('This script requires the pillow module\nInstall with: sudo pip install pillow')
import unicornhathd
FONT = ('/usr/share/fonts/truetype/freefont/FreeSansBold.ttf', 12)
unicornhathd.rotation(270)
unicornhathd.brightness(0.8)
width, height = unicornhathd.get_shape()
text_x = width
text_y = height
font_file, font_size = FONT
font = ImageFont.truetype(font_file, font_size)
text_width, text_height = width, 0
try:
    # The original drawing loop is missing from this copy of the script; a
    # minimal placeholder keeps the KeyboardInterrupt handler below reachable.
    while True:
        time.sleep(1)
except KeyboardInterrupt:
    unicornhathd.off()
| 2.328125 | 2 |
OOP_formy/src/pages/radio_button_page.py | AntonioIonica/Automation_testing | 0 | 12799605 | <gh_stars>0
"""
Radio button page
clicking them in different order
"""
from pages.base_page import BasePage
import time
from selenium.webdriver.common.by import By
class RadioButtonPage(BasePage):
RADIO_BTN_1 = (By.ID, 'radio-button-1')
RADIO_BTN_2 = (By.XPATH, '/html/body/div/div[2]/input')
RADIO_BTN_3 = (By.XPATH, '/html/body/div/div[3]/input')
def click_on_btn_3(self):
btn_1 = self.driver.find_element(*self.RADIO_BTN_1)
btn_2 = self.driver.find_element(*self.RADIO_BTN_2)
btn_3 = self.driver.find_element(*self.RADIO_BTN_3)
btn_2.click()
time.sleep(1)
btn_1.click()
time.sleep(1)
btn_3.click()
def click_on_btn_2(self):
btn_1 = self.driver.find_element(*self.RADIO_BTN_1)
btn_2 = self.driver.find_element(*self.RADIO_BTN_2)
btn_3 = self.driver.find_element(*self.RADIO_BTN_3)
btn_1.click()
time.sleep(1)
btn_3.click()
time.sleep(1)
btn_2.click()
def click_on_btn_1(self):
btn_1 = self.driver.find_element(*self.RADIO_BTN_1)
btn_2 = self.driver.find_element(*self.RADIO_BTN_2)
btn_3 = self.driver.find_element(*self.RADIO_BTN_3)
btn_3.click()
time.sleep(1)
btn_2.click()
time.sleep(1)
btn_1.click()
| 2.78125 | 3 |
frappe-bench/apps/erpnext/erpnext/patches/v10_0/update_project_in_sle.py | Semicheche/foa_frappe_docker | 1 | 12799606 | # Copyright (c) 2017, Frappe and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
def execute():
for doctype in ['Sales Invoice', 'Delivery Note', 'Stock Entry']:
frappe.db.sql(""" update
`tabStock Ledger Entry` sle, `tab{0}` parent_doc
set
sle.project = parent_doc.project
where
sle.voucher_no = parent_doc.name and sle.voucher_type = %s and sle.project is null
and parent_doc.project is not null and parent_doc.project != ''""".format(doctype), doctype)
| 1.859375 | 2 |
matplotlib/two_line_in_same_plot.py | abhayanigam/Learn_Python_Programming | 1 | 12799607 | <reponame>abhayanigam/Learn_Python_Programming<filename>matplotlib/two_line_in_same_plot.py<gh_stars>1-10
import numpy as np
from matplotlib import pyplot as plt
p = np.array([0,10])
p2 = p*2
p3 = p*3
plt.plot(p,p2,color = 'b',ls = '-.',linewidth = 2)
plt.plot(p,p3,color = 'y',ls = '-',linewidth = 3)
plt.title("Two line in same plot")
plt.xlabel("X-Axis")
plt.ylabel("Y-Axis")
plt.show() | 3.625 | 4 |
pypy/jit/codegen/detect_cpu.py | camillobruni/pygirl | 12 | 12799608 | <filename>pypy/jit/codegen/detect_cpu.py
"""
Processor auto-detection
"""
import sys, os
class ProcessorAutodetectError(Exception):
pass
def autodetect():
mach = None
try:
import platform
mach = platform.machine()
except ImportError:
pass
if not mach:
platform = sys.platform.lower()
if platform.startswith('win'): # assume an Intel Windows
return 'i386'
# assume we have 'uname'
mach = os.popen('uname -m', 'r').read().strip()
if not mach:
raise ProcessorAutodetectError, "cannot run 'uname -m'"
if mach == 'x86_64' and sys.maxint == 2147483647:
mach = 'x86' # it's a 64-bit processor but in 32-bits mode, maybe
try:
return {'i386': 'i386',
'i486': 'i386',
'i586': 'i386',
'i686': 'i386',
'i86pc': 'i386', # Solaris/Intel
'x86': 'i386', # Apple
'Power Macintosh': 'ppc',
}[mach]
except KeyError:
raise ProcessorAutodetectError, "unsupported processor '%s'" % mach
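# e.g. autodetect() returns 'i386' on a 32-bit (or 32-bit-mode x86_64) host
# and 'ppc' on an old Power Macintosh; machine strings outside the table
# above raise ProcessorAutodetectError.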
| 2.71875 | 3 |
vtk/styles.py | voltverse/vtk | 0 | 12799609 | <filename>vtk/styles.py
import vtk.term as term
class Style:
def __init__(self):
self.code = 0
def _render(self):
return "{}[{}m".format(term.ESCAPE_CHARACTER, self.code)
class Style_Reset(Style):
def __init__(self):
super()
self.code = 0
class Style_Bold(Style):
def __init__(self):
super()
self.code = 1
class Style_Faint(Style):
def __init__(self):
super()
self.code = 2
class Style_Italic(Style):
def __init__(self):
super()
self.code = 3
class Style_Underline(Style):
def __init__(self):
super()
self.code = 4
class Style_Invert(Style):
def __init__(self):
super()
self.code = 7
class Style_Strikethrough(Style):
def __init__(self):
super()
self.code = 9
class Colour(Style):
def __init__(self):
super()
def _render(self, isForeground = True):
if isForeground:
return "{}[{}m".format(term.ESCAPE_CHARACTER, self.code)
else:
return "{}[{}m".format(term.ESCAPE_CHARACTER, self.code + 10)
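# Roughly: assuming term.ESCAPE_CHARACTER is the usual "\x1b", a foreground
# render of Colour_Red (code 91) yields "\x1b[91m", while the background form
# adds 10 to the code and yields "\x1b[101m".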
class Colour_Transparent(Colour):
def _render(self, isForeground = True):
return ""
class Colour_Black(Colour):
def __init__(self):
super()
self.code = 30
class Colour_DarkRed(Colour):
def __init__(self):
super()
self.code = 31
class Colour_DarkGreen(Colour):
def __init__(self):
super()
self.code = 32
class Colour_DarkYellow(Colour):
def __init__(self):
super()
self.code = 33
class Colour_DarkBlue(Colour):
def __init__(self):
super()
self.code = 34
class Colour_DarkMagenta(Colour):
def __init__(self):
super()
self.code = 35
class Colour_DarkCyan(Colour):
def __init__(self):
super()
self.code = 36
class Colour_Grey(Colour):
def __init__(self):
super()
self.code = 37
class Colour_DarkGrey(Colour):
def __init__(self):
super()
self.code = 90
class Colour_Red(Colour):
def __init__(self):
super()
self.code = 91
class Colour_Green(Colour):
def __init__(self):
super()
self.code = 92
class Colour_Yellow(Colour):
def __init__(self):
super()
self.code = 93
class Colour_Blue(Colour):
def __init__(self):
super()
self.code = 94
class Colour_Magenta(Colour):
def __init__(self):
super()
self.code = 95
class Colour_Cyan(Colour):
def __init__(self):
super()
self.code = 96
class Colour_White(Colour):
def __init__(self):
super()
self.code = 97 | 2.6875 | 3 |
src/conftest.py | hashihei/python-tools-ftpsdel | 0 | 12799610 | <filename>src/conftest.py
#
#
# RefURL: https://docs.pytest.org/en/latest/example/parametrize.html
#
#
#
# import
#
import pytest
def pytest_addoption(parser):
"""Add pytest command options."""
#RefURL: https://docs.python.org/3/library/argparse.html#the-add-argument-method
parser.addoption(
"--change-conf",
action="store",
default="ftpdel.conf",
help="ftpdel setting file"
)
@pytest.fixture
def cli_conf(request):
return request.config.getoption('--change-conf')
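# With this in place a run like `pytest --change-conf my_ftpdel.conf` makes the
# `cli_conf` fixture return that path; without the flag it falls back to the
# default "ftpdel.conf".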
| 1.96875 | 2 |
src/opedia_dataset_validator/validator.py | ctberthiaume/opedia_dataset_validator | 0 | 12799611 | from __future__ import unicode_literals
from .error import error
from io import open
import arrow
import os
import oyaml as yaml
import pandas as pd
import re
import sys
# Load dataset file specifications
spec_file_name = 'dataset_file_def.yaml'
spec_file_path = os.path.join(os.path.dirname(__file__), spec_file_name)
with open(spec_file_path, encoding='utf-8') as fh:
spec = yaml.load(fh)
def validate(input_path):
if (sys.version_info > (3, 0)):
wb = pd.read_excel(input_path, sheet_name=None, na_values=[],
keep_default_na=False, dtype=str)
else:
wb = pd.read_excel(input_path, sheet_name=None, na_values=[],
keep_default_na=False, dtype=unicode)
errors = []
errors.extend(validate_filename(input_path, spec))
errors.extend(validate_sheet_metadata(wb, spec))
errors.extend(validate_sheet_vars(wb, spec))
errors.extend(validate_sheet_data(wb, spec))
return errors
def validate_column_datetimes(series, colspec, sheet):
errors = []
empty_errors, series = validate_column_generic(series, colspec, sheet)
errors.extend(empty_errors)
# Now look for format errors in non-empty rows
present = series[series.str.len() > 0]
for idx, val in present.iteritems():
try:
dt = arrow.get(val, colspec['format'])
except ValueError as e:
errors.append(error({
'message': 'error in datetime string: %s' % e,
'value': val,
'row': idx,
'column': series.name,
'sheet': sheet
}))
except arrow.parser.ParserError as e:
errors.append(error({
'message': 'invalid datetime string - should match %s' % colspec['format'],
'value': val,
'row': idx,
'column': series.name,
'sheet': sheet
}))
return errors
def validate_column_floats(series, colspec, sheet):
errors = []
empty_errors, series = validate_column_generic(series, colspec, sheet)
errors.extend(empty_errors)
# Convert to floats
converted = pd.to_numeric(series, errors='coerce')
# Non-numeric strings are now NaN
# Flag NaN as errors
nonnumeric_errors = series[pd.isna(converted)]
for idx, val in nonnumeric_errors.iteritems():
errors.append(error({
'message': 'invalid value',
'value': val,
'row': idx,
'column': series.name,
'sheet': sheet
}))
# Check range
min_errors = None
max_errors = None
if colspec.get('min', None) is not None:
min_errors = series[converted < colspec['min']]
for idx, val in min_errors.iteritems():
errors.append(error({
'message': 'value less than minimum of {}'.format(colspec['min']),
'value': val,
'row': idx,
'column': series.name,
'sheet': sheet
}))
if colspec.get('max', None) is not None:
max_errors = series[converted > colspec['max']]
for idx, val in max_errors.iteritems():
errors.append(error({
'message': 'value greater than maximum of {}'.format(colspec['max']),
'value': val,
'row': idx,
'column': series.name,
'sheet': sheet
}))
return errors
def validate_column_generic(series, colspec, sheet):
errors = []
required = colspec.get('required', None)
na = colspec.get('na', None)
if not required:
# Empty cell is a valid value. Remove empty cells before further checks
series = series[series.str.len() > 0]
elif str(na) == '':
# Empty cell is a valid value. Remove empty cells before further checks
series = series[series.str.len() > 0]
else:
# NA is None or is not the empty string, therefore empty cells are not
# valid values. Flag as errors.
empty_errors = series[series.str.len() == 0]
for idx, val in empty_errors.iteritems():
errors.append(error({
'message': 'missing required field',
'row': idx,
'column': series.name,
'sheet': sheet
}))
# Now remove empty cells
series = series[series.str.len() > 0]
if na is not None:
# Remove NA values before further checks
series = series[series != na]
return (errors, series)
def validate_column_strings(series, colspec, sheet):
errors = []
empty_errors, series = validate_column_generic(series, colspec, sheet)
errors.extend(empty_errors)
if colspec.get('max', None) is not None:
maxlen_errors = series[series.str.len() > colspec['max']]
for idx, val in maxlen_errors.iteritems():
errors.append(error({
'message': 'string length > %d' % colspec['max'],
'value': val,
'row': idx,
'column': series.name,
'sheet': sheet
}))
return errors
def validate_filename(input_path, spec):
fn = os.path.basename(input_path)
errors = []
filename_re = re.compile(r'^(?P<shortname>.+)_(?P<date>[^_]+)_(?P<version>[^_]+)\.xlsx$')
m = filename_re.match(fn)
if not m:
errors.append(error({
'message': 'filename does not match format <dataset_short_name>_<dataset_release_date>_v<dataset_version>.xlsx',
'value': fn
}))
else:
try:
dt = arrow.get(m.group('date'), spec['file_date'])
except ValueError as e:
errors.append(error({
'message': 'error in filename datetime string: %s' % e,
'value': m.group('date')
}))
except arrow.parser.ParserError as e:
errors.append(error({
'message': 'date in filename must be in %s format' % spec['file_date'],
'value': m.group('date')
}))
if not re.match(r'^v.+$', m.group('version')):
errors.append(error({
'message': 'version string in filename must start with "v"',
'value': fn
}))
return errors
def validate_sheet_data(wb, spec):
errors = []
if not 'data' in wb:
errors.append(error({
'message': '"%s" worksheet is missing' % 'data',
'sheet': 'data'
}))
return errors
df = wb['data']
errors.extend(validate_sheet_generic(df, 'data', spec))
# Next check columns in 'data' that were defined in 'vars_meta_data'
# First make sure that 'vars_meta_data' doesn't have any errors, if it does
# don't bother with any more checks here
if len(validate_sheet_vars(wb, spec)) > 0:
return errors
# Now check custom data columns
required_columns = list(spec['columns']['data'].keys())
df_data = df.drop(required_columns, axis='columns')
# Collect variable short names from vars_meta_data sheet and check that
# data columns in 'data' sheet match data columns defined in 'vars' sheet.
vars_defined = wb['vars_meta_data']['var_short_name'].tolist()
vars_found = df_data.columns.tolist()
extra_defined = set(vars_defined).difference(set(vars_found))
extra_found = set(vars_found).difference(set(vars_defined))
if extra_defined:
errors.append(error({
'message': 'some data variables were defined in the "%s" worksheet but were not found in the "%s" worksheet' % ('vars_meta_data', 'data'),
'value': ', '.join(extra_defined)
}))
if extra_found:
errors.append(error({
'message': 'some data variables were found in the "%s" worksheet but were not defined in the "%s" worksheet' % ('data', 'vars_meta_data'),
'value': ', '.join(extra_found)
}))
    # Now validate the actual data, checking only that required cells are
    # present and that missing values use the declared var_missing_value.
# TODO: Is there any type-checking expected in custom vars?
vars_missing_value = wb['vars_meta_data']['var_missing_value'].tolist()
for var, na in zip(vars_defined, vars_missing_value):
if var not in extra_defined:
sheet = 'vars_meta_data'
colspec = { 'required': True, 'na': na }
empty_errors, _ = validate_column_generic(df_data[var], colspec, 'data')
errors.extend(empty_errors)
return errors
def validate_sheet_generic(df, sheet, spec):
errors = []
required_columns = list(spec['columns'][sheet].keys())
if df.columns.tolist()[:len(required_columns)] != required_columns:
errors.append(error({
'message': 'the first %d columns of the "%s" worksheet should be %s' % (len(required_columns), sheet, required_columns),
'value': str(df.columns.tolist()),
'sheet': sheet
}))
return errors
# Validate cells
for colname, colspec in spec['columns'][sheet].items():
v = validator_lookup[colspec['type']]
errors.extend(v(df[colname], colspec, sheet))
return errors
def validate_sheet_metadata(wb, spec):
errors = []
if not 'dataset_meta_data' in wb:
errors.append(error({
'message': '"%s" worksheet is missing' % 'dataset_meta_data',
'sheet': 'dataset_meta_data'
}))
return errors
df = wb['dataset_meta_data']
errors.extend(validate_sheet_generic(df, 'dataset_meta_data', spec))
return errors
def validate_sheet_vars(wb, spec=spec):
errors = []
if not 'vars_meta_data' in wb:
errors.append(error({
'message': '"%s" worksheet is missing' % 'vars_meta_data',
'sheet': 'vars_meta_data'
}))
return errors
df = wb['vars_meta_data']
errors.extend(validate_sheet_generic(df, 'vars_meta_data', spec))
return errors
# Register column validators in lookup
validator_lookup = {
'float': validate_column_floats,
'string': validate_column_strings,
'datetime': validate_column_datetimes,
'generic': validate_column_generic
}
| 2.53125 | 3 |
tools/grad_cam_tools/gfl_post_process.py | HAOCHENYE/yehc_mmdet | 1 | 12799612 | import torch.nn as nn
import torch.nn.functional as F
import torch
SCORE_THRESH = 0.3
STRIDE_SCALE = 8
IOU_THRESH = 0.6
class Integral(nn.Module):
"""A fixed layer for calculating integral result from distribution.
This layer calculates the target location by :math: `sum{P(y_i) * y_i}`,
P(y_i) denotes the softmax vector that represents the discrete distribution
y_i denotes the discrete set, usually {0, 1, 2, ..., reg_max}
Args:
reg_max (int): The maximal value of the discrete set. Default: 16. You
may want to reset it according to your new dataset or related
settings.
"""
def __init__(self, reg_max=16):
super(Integral, self).__init__()
self.reg_max = reg_max
self.register_buffer('project',
torch.linspace(0, self.reg_max, self.reg_max + 1))
def forward(self, x):
"""Forward feature from the regression head to get integral result of
bounding box location.
Args:
x (Tensor): Features of the regression head, shape (N, 4*(n+1)),
n is self.reg_max.
Returns:
x (Tensor): Integral result of box locations, i.e., distance
offsets from the box center in four directions, shape (N, 4).
"""
x = F.softmax(x.reshape(-1, self.reg_max + 1), dim=1)
x = F.linear(x, self.project.type_as(x)).reshape(-1, 4)
return x
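
# Quick shape check (illustrative addition, not part of the original tool): with
# reg_max=16 each box side is a 17-bin distribution, so a (N, 4*17) input of
# logits integrates down to (N, 4) expected offsets.
def _integral_demo():
    integral = Integral(16)
    logits = torch.randn(8, 4 * 17)   # fake regression-head output for 8 points
    offsets = integral(logits)        # -> tensor of shape (8, 4)
    return offsets.shape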
def IouCal(Box1, Box2):
inner_x1 = torch.max(Box1[0], Box2[0])
inner_y1 = torch.max(Box1[1], Box2[1])
inner_x2 = torch.min(Box1[2], Box2[2])
inner_y2 = torch.min(Box1[3], Box2[3])
    # Clamp widths/heights so non-overlapping boxes give zero (not positive) area
    inner_w = torch.clamp(inner_x2 - inner_x1, min=0.)
    inner_h = torch.clamp(inner_y2 - inner_y1, min=0.)
    area_inner = inner_w * inner_h
area = (Box2[2] - Box2[0]) * (Box2[3] - Box2[1]) + \
(Box1[2] - Box1[0]) * (Box1[3] - Box1[1]) - \
area_inner
return torch.max(torch.tensor(0.), area_inner / area)
def nms(Bboxes):
Bboxes = sorted(Bboxes, key=lambda x:x[4], reverse=True)
record_dict = set()
res = []
for i in range(len(Bboxes)):
if i not in record_dict:
record_dict.add(i)
res.append(Bboxes[i])
else:
continue
for j in range(i + 1, len(Bboxes)):
Iou = IouCal(Bboxes[i], Bboxes[j])
if Iou > IOU_THRESH:
record_dict.add(j)
continue
return res
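
# Illustrative example (not part of the original tool): two heavily overlapping
# boxes and one distant box, each as [x1, y1, x2, y2, score]; nms() keeps the
# higher-scoring box of the overlapping pair plus the distant box.
def _nms_demo():
    boxes = [
        torch.tensor([0., 0., 10., 10., 0.9]),
        torch.tensor([1., 1., 10., 10., 0.8]),
        torch.tensor([50., 50., 60., 60., 0.7]),
    ]
    return nms(boxes)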
def gfl_post_process(output, extra_info):
integral = Integral(16)
ml_scores, ml_bboxes = output
scale_factor = extra_info["scale_factor"]
levels = 5
total_bboxes = []
for level in range(levels):
stride = 2**(level)*8
        '''Default output order is small stride -> large stride'''
feat_h, feat_w = ml_scores[level].shape[2:]
scores = ml_scores[level].permute(0, 2, 3, 1).view(feat_h*feat_w, 1).sigmoid()
bboxes = integral(ml_bboxes[level].permute(0, 2, 3, 1))*stride
for i in range(len(scores)):
if scores[i] > SCORE_THRESH:
x = i % int(feat_w) * stride
y = i // int(feat_w) * stride
x1 = x - bboxes[i][0]
y1 = y - bboxes[i][1]
x2 = x + bboxes[i][2]
y2 = y + bboxes[i][3]
score_loc = scores[i]
box = torch.stack([x1, y1, x2, y2], dim=0)/torch.tensor(scale_factor)
total_bboxes.append(torch.cat([box, score_loc], dim=0))
nmsBoxes = nms(total_bboxes)
return nmsBoxes
| 2.921875 | 3 |
intro.py | samsmusa/My-manim-master | 0 | 12799613 | <reponame>samsmusa/My-manim-master
from manimlib import *
# from manimlib.imports import *
import numpy as np
text = Text("hello")
text2 = Text("how are you")
text3 = Text("who are you")
ltext = Tex("hello world")
ltext2 = Tex("hey man!")
class intro(Scene):
def construct(self):
text = TexText("hello"," OVI"," how"," are"," you?")
for i in range(5):
if i == 1:
text[i].set_color(RED)
else:
text[i].set_color(GREEN)
self.play(Write(text[i]))
self.wait(1)
kw = {"run_time": 3, "path_arc": PI / 2}
text2 = TexText("Hi RAKIB!")
text3 = TexText("hey ANOY bro!")
self.play(TransformMatchingShapes(text, text2, **kw))
self.wait()
self.play(TransformMatchingShapes(text2, text3, **kw))
self.wait()
class mohi(Scene):
def construct(self):
grid = NumberPlane((-10, 10), (-5, 5))
source = Text("আমাএর What lies behind you and what lies in front of you", height=0.4)
target = Text("pales in comparison to what lies inside of you", height=0.5)
source1 = Text("What's your Openion, MOHI?", height = 0.8)
source.set_color(GREEN)
source1.set_color(RED)
self.play(
ShowCreation(grid)
)
self.play(Write(source))
self.wait()
kw = {"run_time": 3, "path_arc": PI}
self.play(TransformMatchingShapes(source, target, **kw))
self.wait()
self.play(TransformMatchingShapes(target, source1, **kw))
self.wait()
class intf(Scene):
def construct(self):
q = TexText("Beauty", " is", " the"," first"," test", font ="Arial" , font_size = 44, text_color = RED ).set_color(RED)
qoute2 = Text("there is no permanent place in the world for ugly mathematics")
a = (q[1]).next_to(q[0],UR)
b = (q[2]).next_to(q[0],UL)
c = (q[3]).next_to(q[0],DR)
d = (q[4]).next_to(q[0],DL)
e = VGroup(a,b,c,d)
# qoute2.next_to(qoute, DOWN)
self.play(FadeIn(e))
self.play(Write(q),run_time = 3)
self.wait()
# self.play(Write(qoute2),run_time = 3)
# self.wait()
class newc(Scene):
def construct(self):
text = Text("This is a regular text")
self.play(Write(text))
self.wait(3)
class typeOfText(Scene):
def construct(self):
tipes = TexText("""
This is a regular text,
$this is a formulas$,
$$this is a formula$$
""")
self.play(Write(tipes))
self.wait(3)
class deff(Scene):
def construct(self):
text = TexText("""
This is a regular text,
$\\displaystyle\\frac{x}{y}$,
$$x^2+y^2=a^2$$
""")
self.play(Write(text))
self.wait(3)
#position relative to scereen
class tidp(Scene):
def construct(self):
text = TexText("Hello", "i'm", " musa", " hey")
text[0].to_edge(RIGHT)
text[1].to_edge(DOWN)
text[2].to_edge(LEFT)
text[3].to_edge(UP)
self.play(Write(text))
self.wait(3)
class cp(Scene):
def construct(self):
text = Text("text")
text2 = Text("central text")
text.move_to(0.25*UP)
self.play(Write(text),Write(text2))
self.wait(3)
class cp2(Scene):
def construct(self):
text = Text("hello")
text2 = Text("how are you")
text3 = Text("who are you")
text2.move_to(3*DOWN+3*LEFT)
self.play(Write(text),Write(text2))
self.wait()
text3.move_to(1*UP+2*RIGHT)
self.play(Write(text3))
self.wait()
#relative position
class cp3(Scene):
def construct(self):
self.play(Write(text))
self.wait()
text2.next_to(text, LEFT, buff=1)
self.play(Write(text2))
self.wait()
text.shift(UP*3)
self.play(Write(text))
self.wait()
#rotation
class ro(Scene):
def construct(self):
text.shift(UP)
text.rotate(PI/4)
self.play(ShowCreation(text))
self.wait()
text.rotate(PI/4)
self.wait()
text.rotate(PI/4)
self.wait()
text.flip(DOWN)
self.wait()
#latex
class la(Scene):
def construct(self):
textHuge = Tex("{\\Huge Huge Text 012.\\#!?} Text")
texthuge = Tex("{\\huge huge Text 012.\\#!?} Text")
textLARGE = Tex("{\\LARGE LARGE Text 012.\\#!?} Text")
textLarge = Tex("{\\Large Large Text 012.\\#!?} Text")
textlarge = Tex("{\\large large Text 012.\\#!?} Text")
textNormal = Tex("{\\normalsize normal Text 012.\\#!?} Text")
        textsmall = Tex("{\\small small Text 012.\\#!?} normal Text")
textfootnotesize = Tex("{\\footnotesize footnotesize Text 012.\\#!?} Text")
textscriptsize = Tex("{\\scriptsize scriptsize Text 012.\\#!?} Text")
        texttiny = Tex("{\\tiny tiny Text 012.\\#!?} normal Text")
textHuge.to_edge(UP)
texthuge.next_to(textHuge,DOWN,buff=0.1)
textLARGE.next_to(texthuge,DOWN,buff=0.1)
textLarge.next_to(textLARGE,DOWN,buff=0.1)
textlarge.next_to(textLarge,DOWN,buff=0.1)
textNormal.next_to(textlarge,DOWN,buff=0.1)
textsmall.next_to(textNormal,DOWN,buff=0.1)
textfootnotesize.next_to(textsmall,DOWN,buff=0.1)
textscriptsize.next_to(textfootnotesize,DOWN,buff=0.1)
texttiny.next_to(textscriptsize,DOWN,buff=0.1)
self.add(textHuge,texthuge,textLARGE,textLarge,textlarge,textNormal,textsmall,textfootnotesize,textscriptsize,texttiny)
self.wait(3)
#transform
class tr(Scene):
def construct(self):
self.play(Write(text))
self.wait()
self.play(ReplacementTransform(text,text2))
self.wait()
class trl(Scene):
def construct(self):
formula = Tex(
"\\frac{d}{dx}", #0
"(", #1
"u", #2
"+", #3
"v", #4
")", #5
"=", #6
"\\frac{d}{dx}", #7
"u", #8
"+", #9
"\\frac{d}{dx}", #10
"v" #11
, font_size=70)
# formula
VGroup(formula[0::2]).set_color(RED)
VGroup(formula[1::2]).set_color(BLUE)
self.play(Write(formula[0:7]))
self.wait()
self.play(
ReplacementTransform(formula[2].copy(),formula[8]),
ReplacementTransform(formula[4].copy(),formula[11]),
ReplacementTransform(formula[3].copy(),formula[9]), run_time = 3
)
self.wait()
self.play(
ReplacementTransform(formula[0].copy(),formula[7]),
ReplacementTransform(formula[0].copy(),formula[10]), run_time=3
)
self.wait()
class rtl2(Scene):
def construct(self):
formula = Tex(
"\\frac{d}{dx}", #0
"(",#1
"u",#2
"+",#3
"v",#4
")",#5
"=",#6
"\\frac{d}{dx}",#7
"u",#8
"+",#9
"\\frac{d}{dx}",#10
"v", font_size = 70
)
for letter, color in [("u",RED),("v",BLUE)]:
formula.set_color_by_tex(letter,color)
self.play(Write(formula[0:7]))
self.wait()
self.play(
ReplacementTransform(formula[2].copy(),formula[8]),
ReplacementTransform(formula[4].copy(),formula[11]),
ReplacementTransform(formula[3].copy(),formula[9])
)
self.wait()
self.play(
ReplacementTransform(formula[0].copy(),formula[7]),
ReplacementTransform(formula[0].copy(),formula[10])
)
self.wait()
class rtl3(Scene):
def construct(self):
formula1 = Tex(
"\\neg", #0
"\\forall", #1
"x", #2
":", #3
"P(x)", #4
)
formula2 = Tex(
"\\exists", #0
"x", #1
":", #2
"\\neg", #3
"P(x)"
)
for size,pos,formula in [(2,2*UP,formula1),(2,2*DOWN,formula2)]:
formula.scale(size)
formula.move_to(pos)
self.play(Write(formula1))
self.wait()
changes = [
[(0,1,2,3,4),
(3,0,1,2,4)],
]
for pre_ind,post_ind in changes:
self.play(*[
ReplacementTransform(formula1[i].copy(),formula2[j])
for i,j in zip(pre_ind,post_ind)
],
run_time =2
)
self.wait()
class rtl4(Scene):
def construct(self):
formula1 = Tex(
"\\neg", #0
"\\forall", #1
"x", #2
":", #3
"P(x)", #4
)
formula2 = Tex(
"\\exists", #0
"x", #1
":", #2
"\\neg", #3
"P(x)" #4
)
parametters = [(2,2*UP,formula1,GREEN,"\\forall"),
(2,2*DOWN,formula2,ORANGE,"\\exists")]
for size,pos,formula,col,sim in parametters:
formula.scale(size)
formula.move_to(pos)
formula.set_color_by_tex(sim,col)
formula.set_color_by_tex("\\neg",PINK)
self.play(Write(formula1))
self.wait()
changes =[
[(2,3,4),(1,2,4)],
[(0,),(3,)],
[(1,0),(0,)]
]
for pre_ind, post_ind in changes:
self.play(*[
ReplacementTransform(formula1[i].copy(),formula2[j])
for i,j in zip(pre_ind,post_ind)
],
run_time =2
)
self.wait()
| 3.125 | 3 |
generateList.py | 42ip/animatedStickersDB | 4 | 12799614 | import sys,os
arr = []
files = os.listdir(sys.path[0] + '/stickersraw')
st = "{{$a := index .CmdArgs 0 }} \n"
st += "{{$b := cslice "
names = []
for fileName in files:
names.append(fileName.split('.')[0])
names = sorted(names)
n = ""
for name in names:
n += "\"" + name + "\" "
st += n
st += """ }}
{{if or (eq $a "stickers") (eq $a "gifs") (eq $a "gif") (eq $a "gifss") }}
{{deleteTrigger 0 }}
{{if eq (len .Args) 1}}
{{$r := joinStr " " $b.StringSlice}}
{{$r}}
{{else if eq (len .Args) 2}}
{{$c := index .CmdArgs 1}}
{{$s := cslice " " }}
{{range $index,$value := $b}}
{{- if or (hasPrefix $value $c) ( and (eq $a "gifss" ) ( reFind $c $value ) ) -}}
{{$s = $s.Append $value}}
{{- end -}}
{{- end}}
{{$r := joinStr " " $s.StringSlice}}
{{$r := str $r}}
{{$r}}
{{ deleteResponse 30 }}
{{end}}
{{end}}
{{range $b}}
{{- if eq . $a -}}
{{- $link := joinStr "" "https://github.com/42ip/animatedStickersDB/blob/main/stickersraw/" $a ".gif?raw=true" -}}
{{- $link -}}
{{- end -}}
{{- end}}"""
with open(sys.path[0] + "/output.yag", "w") as text_file:
text_file.write(st) | 2.375 | 2 |
Image_Video/image_video_newcode/blur_new.py | marvahm12/PyEng | 0 | 12799615 | import cv2
import sys
import matplotlib.pyplot as plt
def blur_display(infile, nogui=False):
# The first argument is the image
image = cv2.imread(infile)
    # convert to grayscale
gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
#blur it
blurred_image = cv2.GaussianBlur(image, (7,7), 0)
if nogui:
cv2.imwrite('test_blurred.png', blurred_image)
else:
        # Write all 3 images to disk
cv2.imwrite("Original_Image.png", image)
cv2.imwrite("Gray_Image.png", gray_image)
cv2.imwrite("Blurred_Image.png", blurred_image)
cv2.waitKey(0)
if __name__ == "__main__":
blur_display(sys.argv[1])
plt.savefig('output/Original_Image.png')
plt.savefig('output/Gray_Image.png')
plt.savefig('output/Blurred_Image.png') | 3.640625 | 4 |
best_practice_examples/multiple_state_variables_bp.py | kallelzied/PythonTutoriel | 0 | 12799616 | <reponame>kallelzied/PythonTutoriel<filename>best_practice_examples/multiple_state_variables_bp.py
"""multiple_state_variables_bp.py: Giving an example of best practicing with multiple state variables."""
__author__ = "<NAME>"
__copyright__ = """
Copyright 2018 multiple_state_variables_bp.py
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
__license__ = "Apache 2.0"
def fibonacci_wrong_way(n):
x = 0
y = 1
l = []
for i in range(n):
l.append(x)
t = y
y = x + y
x = t
print(l)
def fibonacci_correct_way(n):
x, y, l = 0, 1, []
for i in range(n):
l.append(x)
x, y = y, x + y
print(l)
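
def swap_correct_way(a, b):
    # Illustrative addition (not in the original file): the same idiom in its
    # smallest form, tuple assignment swaps two values without a temporary.
    a, b = b, a
    return a, b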
# Entry point
if __name__ == '__main__':
fibonacci_wrong_way(100)
fibonacci_correct_way(100)
| 3.203125 | 3 |
install/data/apps/internalTools/BAMval/BAMvalwrapper.py | inab/openEBench_vre | 1 | 12799617 | <reponame>inab/openEBench_vre
#!/usr/bin/python2.7
import os
import sys
import argparse
import json
import time
import socket # print localhost
import logging
import re
import pprint
import multiprocessing
#import psutil # available memory
import subprocess
import shutil
import glob
import tarfile
import subprocess
import random
out_dir=""
class Mugparams(object):
@staticmethod
def check_json(json_file):
logger = logging.getLogger("lg")
if not os.path.exists(json_file):
raise argparse.ArgumentTypeError("%s does not exist" % json_file)
with open(json_file,'r') as file_data:
try:
data = json.load(file_data)
except ValueError, e:
logger.exception("%s in not a valid json file." % json_file)
return data
@staticmethod
def readable_dir(d):
if not os.path.isdir(d):
raise Exception("readable_dir:{0} is not a directory path or is not accessible".format(d))
if os.access(d, os.R_OK):
return d
else:
raise Exception("readable_dir:{0} is not a readable dir".format(d))
@staticmethod
def writeable_file(f):
if not os.path.isfile(f):
d = os.path.dirname(f)
# TODO Fails if relative path given
if not os.path.isdir(d):
raise Exception("writeable_file:{0} not in a existing directory path, or not accessible".format(d))
else:
if os.access(d, os.W_OK):
return f
else:
raise Exception("writeable_file:{0} is not a writeable dir".format(d))
else:
return f
@staticmethod
def process_arguments(args):
global out_dir
logger = logging.getLogger("lg")
# Setting working directory (project)
proj_idx = next(idx for (idx, d) in enumerate(args.config["arguments"]) if d["name"] == "project")
out_dir = args.root_dir+"/"+args.config["arguments"][proj_idx]["value"]
logger.info("Output file directory set to %s" % out_dir)
# Indexing config arguments by name
arguments_by_name = dict((d["name"], d["value"]) for (index, d) in enumerate(args.config["arguments"]))
args.config["arguments"] = arguments_by_name
# Indexing config input_files by name (name could not be unique - because of allow_multiple)
inputs_by_name = {}
for index,d in enumerate(args.config["input_files"]):
name = args.config["input_files"][index]["name"]
if name in inputs_by_name:
pprint.pprint(inputs_by_name[name])
                if not isinstance(inputs_by_name[name], list):
prev = inputs_by_name[name]
inputs_by_name[name]= list()
inputs_by_name[name].append(prev)
inputs_by_name[name].append(d["value"])
else:
inputs_by_name[name]=d["value"]
args.config["input_files"] = inputs_by_name
logger.debug("Configuration file arguments and input_files are:\n %s " % pprint.pformat(args.config))
return 1
@staticmethod
def process_metadata(args):
global out_dir
logger = logging.getLogger("lg")
# Indexing metadata files by file_id ([_id])
metadata_by_id = dict((d["_id"], dict(d)) for (index, d) in enumerate(args.metadata))
args.metadata = metadata_by_id
logger.debug("VRE metadata for input_files is:\n %s " % pprint.pformat(args.metadata))
return 1
#
# Executing pipeline
# Calling MuG_Chromatin_equ_structure and MuG_Chromatin_sampling software in a subprocess
def run_pipeline(args, num_cores, x_rnd):
    if "createStructure" in args.config["arguments"]["operations"]:
        print "do Structure"
        sequence_file_id = args.config["input_files"]["sequence"]
        sequence = args.root_dir + "/" + args.metadata[sequence_file_id]["file_path"]
        nucl_pos_file_id = args.config["input_files"]["nuclPos"]
        nucl_pos = args.root_dir + "/" + args.metadata[nucl_pos_file_id]["file_path"]
        tmp_dir = "{0}/{1}/str_{2}".format(args.root_dir, args.config["arguments"]["project"], x_rnd)
        bashCommand = "cd /home/MuG/MuG_Chromatin_equ_structure/src_test; bash run.sh %s %s %s" % (nucl_pos, sequence, tmp_dir)
        print bashCommand
        process = subprocess.Popen(bashCommand,stdout=subprocess.PIPE, shell=True)
        proc_stdout = process.communicate()[0].strip()
        print proc_stdout
        usr_dir = args.root_dir + "/" + args.config["arguments"]["project"]
        bashCommand = "cp %s/output/chromdyn_str.pdb %s" % (tmp_dir, usr_dir)
        print bashCommand
        process = subprocess.Popen(bashCommand,stdout=subprocess.PIPE, shell=True)
        proc_stdout = process.communicate()[0].strip()
        print proc_stdout
if "createTrajectory" in args.config["arguments"]["operations"]:
print "do Trajectory"
sequence_file_id = args.config["input_files"]["sequence"]
sequence = args.root_dir + "/" + args.metadata[sequence_file_id]["file_path"]
nucl_pos_file_id = args.config["input_files"]["nuclPos"]
nucl_pos = args.root_dir + "/" + args.metadata[nucl_pos_file_id]["file_path"]
tmp_dir = "{0}/{1}/tra_{2}".format(args.root_dir, args.config["arguments"]["project"], x_rnd)
iterations = args.config["arguments"]["createTrajectory:numStruct"]
bashCommand = "cd /home/MuG/MuG_Chromatin_sampling/src_test; bash run.sh %s %s %s %s" % (nucl_pos, sequence, iterations, tmp_dir)
print bashCommand
process = subprocess.Popen(bashCommand,stdout=subprocess.PIPE, shell=True)
proc_stdout = process.communicate()[0].strip()
print proc_stdout
usr_dir = args.root_dir + "/" + args.config["arguments"]["project"]
bashCommand = "cp %s/output/chromdyn_start_str.pdb %s/output/chromdyn_str.dcd %s/output/chromdyn_dummy_str.pdb %s" % (tmp_dir, tmp_dir, tmp_dir, usr_dir)
print bashCommand
process = subprocess.Popen(bashCommand,stdout=subprocess.PIPE, shell=True)
proc_stdout = process.communicate()[0].strip()
print proc_stdout
if "create3DfromNucleaR" in args.config["arguments"]["operations"]:
print "do 3D from NucleaR"
gff_file_id = args.config["input_files"]["gffNucleaR"]
gff_file = args.root_dir + "/" + args.metadata[gff_file_id]["file_path"]
gen_reg = args.config["arguments"]["create3DfromNucleaR:genRegion"]
tmp_dir = "{0}/{1}/str_{2}".format(args.root_dir, args.config["arguments"]["project"], x_rnd)
bashCommand = " mkdir %s" % tmp_dir
print bashCommand
process = subprocess.Popen(bashCommand,stdout=subprocess.PIPE, shell=True)
proc_stdout = process.communicate()[0].strip()
print proc_stdout
assembly = args.metadata[gff_file_id]["meta_data"]["assembly"]
genome_file = "{0}/refGenomes/{1}/{1}.fa".format(args.public_dir,assembly)
bashCommand = " /home/MuG/MuG_Chromatin_equ_structure/src_test/nucleR2structure.py --calls %s --genome_file %s --range %s --seq_output %s --nucs_output %s --margin 4" % (gff_file, genome_file, gen_reg, tmp_dir +"/nucleR_to_3D_seq.txt", tmp_dir +"/nucleR_to_3D_nucl_pos.txt")
print bashCommand
process = subprocess.Popen(bashCommand,stdout=subprocess.PIPE, shell=True)
proc_stdout = process.communicate()[0].strip()
print proc_stdout
bashCommand = "cd /home/MuG/MuG_Chromatin_equ_structure/src_test; bash run.sh %s %s %s" % (tmp_dir +"/nucleR_to_3D_nucl_pos.txt", tmp_dir +"/nucleR_to_3D_seq.txt", tmp_dir)
print bashCommand
process = subprocess.Popen(bashCommand,stdout=subprocess.PIPE, shell=True)
proc_stdout = process.communicate()[0].strip()
print proc_stdout
usr_dir = args.root_dir + "/" + args.config["arguments"]["project"]
bashCommand = "cp %s/output/chromdyn_str.pdb %s" % (tmp_dir, usr_dir)
print bashCommand
process = subprocess.Popen(bashCommand,stdout=subprocess.PIPE, shell=True)
proc_stdout = process.communicate()[0].strip()
print proc_stdout
return 1
#
# Prepare metadata for the output files
def prepare_results(args, x_rnd):
global out_dir
logger = logging.getLogger("lg")
if (args.out_metadata):
# Create out_metadata JSON
json_data = {}
json_data['output_files']= []
if ("createStructure" in args.config["arguments"]["operations"]) or ("create3DfromNucleaR" in args.config["arguments"]["operations"]):
### PDB_chromatin_structure
pdbMeta = {}
# Set name
# Should coincide with tool.json
pdbMeta["name"] = "PDB_chromatin_structure"
# Set file_path
# Absolute path. Should be better relative to root_dir?
tmp_dir = args.root_dir + "/" + args.config["arguments"]["project"]
pdb_file = tmp_dir + "/chromdyn_str.pdb"
pdbMeta["file_path"] = pdb_file
# Set source_id & taxon_id
pdbMeta["source_id"] = []
if "sequence" in args.config['input_files']:
pdbMeta["source_id"].append(args.config['input_files']["sequence"])
# Set taxon_id
# taxon_id is inherited from the input file (i.e the source_id)
pdbMeta["taxon_id"] = 0
if pdbMeta["source_id"]:
for file_id in pdbMeta["source_id"]:
pprint.pprint(args.metadata)
if args.metadata[file_id]["taxon_id"]:
pdbMeta["taxon_id"] = args.metadata[file_id]["taxon_id"]
break
# Append output_file metadata into JSON data
json_data['output_files'].append(pdbMeta)
if "createTrajectory" in args.config["arguments"]["operations"]:
### chromatin_starting_trajectory_structure
pdbMeta1 = {}
# Set name. Should coincide with tool.json
pdbMeta1["name"] = "PDB_chromatin_starting_structure"
# Set file_path. Absolute path. Should be better relative to root_dir?
tmp_dir = args.root_dir + "/" + args.config["arguments"]["project"]
pdb_file = tmp_dir + "/chromdyn_start_str.pdb"
pdbMeta1["file_path"] = pdb_file
# Set source_id & taxon_id
pdbMeta1["source_id"] = []
if "sequence" in args.config['input_files']:
pdbMeta1["source_id"].append(args.config['input_files']["sequence"])
# Set taxon_id. taxon_id is inherited from the input file (i.e the source_id)
pdbMeta1["taxon_id"] = 0
if pdbMeta1["source_id"]:
for file_id in pdbMeta1["source_id"]:
pprint.pprint(args.metadata)
if args.metadata[file_id]["taxon_id"]:
pdbMeta1["taxon_id"] = args.metadata[file_id]["taxon_id"]
break
# Append output_file metadata into JSON data
json_data['output_files'].append(pdbMeta1)
### chromatin_dummy_trajectory_structure
pdbMeta2 = {}
# Set name. Should coincide with tool.json
pdbMeta2["name"] = "PDB_dummy_chromatin_structure"
# Set file_path. Absolute path. Should be better relative to root_dir?
tmp_dir = args.root_dir + "/" + args.config["arguments"]["project"]
pdb_file = tmp_dir + "/chromdyn_dummy_str.pdb"
pdbMeta1["file_path"] = pdb_file
# Set source_id & taxon_id
pdbMeta2["source_id"] = []
if "sequence" in args.config['input_files']:
pdbMeta2["source_id"].append(args.config['input_files']["sequence"])
# Set taxon_id. taxon_id is inherited from the input file (i.e the source_id)
pdbMeta2["taxon_id"] = 0
if pdbMeta2["source_id"]:
for file_id in pdbMeta2["source_id"]:
pprint.pprint(args.metadata)
if args.metadata[file_id]["taxon_id"]:
pdbMeta2["taxon_id"] = args.metadata[file_id]["taxon_id"]
break
# Append output_file metadata into JSON data
json_data['output_files'].append(pdbMeta2)
### chromatin_trajectory
trajMeta = {}
# Set name
# Should coincide with tool.json
trajMeta["name"] = "chromatin_trajectory"
# Set file_path
# Absolute path. Should be better relative to root_dir?
tmp_dir = args.root_dir + "/" + args.config["arguments"]["project"]
traj_file = tmp_dir + "/chromdyn_str.dcd"
trajMeta["file_path"] = traj_file
# Set source_id & taxon_id
trajMeta["source_id"] = []
if "sequence" in args.config['input_files']:
trajMeta["source_id"].append(args.config['input_files']["sequence"])
# Set taxon_id. taxon_id is inherited from the input file (i.e the source_id)
trajMeta["taxon_id"] = 0
if trajMeta["source_id"]:
for file_id in trajMeta["source_id"]:
pprint.pprint(args.metadata)
if args.metadata[file_id]["taxon_id"]:
trajMeta["taxon_id"] = args.metadata[file_id]["taxon_id"]
break
# Append output_file metadata into JSON data
json_data['output_files'].append(trajMeta)
# Prepare last output file: TAR of outputs, *CSVs and *PNGs
files = []
extensions = ('*.txt','*.csv','*.png')
out_dirs = []
if "createStructure" in args.config["arguments"]["operations"]:
out_dirs.append("{0}/{1}/str_{2}/output".format(args.root_dir, args.config["arguments"]["project"], x_rnd))
if "createTrajectory" in args.config["arguments"]["operations"]:
out_dirs.append("{0}/{1}/tra_{2}/output".format(args.root_dir, args.config["arguments"]["project"], x_rnd))
if "create3DfromNucleaR" in args.config["arguments"]["operations"]:
out_dirs.append("{0}/{1}/str_{2}/output".format(args.root_dir, args.config["arguments"]["project"], x_rnd))
for out_dir in out_dirs:
for extension in extensions:
files.extend(glob.glob(out_dir+"/"+extension))
tmp_dir = args.root_dir + "/" + args.config["arguments"]["project"]
out_tar = tmp_dir + "/results.tar.gz"
tar = tarfile.open(out_tar, "w:gz")
for fil in files:
logger.info ("Packing %s into statistics TAR" % os.path.basename(fil))
tar.add(fil, arcname=os.path.basename(fil))
tar.close()
# Set metadata required for TAR output file
result = {}
result["name"] = "summary"
result["source_id"] = []
if "sequence" in args.config['input_files']:
result["source_id"].append(args.config['input_files']["sequence"])
result["file_path"] = out_tar
result["taxon_id"] = 0
json_data['output_files'].append(result)
# Write down output file metadata
J = open(args.out_metadata, 'wb')
json.dump(json_data,J, indent=4)
J.close
logger.info("Output files annotated into %s" % args.out_metadata)
# Delete temporary directory
if "createStructure" in args.config["arguments"]["operations"]:
tmp_dir = "{0}/{1}/str_{2}".format(args.root_dir, args.config["arguments"]["project"], x_rnd)
bashCommand = "rm -r %s" % tmp_dir
print bashCommand
process = subprocess.Popen(bashCommand,stdout=subprocess.PIPE, shell=True)
proc_stdout = process.communicate()[0].strip()
print proc_stdout
if "create3DfromNucleaR" in args.config["arguments"]["operations"]:
tmp_dir = "{0}/{1}/str_{2}".format(args.root_dir, args.config["arguments"]["project"], x_rnd)
bashCommand = "rm -r %s" % tmp_dir
print bashCommand
process = subprocess.Popen(bashCommand,stdout=subprocess.PIPE, shell=True)
proc_stdout = process.communicate()[0].strip()
print proc_stdout
if "createTrajectory" in args.config["arguments"]["operations"]:
tmp_dir = "{0}/{1}/tra_{2}".format(args.root_dir, args.config["arguments"]["project"], x_rnd)
bashCommand = "rm -r %s" % tmp_dir
print bashCommand
process = subprocess.Popen(bashCommand,stdout=subprocess.PIPE, shell=True)
proc_stdout = process.communicate()[0].strip()
print proc_stdout
def main():
# Start logging
logger = logging.getLogger("lg")
logger.setLevel(logging.INFO)
formatter = logging.Formatter(fmt='%(asctime)s - %(module)s - %(levelname)s - %(message)s')
handler = logging.FileHandler('%s.log' % os.path.splitext(os.path.basename(__file__))[0])
handler.setLevel(logging.INFO)
handler.setFormatter(formatter)
logger.addHandler(handler)
streamhandler = logging.StreamHandler()
streamhandler.setLevel(logging.INFO)
streamhandler.setFormatter(formatter)
logger.addHandler(streamhandler)
logger.info('Starting %s' % __file__)
# Parse CMD
parser = argparse.ArgumentParser(prog="chromatindyn_wf", description="Chromatin Dynamics workflow")
parser.add_argument("--config", required=True, type=Mugparams.check_json, metavar="CONFIG_JSON",
help="JSON file containing workflow parameters")
parser.add_argument("--root_dir", required=True, type=Mugparams.readable_dir, metavar="ABS_PATH",
help="Absolute path of the user data directory.")
parser.add_argument("--public_dir", required=False, type=Mugparams.readable_dir, metavar="PUBLIC_PATH",
help="Absolute path of the MuG public directory (with reference genome data, etc).")
parser.add_argument("--metadata", required=True, type=Mugparams.check_json, metavar="METADATA_JSON",
help="JSON file containing MuG metadata files")
parser.add_argument("--out_metadata", required=False, type=Mugparams.writeable_file, metavar="RESULTS_JSON",
help="JSON file containing results metadata")
parser.add_argument("-v", "--verbose", required=False, action="store_true",
help="increase output verbosity")
parser.add_argument('--version', action='version', version='%(prog)s 0.1')
args = parser.parse_args()
if args.verbose:
        logger.setLevel(logging.DEBUG)
        handler.setLevel(logging.DEBUG)
        streamhandler.setLevel(logging.DEBUG)
        logger.debug("Verbose mode on")
# Parse config
Mugparams.process_arguments(args)
Mugparams.process_metadata(args)
# Print host info
num_cores = multiprocessing.cpu_count()
host = socket.gethostname()
#mem = psutil.virtual_memory()
logger.debug('HOST=%s CPUs=%s MEM=x' %(host,num_cores))
# Run pipeline
x_rnd = int(random.random()*10000000)
outfiles = run_pipeline(args, num_cores, x_rnd)
# Results
prepare_results(args, x_rnd)
if __name__ == '__main__':
main()
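
# Example invocation (hypothetical paths, for illustration only):
#   python BAMvalwrapper.py --config /path/to/config.json \
#       --root_dir /path/to/userdata --public_dir /path/to/public \
#       --metadata /path/to/input_metadata.json \
#       --out_metadata /path/to/results_metadata.json --verbose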
| 2.21875 | 2 |
ship_mapper/mapper.py | Diego-Ibarra/ship-mapper | 3 | 12799618 | import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.patches import FancyBboxPatch
from matplotlib.colors import LinearSegmentedColormap
from mpl_toolkits.basemap import Basemap
import numpy as np
# Suppress matplotlib warnings
np.warnings.filterwarnings('ignore')
import xarray as xr
import cmocean
from pathlib import Path
import _pickle as pickle
import os
import ship_mapper as sm
import urllib.request
import netCDF4
def map_density(info, file_in=None, cmap='Default', sidebar=False,
to_screen=True, save=True,
filename_out='auto',filedir_out='auto'):
'''
Plots a map using a gridded (or merged) file
Arguments:
info (info): ``info`` object containing metadata
Keyword Arguments:
file_in (str): Gridded or merged file to map. If ``None`` it looks for
``merged_grid.nc`` in the `\merged` directory
cmap (str): Colormap to use
sidebar (bool): If ``True``, includes side panel with metadata
to_screen (bool): If ``True``, a plot is printed to screen
save (bool): If ``True`` a ``.png`` figure is saved to hardrive
filename_out (str): Name of produced figure.
If ``auto`` then name is ``info.run_name + '__' + file_in + '.png'``
filedir_out (str): Directory where figure is saved.
If ``auto`` then output directory is ``info.dirs.pngs``
Returns:
Basemap object
'''
print('map_density ------------------------------------------------------')
# Load data
if file_in == None:
file_in = os.path.join(str(info.dirs.merged_grid),'merged_grid.nc')
print(file_in)
d = xr.open_dataset(file_in)
# Define boundaries
if info.grid.minlat == None or info.grid.maxlat == None or info.grid.minlon == None or info.grid.maxlon == None:
minlat = d['lat'].values.min()
maxlat = d['lat'].values.max()
minlon = d['lon'].values.min()
maxlon = d['lon'].values.max()
else:
minlat = d.attrs['minlat']
maxlat = d.attrs['maxlat']
minlon = d.attrs['minlon']
maxlon = d.attrs['maxlon']
basemap_file = info.dirs.basemap
print('Basemap file: ' + basemap_file)
    # Check for basemap.p and, if it doesn't exist, make it
if not os.path.exists(basemap_file):
m = sm.make_basemap(info,info.dirs.project_path,[minlat,maxlat,minlon,maxlon])
else:
print('Found basemap...')
m = pickle.load(open(basemap_file,'rb'))
# Create grid for mapping
lons_grid, lats_grid = np.meshgrid(d['lon'].values,d['lat'].values)
xx,yy = m(lons_grid, lats_grid)
H = d['ship_density'].values
# Rotate and flip H... ----------------------------------------------------------------------------
H = np.rot90(H)
H = np.flipud(H)
# Mask zeros
d.attrs['mask_below'] = info.maps.mask_below
Hmasked = np.ma.masked_where(H<=d.attrs['mask_below'],H)
    # Set vmax and vmin
print('Min: ' + str(np.min(Hmasked)))
print('Max: ' + str(np.max(Hmasked)))
print('Mean: ' + str(np.nanmean(Hmasked)))
print('Std: ' + str(Hmasked.std()))
if info.maps.cbarmax == 'auto':
# vmax = (np.median(Hmasked)) + (4*Hmasked.std())
vmax = (np.max(Hmasked)) - (2*Hmasked.std())
elif info.maps.cbarmax != None:
vmax = info.maps.cbarmax
else:
vmax = None
if info.maps.cbarmin == 'auto':
# vmin = (np.median(Hmasked)) - (4*Hmasked.std())
alat = (d.attrs['maxlat'] - d.attrs['minlat'])/2
cellsize = sm.degrees_to_meters(d.attrs['bin_size'], alat)
# max_speed = 616.66 # m/min ...roughly 20 knots
        max_speed = 316.66 # m/min ...roughly 10 knots
vmin = cellsize / max_speed
elif info.maps.cbarmin != None:
vmin = info.maps.cbarmin
else:
vmin = None
# Log H for better display
Hmasked = np.log10(Hmasked)
if vmin != None:
vmin = np.log10(vmin)
if vmax != None:
vmax = np.log10(vmax)
# Make colormap
fig = plt.gcf()
ax = plt.gca()
if cmap == 'Default':
cmapcolor = load_my_cmap('my_cmap_amber2red')
elif cmap == 'red2black':
cmapcolor = load_my_cmap('my_cmap_red2black')
else:
cmapcolor =plt.get_cmap(cmap)
cs = m.pcolor(xx,yy,Hmasked, cmap=cmapcolor, zorder=10, vmin=vmin, vmax=vmax)
#scalebar
sblon = minlon + ((maxlon-minlon)/10)
sblat = minlat + ((maxlat-minlat)/20)
m.drawmapscale(sblon, sblat,
minlon, minlat,
info.maps.scalebar_km, barstyle='fancy',
units='km', fontsize=8,
fontcolor='#808080',
fillcolor1 = '#cccccc',
fillcolor2 = '#a6a6a6',
yoffset = (0.01*(m.ymax-m.ymin)),
labelstyle='simple',zorder=60)
if not sidebar:
cbaxes2 = fig.add_axes([0.70, 0.18, 0.2, 0.03],zorder=60)
cbar = plt.colorbar(extend='both', cax = cbaxes2, orientation='horizontal')
# Change colorbar labels for easier interpreting
label_values = cbar._tick_data_values
log_label_values = np.round(10 ** label_values,decimals=0)
labels = []
for log_label_value in log_label_values:
labels.append(str(int(log_label_value)))
        cbar.ax.set_xticklabels(labels)  # horizontal colorbar, so ticks are on the x axis
cbar.ax.set_xlabel(d.attrs['units'])
if sidebar:
text1, text2, text3, text4 = make_legend_text(info,d.attrs)
ax2 = plt.subplot2grid((1,24),(0,0),colspan=4)
# Turn off tick labels
ax2.get_xaxis().set_visible(False)
ax2.get_yaxis().set_visible(False)
ax2.add_patch(FancyBboxPatch((0,0),
width=1, height=1, clip_on=False,
boxstyle="square,pad=0", zorder=3,
facecolor='#e6e6e6', alpha=1.0,
edgecolor='#a6a6a6',
transform=plt.gca().transAxes))
plt.text(0.15, 0.99, text1,
verticalalignment='top',
horizontalalignment='left',
weight='bold',
size=10,
color= '#737373',
transform=plt.gca().transAxes)
plt.text(0.02, 0.83, text2,
horizontalalignment='left',
verticalalignment='top',
size=9,
color= '#808080',
transform=plt.gca().transAxes)
plt.text(0.02, 0.145, text3,
horizontalalignment='left',
verticalalignment='top',
size=7,
color= '#808080',
transform=plt.gca().transAxes)
plt.text(0.02, 0.25, text4,
style='italic',
horizontalalignment='left',
verticalalignment='top',
size=8,
color= '#808080',
transform=plt.gca().transAxes)
cbaxes2 = fig.add_axes([0.019, 0.9, 0.15, 0.02],zorder=60)
cbar = plt.colorbar(extend='both', cax = cbaxes2, orientation='horizontal')
cbar.ax.tick_params(labelsize=8, labelcolor='#808080')
# Change colorbar labels for easier interpreting
label_values = cbar._tick_data_values
# print("values")
# print(label_values)
log_label_values = np.round(10 ** label_values,decimals=0)
# print(log_label_values)
labels = []
for log_label_value in log_label_values:
labels.append(str(int(log_label_value)))
cbar.ax.set_xticklabels(labels)
cbar.ax.set_xlabel(d.attrs['units'], size=9, color='#808080')
# TODO: maybe delete this?
# mng = plt.get_current_fig_manager()
# mng.frame.Maximize(True)
#
# fig.tight_layout()
plt.show()
# Save map as png
if save:
if filedir_out == 'auto':
filedir = str(info.dirs.pngs)
else:
filedir = filedir_out
if filename_out == 'auto':
filename = info.run_name + '__' + sm.get_filename_from_fullpath(file_in) + '.png'
else:
filename = filename_out
sm.checkDir(filedir)
plt.savefig(os.path.join(filedir,filename), dpi=300)
# Close netCDF file
d.close()
if to_screen == False:
plt.close()
return
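
# Minimal usage sketch (illustrative, not part of the module): given an ``info``
# object prepared by the calling script, the default call maps the merged grid
# and writes a PNG into info.dirs.pngs.
def _example_map_density(info):
    # file_in=None falls back to <info.dirs.merged_grid>/merged_grid.nc
    map_density(info, file_in=None, sidebar=True, to_screen=False, save=True)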
def make_legend_text(info,md):
'''
Makes text for legend in left block of map
:param info info: ``info`` object containing metadata
:return: text for legend
'''
import datetime
alat = (md['maxlat'] - md['minlat'])/2
text1 = 'VESSEL DENSITY HEATMAP'
# print(info)
# --------------------------------------------------------
text2 = ('Unit description: ' + md['unit_description'] + '\n\n' +
'Data source: ' + md['data_source'] + '\n\n' +
'Data source description:\n' + md['data_description'] + '\n\n' +
'Time range: \n' + md['startdate'][0:-3] + ' to ' + md['enddate'][0:-3] + '\n\n' +
'Included speeds: ' + info.sidebar.included_speeds + '\n' +
'Included vessels: ' + info.sidebar.included_vessel_types + '\n\n' +
'Grid size: ' + str(md['bin_size']) + ' degrees (~' + str(int(round(sm.degrees_to_meters(md['bin_size'], alat))))+ ' m)\n' +
             'EPSG code: ' + md['epsg_code'] + '\n' +
'Interpolation: ' + md['interpolation'] + '\n' +
'Interpolation threshold: ' + str(md['interp_threshold']) + ' knots\n' +
'Time bin: ' + str(round(md['time_bin']*1440,1)) + ' minutes\n' +
'Mask below: ' + str(md['mask_below']) + ' vessels per grid'
)
text3 = ('Creation date: ' + datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') + '\n' +
'Creation script: ' + info.run_name + '.py\n' +
'Software: ship mapper v0.1\n\n' +
'Created by:\n' +
'Oceans and Coastal Management Division\n' +
'Ecosystem Management Branch\n' +
'Fisheries and Oceans Canada – Maritimes Region\n' +
'Bedford Institute of Oceanography\n' +
'PO Box 1006, Dartmouth, NS, Canada, B2Y 4A2'
)
text4 = ('---------------------------------------------------------------\n' +
'WARNING: This is a preliminary data product.\n' +
'We cannot guarantee the validity, accuracy, \n' +
'or quality of this product. Data is provided\n' +
'on an "AS IS" basis. USE AT YOUR OWN RISK.\n' +
'---------------------------------------------------------------\n'
)
return text1, text2, text3, text4
def map_dots(info, file_in, sidebar=False, save=True):
'''
Creates a map of "pings" rather than gridded density
Arguments:
info (info): ``info`` object containing metadata
Keyword Arguments:
file_in (str): Gridded or merged file to map. If ``None`` it looks for
``merged_grid.nc`` in the `\merged` directory
sidebar (bool): If ``True``, includes side panel with metadata
save (bool): If ``True`` a ``.png`` figure is saved to hardrive
'''
print('Mapping...')
# -----------------------------------------------------------------------------
d = xr.open_dataset(file_in)
# Define boundaries
if info.grid.minlat == None or info.grid.maxlat == None or info.grid.minlon == None or info.grid.maxlon == None:
minlat = d['lat'].values.min()
maxlat = d['lat'].values.max()
minlon = d['lon'].values.min()
maxlon = d['lon'].values.max()
else:
minlat = info.grid.minlat
maxlat = info.grid.maxlat
minlon = info.grid.minlon
maxlon = info.grid.maxlon
path_to_basemap = info.dirs.project_path / 'ancillary'
print('-----------------------------------------------------')
print('-----------------------------------------------------')
if sidebar:
basemap_file = str(path_to_basemap / 'basemap_sidebar.p')
else:
basemap_file = str(path_to_basemap / 'basemap.p')
if not os.path.exists(basemap_file):
m = sm.make_basemap(info,[minlat,maxlat,minlon,maxlon])
else:
print('Found basemap...')
m = pickle.load(open(basemap_file,'rb'))
x, y = m(d['longitude'].values,d['latitude'].values)
cs = m.scatter(x,y,s=0.1,marker='o',color='r', zorder=10)
#
plt.show()
# # Save map as png
# if save:
# filedir = str(info.dirs.pngs)
# sm.checkDir(filedir)
# filename = info.project_name + '_' + str(info.grid.bin_number) + '.png'
# plt.savefig(os.path.join(filedir,filename), dpi=300)
return
def map_dots_one_ship(info, file_in, Ship_No, save=True):
'''
Creates a map of "pings" (i.e. not gridded density) of only one ship
Arguments:
info (info): ``info`` object containing metadata
Keyword Arguments:
file_in (str): Gridded or merged file to map. If ``None`` it looks for
``merged_grid.nc`` in the `\merged` directory
Ship_No (str): Unique identifier of the ship to plot
save (bool): If ``True`` a ``.png`` figure is saved to hardrive
'''
import pandas as pd
print('Mapping...')
# -----------------------------------------------------------------------------
d = xr.open_dataset(file_in)
# Define boundaries
if info.grid.minlat == None or info.grid.maxlat == None or info.grid.minlon == None or info.grid.maxlon == None:
minlat = d['lat'].values.min()
maxlat = d['lat'].values.max()
minlon = d['lon'].values.min()
maxlon = d['lon'].values.max()
else:
minlat = info.grid.minlat
maxlat = info.grid.maxlat
minlon = info.grid.minlon
maxlon = info.grid.maxlon
path_to_basemap = info.dirs.project_path / 'ancillary'
print('-----------------------------------------------------')
print('-----------------------------------------------------')
# basemap_file = str(path_to_basemap / 'basemap_spots.p')
m = sm.make_basemap(info.dirs.project_path,[minlat,maxlat,minlon,maxlon])
# if not os.path.exists(str(path_to_basemap / 'basemap.p')):
# m = sm.make_basemap(info.dirs.project_path,[minlat,maxlat,minlon,maxlon])
# else:
# print('Found basemap...')
# m = pickle.load(open(basemap_file,'rb'))
indx = ((d['longitude']> minlon) &
(d['longitude']<= maxlon) &
(d['latitude']> minlat) &
(d['latitude']<= maxlat))
filtered_data = d.sel(Dindex=indx)
ship_id = info.ship_id
unis = pd.unique(filtered_data[ship_id].values)
ship = unis[Ship_No]
indxship = (filtered_data[ship_id] == ship)
singleship = filtered_data.sel(Dindex=indxship)
print('Ship id:'+ str(ship))
# print(singleship['longitude'].values)
# print(singleship['latitude'].values)
x, y = m(singleship['longitude'].values,singleship['latitude'].values)
# x, y = m(d['longitude'].values,d['latitude'].values)
cs = m.scatter(x,y,2,marker='o',color='r', zorder=30)
# fig = plt.figure()
# plt.plot(filtered_data['longitude'].values,filtered_data['latitude'].values,'.')
#
plt.show()
# # Save map as png
# if save:
# filedir = str(info.dirs.pngs)
# sm.checkDir(filedir)
# filename = info.project_name + '_' + str(info.grid.bin_number) + '.png'
# plt.savefig(os.path.join(filedir,filename), dpi=300)
return
def define_path_to_map(info, path_to_basemap='auto'):
'''
Figures out where is the .basemap and .grid files
Arguments:
info (info): ``info`` object containing metadata
'''
if path_to_basemap == 'auto':
if info.grid.type == 'one-off':
path_to_map = os.path.join(info.dirs.project_path,info.grid.region,'ancillary')
elif info.grid.type == 'generic':
path_to_map = os.path.abspath(os.path.join(info.dirs.project_path,'ancillary'))
else:
path_to_map = path_to_basemap
return path_to_map
def make_basemap(info,spatial,path_to_basemap='auto', sidebar=False):
'''
Makes a basemap
Arguments:
info (info): ``info`` object containing metadata
spatial (list): List with corners... this will be deprecated soon
Keyword arguments:
path_to_basemap (str): Directory where to save the produced basemap. If ``'auto'``
then path is setup by :func:`~ship_mapper.mapper.define_path_to_map`
sidebar (bool): If ``True`` space for a side panel is added to the basemap
Returns:
A ``.basemap`` and a ``.grid`` files
'''
print('Making basemap...')
# -----------------------------------------------------------------------------
path_to_map = define_path_to_map(info, path_to_basemap=path_to_basemap)
sm.checkDir(str(path_to_map))
minlat = spatial[0]
maxlat = spatial[1]
minlon = spatial[2]
maxlon = spatial[3]
# Create map
m = Basemap(projection='mill', llcrnrlat=minlat,urcrnrlat=maxlat,
llcrnrlon=minlon, urcrnrlon=maxlon,resolution=info.maps.resolution)
# TOPO
# Read data from: http://coastwatch.pfeg.noaa.gov/erddap/griddap/usgsCeSrtm30v6.html
# using the netCDF output option
# bathymetry_file = str(path_to_map / 'usgsCeSrtm30v6.nc')
bathymetry_file = os.path.join(path_to_map, 'usgsCeSrtm30v6.nc')
if not os.path.isfile(bathymetry_file):
isub = 1
base_url='http://coastwatch.pfeg.noaa.gov/erddap/griddap/usgsCeSrtm30v6.nc?'
query='topo[(%f):%d:(%f)][(%f):%d:(%f)]' % (maxlat,isub,minlat,minlon,isub,maxlon)
url = base_url+query
# store data in NetCDF file
urllib.request.urlretrieve(url, bathymetry_file)
# open NetCDF data in
nc = netCDF4.Dataset(bathymetry_file)
ncv = nc.variables
lon = ncv['longitude'][:]
lat = ncv['latitude'][:]
lons, lats = np.meshgrid(lon,lat)
topo = ncv['topo'][:,:]
#
fig = plt.figure(figsize=(19,9))
# ax = fig.add_axes([0.05,0.05,0.80,1])
# ax = fig.add_axes([0,0,0.80,1])
# ax = fig.add_axes([0.23,0.035,0.85,0.9])
if sidebar:
ax = plt.subplot2grid((1,24),(0,5),colspan=19)
else:
ax = fig.add_axes([0.05,0.05,0.94,0.94])
TOPOmasked = np.ma.masked_where(topo>0,topo)
cs = m.pcolormesh(lons,lats,TOPOmasked,cmap=load_my_cmap('my_cmap_lightblue'),latlon=True,zorder=5)
# m.drawcoastlines(color='#A27D0C',linewidth=0.5,zorder=25)
# m.fillcontinents(color='#E1E1A0',zorder=23)
m.drawcoastlines(color='#a6a6a6',linewidth=0.5,zorder=25)
m.fillcontinents(color='#e6e6e6',zorder=23)
m.drawmapboundary()
def setcolor(x, color):
for m in x:
for t in x[m][1]:
t.set_color(color)
parallels = np.arange(minlat,maxlat,info.maps.parallels)
# labels = [left,right,top,bottom]
par = m.drawparallels(parallels,labels=[True,False,False,False],dashes=[20,20],color='#00a3cc', linewidth=0.2, zorder=25)
setcolor(par,'#00a3cc')
meridians = np.arange(minlon,maxlon,info.maps.meridians)
mers = m.drawmeridians(meridians,labels=[False,False,False,True],dashes=[20,20],color='#00a3cc', linewidth=0.2, zorder=25)
setcolor(mers,'#00a3cc')
ax = plt.gca()
# ax.axhline(linewidth=4, color="#00a3cc")
# ax.axvline(linewidth=4, color="#00a3cc")
#
ax.spines['top'].set_color('#00a3cc')
ax.spines['right'].set_color('#00a3cc')
ax.spines['bottom'].set_color('#00a3cc')
ax.spines['left'].set_color('#00a3cc')
for k, spine in ax.spines.items(): #ax.spines is a dictionary
spine.set_zorder(35)
# ax.spines['top'].set_visible(False)
# ax.spines['right'].set_visible(False)
# ax.spines['bottom'].set_visible(False)
# ax.spines['left'].set_visible(False)
# fig.tight_layout(pad=0.25)
fig.tight_layout(rect=[0.01,0.01,.99,.99])
plt.show()
if sidebar:
basemap_name = 'basemap_sidebar.p'
else:
basemap_name = 'basemap.p'
info = sm.calculate_gridcell_areas(info)
# Save basemap
save_basemap(m,info,path_to_basemap=path_to_map)
# picklename = str(path_to_map / basemap_name)
# pickle.dump(m,open(picklename,'wb'),-1)
# print('!!! Pickle just made: ' + picklename)
#
## pngDir = 'C:\\Users\\IbarraD\\Documents\\VMS\\png\\'
## plt.savefig(datadir[0:-5] + 'png\\' + filename + '- Grid' + str(BinNo) + ' - Filter' +str(downLim) + '-' + str(upLim) + '.png')
# plt.savefig('test.png')
return m
def load_my_cmap(name):
'''
Creates and loads custom colormap
'''
# cdict = {'red': ((0.0, 0.0, 0.0),
# (1.0, 0.7, 0.7)),
# 'green': ((0.0, 0.25, 0.25),
# (1.0, 0.85, 0.85)),
# 'blue': ((0.0, 0.5, 0.5),
# (1.0, 1.0, 1.0))}
# my_cmap = LinearSegmentedColormap('my_colormap',cdict,256)
if name == 'my_cmap_lightblue':
cdict = {'red': ((0.0, 0.0, 0.0), # Dark
(1.0, 0.9, 0.9)), # Light
'green': ((0.0, 0.9, 0.9),
(1.0, 1.0,1.0)),
'blue': ((0.0, 0.9, 0.9),
(1.0, 1.0, 1.0))}
my_cmap = LinearSegmentedColormap('my_colormap',cdict,256)
elif name == 'my_cmap_amber2red':
# cdict = {'red': ((0.0, 1.0, 1.0),
# (1.0, 0.5, 0.5)),
# 'green': ((0.0, 1.0, 1.0),
# (1.0, 0.0, 0.0)),
# 'blue': ((0.0, 0.0, 0.0),
# (1.0, 0.0, 0.0))}
# my_cmap_yellow2red = LinearSegmentedColormap('my_colormap',cdict,256)
cdict = {'red': ((0.0, 1.0, 1.0),
(1.0, 0.5, 0.5)),
'green': ((0.0, 0.85, 0.85),
(1.0, 0.0, 0.0)),
'blue': ((0.0, 0.3, 0.3),
(1.0, 0.0, 0.0))}
my_cmap = LinearSegmentedColormap('my_colormap',cdict,256)
elif name == 'my_cmap_red2black':
# c1 = np.array([252,142,110])/256 #RGB/256
c1 = np.array([250,59,59])/256 #RGB/256
c2 = np.array([103,0,13])/256 #RGB/256
cdict = {'red': ((0.0, c1[0], c1[0]),
(1.0, c2[0], c2[0])),
'green': ((0.0, c1[1], c1[1]),
(1.0, c2[1], c2[1])),
'blue': ((0.0, c1[2], c1[2]),
(1.0, c2[2], c2[2]))}
my_cmap = LinearSegmentedColormap('my_colormap',cdict,256)
else:
print('cmap name does not match any of the available cmaps')
return my_cmap
def save_basemap(m,info,path_to_basemap='auto'):
'''
    Saves basemap (and corresponding info.grid) to a pickle file
Arguments:
m (mpl_toolkits.basemap.Basemap): Basemap object
info (info): ``info`` object containing metadata
Keyword Arguments:
path_to_basemap (str): If ``'auto'`` it looks in ``grids`` directory
Returns:
Pickle file
See also:
:mod:`pickle`
'''
#
# basemap = [grid, m]
# f = open(str(path_to_map / (info.grid.basemap + '.p')),'w')
# pickle.dump(grid, f)
# pickle.dump(m, f)
# f.close()
# picklename = str(path_to_map / (info.grid.basemap + '.p'))
# pickle.dump(basemap, open(picklename, 'wb'), -1)
# print('!!! Pickle just made: ' + picklename)
path_to_map = define_path_to_map(info, path_to_basemap=path_to_basemap)
# basemap_picklename = str(path_to_map / (info.grid.basemap + '.basemap'))
basemap_picklename = os.path.join(path_to_map,info.grid.basemap + '.basemap')
pickle.dump(m, open(basemap_picklename, 'wb'), -1)
# info_picklename = str(path_to_map / (info.grid.basemap + '.grid'))
info_picklename = os.path.join(path_to_map, info.grid.basemap + '.grid')
pickle.dump(info, open(info_picklename, 'wb'), -1)
print('!!! Pickles were just made: ' + basemap_picklename)
return
| 2.40625 | 2 |
template/{{ cookiecutter.project_slug }}/{{ cookiecutter.project_module }}/cli/__init__.py | seldonPlan/pyscript | 0 | 12799619 | <gh_stars>0
from .init_cfg import init
from .root import root
# add sub-command functions here
root.add_command(init)
| 1.328125 | 1 |
tweezers/ixo/decorators.py | DollSimon/tweezers | 0 | 12799620 | <reponame>DollSimon/tweezers
class lazy(object):
"""
Property for the lazy evaluation of Python attributes. In the example below, the ``expensive_computation()`` is only
called when the attribute ``object.my_attribute`` is accessed for the first time.
Example:
Use the ``@lazy`` decorator for a function that returns the result of the computation. Access it as a normal
attribute::
@lazy
def my_attribute(self):
value = self.expensive_computation()
return value
"""
def __init__(self, func):
self.func = func
def __get__(self, instance, owner):
if instance is None:
return None
value = self.func(instance)
setattr(instance, self.func.__name__, value)
return value | 3.9375 | 4 |
pymoo/model/repair.py | gabicavalcante/pymoo | 762 | 12799621 | <filename>pymoo/model/repair.py
from abc import abstractmethod
class Repair:
"""
    This class allows individuals to be repaired after crossover if necessary.
"""
def do(self, problem, pop, **kwargs):
return self._do(problem, pop, **kwargs)
@abstractmethod
def _do(self, problem, pop, **kwargs):
pass
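
class ClampRepair(Repair):
    """
    Illustrative sketch (not part of pymoo): clamp each design vector back into
    the problem's box bounds after crossover/mutation. Assumes the population
    exposes get("X")/set("X") as pymoo's Population does and that the problem
    defines xl/xu bounds.
    """

    def _do(self, problem, pop, **kwargs):
        import numpy as np
        X = np.clip(pop.get("X"), problem.xl, problem.xu)
        pop.set("X", X)
        return pop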
class NoRepair(Repair):
"""
A dummy class which can be used to simply do no repair.
"""
def do(self, problem, pop, **kwargs):
return pop | 3.203125 | 3 |
Server/Python/src/dbs/dao/MySQL/BlockSite/Insert.py | vkuznet/DBS | 8 | 12799622 | #!/usr/bin/env python
""" DAO Object for BlockSite table """
from dbs.dao.Oracle.BlockSite.Insert import Insert as OraBlockSiteInsert
class Insert(OraBlockSiteInsert):
pass
| 1.898438 | 2 |
Extra/plot_error.py | niefermar/APPIAN-PET-APPIAN | 1 | 12799623 | import pandas as pd
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import seaborn as sns
from glob import glob
from re import sub
from sys import argv, exit
from os.path import splitext
import numpy as np
import os
def load(fn):
df=pd.read_csv(fn)
return(df)
def plot(df0, df, tracer) :
out_fn = tracer + ".png"
plt.clf()
sns.stripplot(x="roi", y="value", hue="sub", data=df, jitter=True, alpha=.6, zorder=1)
sns.pointplot(x="roi", y="value", data=df0, join=False, palette="dark", markers="d", scale=1.5)
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.savefig(out_fn)
def get_qc_metrics():
fn_list = []
df_list = []
fn_list += glob("raclopride/out_rcl/groupLevelQC/coreg_roc/test_group_qc_auc.csv")
#fn_list += glob("scott/out_fdg/groupLevelQC/coreg_roc/test_group_qc_auc.csv")
#fn_list += glob("fmz/out_fmz/groupLevelQC/coreg_roc/test_group_qc_auc.csv")
for fn in fn_list :
temp=pd.read_csv(fn)
print fn
args = fn.split("/")[4].split("_")
if 'rcl' in fn : temp["tracer"] = ["rcl"] * temp.shape[0]
elif 'fdg' in fn : temp["tracer"] = ["fdg"] * temp.shape[0]
elif 'fmz' in fn : temp["tracer"] = ["fmz"] * temp.shape[0]
#temp["frame"] = [0] * temp.shape[0]
#temp["errortype"] = [args[1]] * temp.shape[0]
#temp["error"] = [int(args[2])] * temp.shape[0]
df_list += [temp]
df = pd.concat(df_list)
return(df)
def get_error():
fn_list=[]
fn_list += glob("fmz/out_fmz/preproc/_args_**/_angle_**/**/*_3d.csv")
fn_list += glob("scott/out_fdg/preproc/_args_**/_angle_**/**/*_3d.csv")
fn_list += glob("raclopride/out_rcl/preproc/_args_**/_angle_**/**/*_3d.csv")
df_list = []
for fn in fn_list :
temp=pd.read_csv(fn)
args = fn.split("/")[4].split("_")
if 'rcl' in fn : temp["tracer"] = ["rcl"] * temp.shape[0]
elif 'fdg' in fn : temp["tracer"] = ["fdg"] * temp.shape[0]
elif 'fmz' in fn : temp["tracer"] = ["fmz"] * temp.shape[0]
temp["frame"] = [0] * temp.shape[0]
temp["errortype"] = [args[1]] * temp.shape[0]
temp["error"] = [int(args[2])] * temp.shape[0]
df_list += [temp]
df = pd.concat(df_list)
df["metric"].loc[ (df["tracer"] == "fmz") & (df["metric"] == "mean")] = "BPnd"
df["metric"].loc[ (df["tracer"] == "fdg") & (df["metric"] == "mean")] = "Ki"
df["metric"].loc[ (df["tracer"] == "rcl") & (df["metric"] == "mean")] = "BPnd"
df["roi"].loc[ (df["tracer"] == "rcl") ] = "Putamen"
df["roi"].loc[ (df["tracer"] == "fmz") ] = "GM"
df["roi"].loc[ (df["tracer"] == "fdg") ] = "GM"
df.index = range(df.shape[0])
df["%Accuracy"]= [0] * df.shape[0]
for name, df0 in df.groupby(['tracer','analysis', 'error', 'ses', 'task', 'sub', 'roi']) :
sub=name[5]
ses=name[3]
task=name[4]
error=name[2]
idx = (df.tracer == name[0]) & (df.analysis == name[1]) & (df.ses == name[3]) & (df.task == name[4]) & (df.roi == name[6]) & (df["sub"] == name[5])
zeros_df = df.loc[ (df.tracer == name[0]) & (df.analysis == name[1]) & (df.ses == name[3]) & (df.task == name[4]) & (df.roi == name[6]) & (df.error == 0) & (df["sub"] == name[5]) ]
values = df0["value"].mean()
zeros = zeros_df["value"].mean()
ratio = values / zeros
df["%Accuracy"].loc[(df["error"] == name[2]) & idx] = ratio
return(df)
df_fn = os.getcwd() + os.sep + 'appian_error.csv'
qc_fn = os.getcwd() + os.sep + 'appian_qc.csv'
if not os.path.exists(df_fn) :
df = get_error()
df.to_csv(df_fn)
else :
df = pd.read_csv(df_fn)
if not os.path.exists(qc_fn) :
qc = get_qc_metrics()
qc.to_csv(qc_fn)
else :
qc = pd.read_csv(qc_fn)
print(qc)
exit(0)
df_mean = df.groupby(["analysis","tracer","error","errortype","frame","metric","roi"])["%Accuracy"].mean()
df_mean = df_mean.reset_index()
df_mean["tracer"].loc[ (df_mean["tracer"] == "rcl") ] = "RCL"
df_mean["tracer"].loc[ (df_mean["tracer"] == "fdg") ] = "FDG"
df_mean["tracer"].loc[ (df_mean["tracer"] == "fmz") ] = "FMZ"
df_mean["analysis"].loc[ (df_mean["analysis"] == "tka") ] = "TKA"
df_mean["analysis"].loc[ (df_mean["analysis"] == "pvc") ] = "PVC"
df_mean["analysis"].loc[ (df_mean["analysis"] == "pet-coregistration") ] = "Coregistration"
print df_mean
plt.clf()
plt.figure()
nTracer = len(df["tracer"].unique())
nROI= len(df["analysis"].unique())
i=1
df.rename(index=str, columns={"roi":"ROI","analysis":"Analysis","tracer":"Radiotracer"}, inplace=True)
sns.factorplot(x="error", y="%Accuracy", col="analysis", hue="tracer", palette="muted",kind="swarm",col_order=['Coregistration','PVC','TKA'], sharey=True, data=df_mean)
#for name, df3 in df2.groupby(['sub']) :
# print df3
# sns.swarmplot(x="roi", y='groundtruth',data=df3, palette="bright")
# break
#ax = sns.factorplot(x="roi", y="diff", row="roi", hue="analysis", data=df, palette="Set2", dodge=True)
#grid = sns.FacetGrid(df_mean, row="tracer", col="analysis", sharey=True, palette="muted", size=5)
#grid = grid.map(plt.scatter, "roi", "value")
#grid = grid.map(plt.scatter, "groundtruth", "value")
plt.savefig("appian_error.png")
| 2.15625 | 2 |
feedparser/datetimes/greek.py | verhovsky/feedparser | 0 | 12799624 | <reponame>verhovsky/feedparser<filename>feedparser/datetimes/greek.py
from __future__ import absolute_import, unicode_literals
import re
from .rfc822 import _parse_date_rfc822
# Unicode strings for Greek date strings
_greek_months = \
{ \
'\u0399\u03b1\u03bd': 'Jan', # c9e1ed in iso-8859-7
'\u03a6\u03b5\u03b2': 'Feb', # d6e5e2 in iso-8859-7
'\u039c\u03ac\u03ce': 'Mar', # ccdcfe in iso-8859-7
'\u039c\u03b1\u03ce': 'Mar', # cce1fe in iso-8859-7
'\u0391\u03c0\u03c1': 'Apr', # c1f0f1 in iso-8859-7
'\u039c\u03ac\u03b9': 'May', # ccdce9 in iso-8859-7
'\u039c\u03b1\u03ca': 'May', # cce1fa in iso-8859-7
'\u039c\u03b1\u03b9': 'May', # cce1e9 in iso-8859-7
'\u0399\u03bf\u03cd\u03bd': 'Jun', # c9effded in iso-8859-7
'\u0399\u03bf\u03bd': 'Jun', # c9efed in iso-8859-7
'\u0399\u03bf\u03cd\u03bb': 'Jul', # c9effdeb in iso-8859-7
'\u0399\u03bf\u03bb': 'Jul', # c9f9eb in iso-8859-7
'\u0391\u03cd\u03b3': 'Aug', # c1fde3 in iso-8859-7
'\u0391\u03c5\u03b3': 'Aug', # c1f5e3 in iso-8859-7
'\u03a3\u03b5\u03c0': 'Sep', # d3e5f0 in iso-8859-7
'\u039f\u03ba\u03c4': 'Oct', # cfeaf4 in iso-8859-7
'\u039d\u03bf\u03ad': 'Nov', # cdefdd in iso-8859-7
'\u039d\u03bf\u03b5': 'Nov', # cdefe5 in iso-8859-7
'\u0394\u03b5\u03ba': 'Dec', # c4e5ea in iso-8859-7
}
_greek_wdays = \
{ \
'\u039a\u03c5\u03c1': 'Sun', # caf5f1 in iso-8859-7
'\u0394\u03b5\u03c5': 'Mon', # c4e5f5 in iso-8859-7
'\u03a4\u03c1\u03b9': 'Tue', # d4f1e9 in iso-8859-7
'\u03a4\u03b5\u03c4': 'Wed', # d4e5f4 in iso-8859-7
'\u03a0\u03b5\u03bc': 'Thu', # d0e5ec in iso-8859-7
'\u03a0\u03b1\u03c1': 'Fri', # d0e1f1 in iso-8859-7
'\u03a3\u03b1\u03b2': 'Sat', # d3e1e2 in iso-8859-7
}
_greek_date_format_re = \
re.compile(r'([^,]+),\s+(\d{2})\s+([^\s]+)\s+(\d{4})\s+(\d{2}):(\d{2}):(\d{2})\s+([^\s]+)')
def _parse_date_greek(dateString):
'''Parse a string according to a Greek 8-bit date format.'''
m = _greek_date_format_re.match(dateString)
if not m:
return
wday = _greek_wdays[m.group(1)]
month = _greek_months[m.group(3)]
rfc822date = '%(wday)s, %(day)s %(month)s %(year)s %(hour)s:%(minute)s:%(second)s %(zonediff)s' % \
{'wday': wday, 'day': m.group(2), 'month': month, 'year': m.group(4),\
'hour': m.group(5), 'minute': m.group(6), 'second': m.group(7),\
'zonediff': m.group(8)}
return _parse_date_rfc822(rfc822date)
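# Illustrative input (added for clarity): a feed date such as
#   '\u039a\u03c5\u03c1, 11 \u039c\u03ac\u03b9 2003 12:45:26 +0300'
# matches _greek_date_format_re, is mapped through the tables above to
#   'Sun, 11 May 2003 12:45:26 +0300'
# and is then delegated to _parse_date_rfc822.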
| 2.09375 | 2 |
arxiv/__init__.py | cnglen/arxiv.py | 9 | 12799625 | <reponame>cnglen/arxiv.py<gh_stars>1-10
from .arxiv import *
| 0.851563 | 1 |
core/migrations/0002_auto_20191215_0201.py | IS-AgroSmart/MVP | 0 | 12799626 | # Generated by Django 3.0 on 2019-12-15 02:01
import core.models
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
dependencies = [
('core', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Artifact',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('type', models.CharField(choices=[(core.models.ArtifactType['ORTHOMOSAIC'], 'Orthomosaic'), (core.models.ArtifactType['SHAPEFILE'], 'Shapefile')], max_length=20)),
],
),
migrations.CreateModel(
name='Flight',
fields=[
('uuid', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('name', models.CharField(max_length=50)),
('date', models.DateField(auto_now_add=True)),
('camera', models.CharField(choices=[(core.models.Camera['REDEDGE'], 'Micasense RedEdge'), (core.models.Camera['RGB'], 'RGB')], max_length=10)),
('multispectral_processing', models.BooleanField(default=False)),
('annotations', models.TextField()),
('deleted', models.BooleanField(default=False)),
('state', models.CharField(choices=[(core.models.FlightState['WAITING'], 'Waiting for images'), (core.models.FlightState['PROCESSING'], 'Processing'), (core.models.FlightState['COMPLETE'], 'Complete'), (core.models.FlightState['PAUSED'], 'Paused'), (core.models.FlightState['CANCELED'], 'Canceled')], max_length=10)),
],
),
migrations.RemoveField(
model_name='user',
name='bio',
),
migrations.RemoveField(
model_name='user',
name='birth_date',
),
migrations.RemoveField(
model_name='user',
name='location',
),
migrations.AddField(
model_name='user',
name='organization',
field=models.CharField(blank=True, max_length=20),
),
migrations.AddField(
model_name='user',
name='type',
field=models.CharField(choices=[(core.models.UserType['DEMO_USER'], 'DemoUser'), (core.models.UserType['ACTIVE'], 'Active'), (core.models.UserType['DELETED'], 'Deleted'), (core.models.UserType['ADMIN'], 'Admin')], default=core.models.UserType['DEMO_USER'], max_length=20),
),
migrations.CreateModel(
name='UserProject',
fields=[
('uuid', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('name', models.CharField(max_length=50)),
('description', models.TextField()),
('deleted', models.BooleanField(default=False)),
('artifacts', models.ManyToManyField(related_name='user_projects', to='core.Artifact')),
('flights', models.ManyToManyField(related_name='user_projects', to='core.Flight')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='user_projects', to=settings.AUTH_USER_MODEL)),
],
options={
'abstract': False,
},
),
migrations.AddField(
model_name='flight',
name='user',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
migrations.CreateModel(
name='DemoProject',
fields=[
('uuid', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('name', models.CharField(max_length=50)),
('description', models.TextField()),
('deleted', models.BooleanField(default=False)),
('artifacts', models.ManyToManyField(related_name='demo_projects', to='core.Artifact')),
('flights', models.ManyToManyField(related_name='demo_projects', to='core.Flight')),
('users', models.ManyToManyField(related_name='demo_projects', to=settings.AUTH_USER_MODEL)),
],
options={
'abstract': False,
},
),
migrations.AddField(
model_name='artifact',
name='flight',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='artifacts', to='core.Flight'),
),
]
| 1.765625 | 2 |
Classifier/classify.py | FoodLossFYDP/AmbrosiaImageService | 0 | 12799627 | import numpy as np
import cv2
cascade = cv2.CascadeClassifier('cascade.xml')
img = cv2.imread('orange.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
oranges = cascade.detectMultiScale(gray, 1.05, 15, 0, (150,150))
for (x,y,w,h) in oranges:
cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)
imgs = cv2.resize(img, (img.shape[1] // 5, img.shape[0] // 5))  # integer sizes keep this working under Python 3 as well
cv2.imshow('img',imgs)
cv2.waitKey(0)
cv2.destroyAllWindows()
# # show image thats being collected
# $ for filename in Positives/*.jpg;
# $ do opencv_createsamples -img ${filename} -bg negatives.txt -num 25 -bgcolor 255 -w 60 -h 60 -vec output${filename}.vec -maxzangle 0.5 -maxyangle 0.3 -maxxangle 0.3;
# $ done
# $ opencv_traincascade -data Classifier -vec allvecs.vec -bg negatives.txt -numNeg 1000 -numPos 3000 -numStages 11 -h 60 -w 60 -minHitRate 0.99 -maxFalseAlarmRate 0.5 -featureType HAAR -precalcValBufSize 2048 -precalcIdxBufSize 2048
| 2.71875 | 3 |
armory_config/settings.py | 3ndG4me/Configs | 2 | 12799628 | <reponame>3ndG4me/Configs
#!/usr/bin/python3
import os
import pathlib
data_path = pathlib.Path(str(pathlib.Path().absolute()) + "/armory_data")
home = str(pathlib.Path.home())
absolute_home = "CHANGEME"
custom_modules = absolute_home + "/tools/armory_custom/modules"
custom_reports = absolute_home + "/tools/armory_custom/reports"
custom_webapps = absolute_home + "/tools/armory_webapps"
if data_path.exists():
data_path = str(pathlib.Path().absolute()) + "/armory_data"
else:
os.mkdir("armory_data")
data_path = str(pathlib.Path().absolute()) + "/armory_data"
ARMORY_CONFIG = {
'ARMORY_BASE_PATH': data_path,
'ARMORY_CUSTOM_MODULES': [
custom_modules,
],
'ARMORY_CUSTOM_REPORTS': [
custom_reports,
],
'ARMORY_CUSTOM_WEBAPPS': [
custom_webapps,
],
}
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(ARMORY_CONFIG['ARMORY_BASE_PATH'], 'db.sqlite3'),
}
} | 1.648438 | 2 |
tnChecker.py | BlackTurtle123/TN-ETH-Gateway | 0 | 12799629 | <gh_stars>0
import os
import sqlite3 as sqlite
import requests
import time
import base58
import PyCWaves
import traceback
import sharedfunc
from web3 import Web3
from verification import verifier
class TNChecker(object):
def __init__(self, config):
self.config = config
self.dbCon = sqlite.connect('gateway.db')
self.node = self.config['tn']['node']
self.w3 = Web3(Web3.HTTPProvider(self.config['erc20']['node']))
self.privatekey = os.getenv(self.config['erc20']['seedenvname'], self.config['erc20']['privateKey'])
self.verifier = verifier(config)
cursor = self.dbCon.cursor()
self.lastScannedBlock = cursor.execute('SELECT height FROM heights WHERE chain = "TN"').fetchall()[0][0]
def getCurrentBlock(self):
#return current block on the chain - try/except in case of timeouts
try:
CurrentBlock = requests.get(self.node + '/blocks/height').json()['height'] - 1
except:
CurrentBlock = 0
return CurrentBlock
def run(self):
        # main routine to run continuously
print('started checking tn blocks at: ' + str(self.lastScannedBlock))
self.dbCon = sqlite.connect('gateway.db')
while True:
try:
nextblock = self.getCurrentBlock() - self.config['tn']['confirmations']
if nextblock > self.lastScannedBlock:
self.lastScannedBlock += 1
self.checkBlock(self.lastScannedBlock)
cursor = self.dbCon.cursor()
cursor.execute('UPDATE heights SET "height" = ' + str(self.lastScannedBlock) + ' WHERE "chain" = "TN"')
self.dbCon.commit()
except Exception as e:
self.lastScannedBlock -= 1
print('Something went wrong during tn block iteration: ')
print(traceback.TracebackException.from_exception(e))
time.sleep(self.config['tn']['timeInBetweenChecks'])
def checkBlock(self, heightToCheck):
#check content of the block for valid transactions
block = requests.get(self.node + '/blocks/at/' + str(heightToCheck)).json()
for transaction in block['transactions']:
if self.checkTx(transaction):
targetAddress = base58.b58decode(transaction['attachment']).decode()
targetAddress = self.w3.toChecksumAddress(targetAddress)
if not(self.w3.isAddress(targetAddress)):
self.faultHandler(transaction, "txerror")
else:
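                    # Convert the raw TN amount to whole tokens, subtract the configured
                    # gateway fee, then rescale and round to the ERC20 token's integer precision.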
amount = transaction['amount'] / pow(10, self.config['tn']['decimals'])
amount -= self.config['erc20']['fee']
amount *= pow(10, self.config['erc20']['decimals'])
amount = int(round(amount))
if amount < 0:
self.faultHandler(transaction, "senderror", e='under minimum amount')
else:
try:
nonce = self.w3.eth.getTransactionCount(self.config['erc20']['gatewayAddress'])
if self.config['erc20']['gasprice'] > 0:
gasprice = self.w3.toWei(self.config['erc20']['gasprice'], 'gwei')
else:
gasprice = int(self.w3.eth.gasPrice * 1.1)
tx = {
'to': targetAddress,
'value': amount,
'gas': self.config['erc20']['gas'],
'gasPrice': gasprice,
'nonce': nonce,
'chainId': self.config['erc20']['chainid']
}
signed_tx = self.w3.eth.account.signTransaction(tx, private_key=self.privatekey)
txId = self.w3.eth.sendRawTransaction(signed_tx.rawTransaction)
if not(str(txId.hex()).startswith('0x')):
self.faultHandler(transaction, "senderror", e=txId.hex())
else:
print("send tx: " + str(txId.hex()))
cursor = self.dbCon.cursor()
amount /= pow(10, self.config['erc20']['decimals'])
cursor.execute('INSERT INTO executed ("sourceAddress", "targetAddress", "tnTxId", "ethTxId", "amount", "amountFee") VALUES ("' + transaction['sender'] + '", "' + targetAddress + '", "' + transaction['id'] + '", "' + txId.hex() + '", "' + str(round(amount)) + '", "' + str(self.config['erc20']['fee']) + '")')
self.dbCon.commit()
print('send tokens from tn to other network!')
self.verifier.verifyOther(txId)
except Exception as e:
self.faultHandler(transaction, "txerror", e=e)
continue
def checkTx(self, tx):
#check the transaction
if tx['type'] == 4 and tx['recipient'] == self.config['tn']['gatewayAddress'] and tx['assetId'] == self.config['tn']['assetId']:
#check if there is an attachment
targetAddress = base58.b58decode(tx['attachment']).decode()
if len(targetAddress) > 1:
#check if we already processed this tx
cursor = self.dbCon.cursor()
result = cursor.execute('SELECT ethTxId FROM executed WHERE tnTxId = "' + tx['id'] + '"').fetchall()
if len(result) == 0: return True
else:
self.faultHandler(tx, 'noattachment')
return False
def faultHandler(self, tx, error, e=""):
#handle transfers to the gateway that have problems
amount = tx['amount'] / pow(10, self.config['tn']['decimals'])
timestampStr = sharedfunc.getnow()
if error == "noattachment":
cursor = self.dbCon.cursor()
cursor.execute('INSERT INTO errors ("sourceAddress", "targetAddress", "ethTxId", "tnTxId", "amount", "error") VALUES ("' + tx['sender'] + '", "", "", "' + tx['id'] + '", "' + str(amount) + '", "no attachment found on transaction")')
self.dbCon.commit()
print(timestampStr + " - Error: no attachment found on transaction from " + tx['sender'] + " - check errors table.")
if error == "txerror":
targetAddress = base58.b58decode(tx['attachment']).decode()
cursor = self.dbCon.cursor()
cursor.execute('INSERT INTO errors ("sourceAddress", "targetAddress", "ethTxId", "tnTxId", "amount", "error", "exception") VALUES ("' + tx['sender'] + '", "' + targetAddress + '", "", "' + tx['id'] + '", "' + str(amount) + '", "tx error, possible incorrect address", "' + str(e) + '")')
self.dbCon.commit()
print(timestampStr + " - Error: on outgoing transaction for transaction from " + tx['sender'] + " - check errors table.")
if error == "senderror":
targetAddress = base58.b58decode(tx['attachment']).decode()
cursor = self.dbCon.cursor()
cursor.execute('INSERT INTO errors ("sourceAddress", "targetAddress", "ethTxId", "tnTxId", "amount", "error", "exception") VALUES ("' + tx['sender'] + '", "' + targetAddress + '", "", "' + tx['id'] + '", "' + str(amount) + '", "tx error, check exception error", "' + str(e) + '")')
self.dbCon.commit()
print(timestampStr + " - Error: on outgoing transaction for transaction from " + tx['sender'] + " - check errors table.")
| 2.34375 | 2 |
src/yellowdog_client/model/compute_requirement_status.py | yellowdog/yellowdog-sdk-python-public | 0 | 12799630 | from enum import Enum
class ComputeRequirementStatus(Enum):
"""
Describes the status of a compute requirement.
The status of a compute requirement provides an aggregated view of the statuses of compute machine instances provisioned for that requirement.
"""
NEW = "NEW"
"""The compute requirement has been created and submitted to YellowDog Compute."""
PENDING = "PENDING"
"""
YellowDog Compute is in the process of provisioning compute machine instances to meet the requirement.
The requirement will remain in PENDING state until all newly provisioned instances have transitioned to RUNNING or TERMINATED (in the case where a provider cancels provision).
"""
STARTING = "STARTING"
"""
The computer machine instances provisioned for the requirement are being re-started after having been stopped.
Some instances may already have started, however the requirement is still considered to be STARTING until all instances have started.
"""
RUNNING = "RUNNING"
"""
The computer machine instances provisioned for the requirement are running.
Individual instances may be independently transitioned to other states but the requirement is still considered to be running.
"""
STOPPING = "STOPPING"
"""
The computer machine instances provisioned for the requirement are being stopped.
Some instances may already have stopped, however the requirement is still considered to be STOPPING until all instances have stopped.
"""
STOPPED = "STOPPED"
"""The computer machine instances provisioned for the requirement have stopped."""
TERMINATING = "TERMINATING"
"""
The computer machine instances provisioned for the requirement are being terminated.
Some instances may already be terminated, however the requirement is still considered to be TERMINATING until all instances have terminated.
"""
TERMINATED = "TERMINATED"
"""
The computer machine instances provisioned for the requirement have been terminated.
At this point the compute requirement may no longer be changed and is considered to be in a final state.
"""
def __str__(self) -> str:
return self.name
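# Illustrative usage (editor addition, not part of the SDK): the docstrings above
# mark TERMINATED as the only final state, so polling code can stop there.
def _is_final(status: ComputeRequirementStatus) -> bool:
    return status is ComputeRequirementStatus.TERMINATED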
| 3.46875 | 3 |
hello.py | jas5mg/cs3240-labdemo | 0 | 12799631 | from helper import greeting
greeting("What's Up", "Jake")
| 1.28125 | 1 |
src/tests/test_bitfinex_algo.py | medvi/python-bitfinex_algo | 0 | 12799632 | <gh_stars>0
import logging
import unittest
from bitfinex_algo.cli import load_config, validate_config
from bitfinex_algo import cli as c
logger = logging.getLogger('bitfinex')
class ConfigTests(unittest.TestCase):
@classmethod
def setUpClass(cls):
logging.disable(logging.CRITICAL)
@classmethod
def tearDownClass(cls):
logging.disable(logging.NOTSET)
def test_load_config(self):
self.assertIsNone(load_config('tests/config/invalid_config_1.yaml'))
self.assertDictEqual(
validate_config(load_config('tests/config/valid_config_5.yaml')),
{
c.LEVELS: [{
c.BUY_PRICE: 95,
c.SELL_PRICE: 100,
c.ORDER_SIZE: 100,
c.ORDER_COUNT: 2
}, {
c.BUY_PRICE: 100,
c.SELL_PRICE: 105,
c.ORDER_SIZE: 100,
c.ORDER_COUNT: 1
}],
c.UPDATE_FREQUENCY: 3,
}
)
def test_validate_config(self):
for i in range(2, 5):
with self.subTest(i=i):
config = load_config(f'tests/config/invalid_config_{i}.yaml')
self.assertIsNone(validate_config(config))
| 2.578125 | 3 |
G00364742.py | SomanathanSubramaniyan/Applied-Databases | 0 | 12799633 | # Applied Database
# Final Project
# Section 4.4 - Python program answers
# Author : Somu
#mySQL modules import
import mysql.connector
from mysql.connector import Error
from mysql.connector import errorcode
import pandas as pd
#Mongo modules import
import pymongo
from pymongo import MongoClient
#Pandas printing module
from tabulate import tabulate
# This function will display a Menu as requested in the project specification
def menu():
print("--------")
print("World DB")
print("--------")
print("Menu")
print("====")
print("1 - View 15 Cities")
print("2 - View Cities by population")
print("3 - Add New City")
print("4 - Find Car by Engine Size")
print("5 - Add New Car")
print("6 - View Countries by name")
print("7 - View Countries by population")
print("x - Exit application")
myclient = None
global dfp, df
dfp =""
df = pd.DataFrame()
def Mongoconnect(csize,choice,id,reg,size):
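    """Query or insert car documents in the local MongoDB 'docs' collection.
    choice "4": print cars whose car.engineSize equals csize.
    choice "5": insert a new car document built from id, reg and size.
    """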
try:
global myclient
myclient =pymongo.MongoClient(host = "localhost",port=27017)
myclient.admin.command('ismaster')
mydb = myclient['docs']
docs = mydb["docs"]
if choice == "4":
query = {"car.engineSize":float(csize)}
car = docs.find(query)
for p in car:
print ('{0} | {1} | {2} '.format(p["_id"],p["car"],p["addresses"]))
if choice == "5":
query={"_id":int(id), "car": { "reg":reg,"engineSize":float(size)}}
x = docs.insert_one(query)
query = {"_id":int(id)}
car = docs.find(query)
for p in car:
print (p)
except :
print ("******Error Occurred while executing Mongo commands******")
def globalSet ():
global dfp
dfp = "2"
def DBconnection(query,choice,code,param1):
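    """Run the given query against the local MySQL 'world' database.
    choice "1"/"2": list cities, "3": insert a new city,
    "6"/"7": filter the cached country DataFrame by name or by population
    (param1 carries the <, > or = comparison for choice "7").
    """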
try:
connection = mysql.connector.connect(host='localhost',database='world', user='root', password='<PASSWORD>')
cursor = connection.cursor(prepared=True)
global dfp,df
if (choice == "6" or choice == "7") and dfp != "2" :
df = pd.read_sql_query(query, connection)
globalSet()
if choice == "1" :
cursor.execute(query)
names = list(map(lambda x: x[0], cursor.description))
print("----------------------------------------------------------------------------------")
print("{:5} | {:^20} | {:^12} | {:^20} | {:10}".format(names[0],names[1],names[2],names[3],names[4]))
print("----------------------------------------------------------------------------------")
for (id,name, countrycode, district,population, latitue,longitude) in cursor:
print("{:5} | {:^20} | {:^12} | {:^20} | {:d}".format(id,name, countrycode, district,population))
elif choice == "2" :
cursor.execute(query)
names = list(map(lambda x: x[0], cursor.description))
print("----------------------------------------------------------------------------------")
print("{:5} | {:^20} | {:^12} | {:^20} | {:10}".format(names[0],names[1],names[2],names[3],names[4]))
print("----------------------------------------------------------------------------------")
for (id,name, countrycode, district,population, latitue,longitude) in cursor:
print("{:5} | {:^20} | {:^12} | {:^20} | {:d}".format(id,name, countrycode, district,population))
elif choice == "3":
cursor.execute(query)
            connection.commit()  # commit() must be called, not just referenced, for the insert to persist
print("**** RESULT ***** The new city record is inserted into the table")
elif choice == "6" :
df1 = df[df["Name"].str.contains(code)].loc[:,["Name","Continent","population","HeadofState"]]
#print tabulate(df1.to_string(index=False))
print(tabulate(df1, headers="keys",tablefmt="orgtbl"))
elif choice == "7":
if param1 == ">":
df1 = df[(df["population"] > int(code)) ].loc[:,["Name","Continent","population","HeadofState"]]
elif param1 == "<":
df1 = df[(df["population"] < int(code)) ].loc[:,["Name","Continent","population","HeadofState"]]
elif param1 == "=":
df1 = df[(df["population"] == int(code)) ].loc[:,["Name","Continent","population","HeadofState"]]
print(tabulate(df1, headers="keys",tablefmt="orgtbl"))
except mysql.connector.Error as error :
if error.errno == errorcode.ER_ACCESS_DENIED_ERROR:
print("Something is wrong with your user name or password")
elif error.errno == errorcode.ER_BAD_DB_ERROR:
print("Database does not exist")
elif error.errno == 1452:
print("----------------------------------------------------")
print("***ERROR***: Country Code "+ code + " does not exist")
print("----------------------------------------------------")
else:
print("Failed to connect to the database: {}".format(error))
connection.rollback()
finally:
#closing database connection.
if(connection.is_connected()):
connection.close()
def displaymenu():
print("This is not a valid choice. You can only choose from the above options")
input("\nPress enter to continue...")
def main():
while True:
menu()
choice = input("Choice : --> ")
Code,param1 = "",""
if choice == "x":
print("Bye - Program Terminate now and welcome back anytime!")
return
elif choice == "1":
query= "select * from city limit 15"
DBconnection (query, choice,Code,param1)
elif choice == "2":
print("Cities by Population")
print("--------------------")
while True:
Comparison = input("Enter <, > or = :")
if Comparison == "<" or Comparison == ">" or Comparison == "=":
query = "select * from city where population" + Comparison
break
else:
displaymenu()
while True:
Value= input("Enter Population :")
if Value.isdigit() == True:
query = query + str(Value)
break
else:
displaymenu()
DBconnection (query, choice,Code,param1)
elif choice == "3":
print("Add New City")
print("------------")
City= input("Enter City Name :")
Code= input("Country Code :")
district= input("District :")
pop= input("Population :")
query = "Insert INTO city (name, countrycode,district,population) VALUES ('" + City + "','" + Code + "','" + district + "',"+ str(pop)+")"
DBconnection (query, choice, Code,param1)
elif choice == "6":
print("Countries by Name")
print("-----------------")
Ctyname = input("Enter Country Name :")
query = "select code, Name, Continent,population,HeadofState from country"
Code=Ctyname
DBconnection (query, choice, Code,param1)
elif choice == "7":
print("Countries by Population")
print("-----------------------")
query = "select code, Name, Continent,population,HeadofState from country"
while True:
Comparison = input("Enter <, > or = :")
if Comparison == "<" or Comparison == ">" or Comparison == "=":
param1=Comparison
break
else:
displaymenu()
while True:
Value= input("Enter Population :")
if Value.isdigit() == True:
Code = Value
break
else:
displaymenu()
DBconnection (query, choice, Code,param1)
elif choice == "4":
print("show cars by engine size")
print("------------------------")
while True:
csize = input("Enter Car Engine Size :")
if csize.isdigit() == True:
csize = csize
break
else:
displaymenu()
Mongoconnect(csize,choice,"","","")
elif choice == "5":
print("Add New Car")
print("-----------")
id= input("_ids:")
reg= input("Enter reg :")
size= input("Enter Size :")
Mongoconnect("",choice,id,reg,size)
else:
print("That is not a valid choice. You can only choose from the menu.")
input("\nPress enter to continue...")
if __name__ == "__main__":
main() | 3.53125 | 4 |
recipes/Python/543261_grade_keeper/recipe-543261.py | tdiprima/code | 2,023 | 12799634 | #! /usr/bin/python
# keep record of grades. Made by <NAME>. 0.1-PUBLIC
# NOTE! All letter answers are to be written in quotes (including dates)!
print """############################################
# Welcome to Gradebook! v 0.1 #
# YOUR LIGHT WEIGHT SCHOOL RECORD MANAGER! #
############################################"""
subject = raw_input("What is your assignment's subject? ")
# ^^This asks your class subject; assigns it to 'subject'; and is used later.
date = input('What is the date for your assignment? ')
# ^^This is pretty much the same: but asks the date.
amount = input('What is the number of questions? (NOTE: make all #s from now decimals. e.g.: "5.0" ')
# ^^^This is also the same, but make the number a DECIMAL!
correct = input('How many questions did you get correct? ')
# ^^^The same... make all DECIMALS!
calc = divmod(correct, amount)
# divmod returns the (quotient, remainder) pair of correct/amount, assigned to 'calc'
calcx = (correct / amount)
# divides correct by amount; assigns to 'calcx'
text = "***%s*** \n %s | %d out of %d | %s or %s \n" % (date, subject, correct, amount, calc, calcx)
# creates what will be in your file. assigns to 'text'
print text
# prints what it will put in your file (or append).
fle = raw_input('What should I name the file to put the above data into? ')
# prompts for a filename
A = input('Do you want this to be appended to an existing file? ')
# decides to either append,or to create new file. assigns answer to 'A'
print 'Thanks! appending to file... '
if A == 'yes': # if you answered yes (string comparison needs ==, not 'is'):
fyl = open(fle, 'a')
# the phrase 'fyl' is used to combine open('fle, 'a') with future commands
fyl.write(text)
# the command assigned to 'fyl' writes your data to the filename you said.
fyl.close()
# closes the file; job is done.
elif A == 'no': # if you said no, this will happen:
fyl = open(fle, 'w')
# same as before, but saves the file (see the 'w' instead of 'a'?)
fyl.write(text)
# same
fyl.close()
# same
else: # and if nothing was valid...
print 'Error! Invalid transaction! '
# ...error message!
print 'Done!'
# says it is done
raw_input("Press <RETURN> to quit.")
# makes you type <enter> to quit.
| 4.125 | 4 |
Scripts4orthology/orthology_analysis.py | tarunaaggarwal/G.morbida.Comp.Gen | 1 | 12799635 | <gh_stars>1-10
import sys
from collections import Counter
f = open(sys.argv[1], "r")
groups = sys.argv[2:]
one_to_one = []
ortho_plus_paralog = []
one_to_one_notallspecies = []
ortho_plus_paralog_notallspecies = []
group_single = {}
group_paralog = {}
for group in groups:
group_single[group] = []
group_paralog[group] = []
for line in f:
is_one_to_one = True
is_ortho_plus_paralog = True
for group in groups:
if group not in line:
is_one_to_one = False
is_ortho_plus_paralog = False
break
if line.count(group) > 1:
is_one_to_one = False
if is_one_to_one:
one_to_one.append(line)
elif is_ortho_plus_paralog:
ortho_plus_paralog.append(line)
else:
valid_group = True
cur_group = None
group_count = 0
for group in groups:
line_count = line.count(group)
if line_count == 0:
continue
if cur_group is not None:
valid_group = False
break
cur_group = group
group_count = line_count
if valid_group and cur_group is not None:
if line.count(cur_group) == 1:
group_single[cur_group].append(line)
else:
group_paralog[cur_group].append(line)
else:
other_one_to_one = True
for group in groups:
if line.count(group) > 1:
other_one_to_one = False
break
if other_one_to_one:
one_to_one_notallspecies.append(line)
else:
ortho_plus_paralog_notallspecies.append(line)
one_to_one_out = open("one_to_one.txt", "w")
ortho_plus_paralog_out = open("ortho_plus_paralog.txt", "w")
one_to_one_notallspecies_out = open("one_to_one_notallspecies.txt", "w")
ortho_plus_paralog_notallspecies_out = open("ortho_plus_paralog_notallspecies.txt", "w")
single_copy_out = open("single_copy.txt", "w")
paralog_out = open("paralogs.txt", "w")
for line in one_to_one:
one_to_one_out.write(line)
for line in one_to_one_notallspecies:
one_to_one_notallspecies_out.write(line)
for line in ortho_plus_paralog:
ortho_plus_paralog_out.write(line)
for line in ortho_plus_paralog_notallspecies:
ortho_plus_paralog_notallspecies_out.write(line)
for key in group_single:
single_copy_out.write("\n{} single copy\n\n".format(key))
for line in group_single[key]:
single_copy_out.write(line)
for key in group_paralog:
paralog_out.write("\n{} paralog\n\n".format(key))
for line in group_paralog[key]:
paralog_out.write(line)
paralog_out.close()
single_copy_out.close()
one_to_one_out.close()
one_to_one_notallspecies_out.close()
ortho_plus_paralog_out.close()
ortho_plus_paralog_notallspecies_out.close()
#
# for key in group_paralog:
# print("\n{} paralog\n".format(key))
# for line in group_paralog[key]:
# print(line.rstrip())
# print("One to One\n\n")
# for line in one_to_one:
# print(line)
# for line in ortho_plus_paralog:
# print(line)
# print("One to One\n\n")
# for line in one_to_one_notallspecies:
# print(line)
# for line in ortho_plus_paralog_notallspecies:
# print(line)
# print(Counter(map(lambda w: "/".join([group for group in groups if group in w]),
# f)))
| 2.890625 | 3 |
annotations/urls.py | connectik/digital-manifesto | 0 | 12799636 | <gh_stars>0
from __future__ import absolute_import, unicode_literals
from django.conf.urls import url, include
from . import views, api_urls
urlpatterns = [
url(r'^api/', include(api_urls, namespace='api'))
]
| 1.46875 | 1 |
reading/migrations/0008_auto_20200401_1324.py | ericrobskyhuntley/vialab.mit.edu | 0 | 12799637 | # Generated by Django 3.0.4 on 2020-04-01 13:24
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('reading', '0007_auto_20200331_2133'),
]
operations = [
migrations.RenameModel(
old_name='ReadingListMetadata',
new_name='ReadingMetadata',
),
]
| 1.65625 | 2 |
cfbackup/core.py | nordicdyno/cfbackup | 1 | 12799638 | """This module provides the main functionality of cfbackup
"""
from __future__ import print_function
import sys
import argparse
import json
import CloudFlare
# https://api.cloudflare.com/#dns-records-for-a-zone-list-dns-records
class CF_DNS_Records(object):
"""
commands for zones manipulation
"""
def __init__(self, ctx):
self._ctx = ctx
def run(self):
"""
run - entry point for DNS records manipulations
"""
cmd = self._ctx.command
if cmd == "show":
self.show()
else:
sys.exit("Command " + cmd + " not implemened for zones")
def show(self):
"""Show CF zones"""
# print("Show DSN records")
try:
records = self._all_records()
except CloudFlare.exceptions.CloudFlareAPIError as e:
exit('/zones %d %s - api call failed' % (e, e))
if not self._ctx.pretty:
print(json.dumps(records, indent=4))
return
records_by_type = {}
types = {}
for rec in records:
if not records_by_type.get(rec["type"]):
types[rec["type"]] = 0
records_by_type[rec["type"]] = []
types[rec["type"]] += 1
records_by_type[rec["type"]].append(rec)
for t in sorted(list(types)):
for rec in records_by_type[t]:
# print(json.dumps(rec, indent=4))
print("Type: {}".format(rec["type"]))
print("Name: {}".format(rec["name"]))
print("Content: {}".format(rec["content"]))
print("TTL: {}{}".format(
rec["ttl"],
" (auto)" if str(rec["ttl"]) == "1" else "",
))
print("Proxied: {}".format(rec["proxied"]))
print("Auto: {}".format(rec["meta"]["auto_added"]))
print("")
print("")
print("-------------------")
print("Records stat:")
print("-------------------")
print("{0: <11} {1: >4}".format("<type>", "<count>"))
for t in sorted(list(types)):
print("{0: <11} {1: >4}".format(t, types[t]))
print("-------------------")
print("{0: <11} {1: >4}".format("Total:", len(records)))
def _all_records(self):
cf = CloudFlare.CloudFlare()
zones = cf.zones.get(params={'name': self._ctx.zone_name, 'per_page': 1})
if len(zones) == 0:
exit('No zones found')
zone_id = zones[0]['id']
cf_raw = CloudFlare.CloudFlare(raw=True)
page = 1
records = []
while True:
raw_results = cf_raw.zones.dns_records.get(
zone_id,
params={'per_page':100, 'page':page},
)
total_pages = raw_results['result_info']['total_pages']
result = raw_results['result']
for rec in result:
records.append(rec)
if page == total_pages:
break
page += 1
return records
# https://api.cloudflare.com/#zone-list-zones
class CF_Zones(object):
"""
commands for zones manipulation
"""
def __init__(self, ctx):
self._ctx = ctx
def run(self):
"""
run - entry point for zones manipulations
"""
cmd = self._ctx.command
if cmd == "show":
self.show()
else:
sys.exit("Command " + cmd + " not implemened for zones")
def show(self):
"""Show CF zones"""
# print("Show cf zones")
try:
zones = self._all_zones()
except CloudFlare.exceptions.CloudFlareAPIError as e:
exit('/zones %d %s - api call failed' % (e, e))
if not self._ctx.pretty:
print(json.dumps(zones, indent=4))
return
for z in zones:
print("Zone: {0: <16} NS: {1}".format(
z["name"],
z["name_servers"][0],
))
for ns in z["name_servers"][1:]:
print(" {0: <16} {1}".format("", ns))
def _all_zones(self):
cf = CloudFlare.CloudFlare(raw=True)
if self._ctx.zone_name:
raw_results = cf.zones.get(params={
'name': self._ctx.zone_name,
'per_page': 1,
'page': 1,
})
return raw_results['result']
page = 1
domains = []
while True:
raw_results = cf.zones.get(params={'per_page':5, 'page':page})
total_pages = raw_results['result_info']['total_pages']
zones = raw_results['result']
for z in zones:
domains.append(z)
if page == total_pages:
break
page += 1
return domains
COMMANDS = [
"show",
# "restore"
]
OBJECT_ENTRYPOINT = {
"zones": CF_Zones,
"dns": CF_DNS_Records,
}
def main():
"""Main entry"""
parser = argparse.ArgumentParser(
prog="cfbackup",
description='Simple Cloudflare backup tool.',
)
parser.add_argument(
"command",
choices=[x for x in COMMANDS],
help="command",
)
subparsers = parser.add_subparsers(
help='Object of command',
dest="object"
)
parser_zones = subparsers.add_parser("zones")
parser_zones.add_argument(
"--pretty",
action='store_true',
help="show user friendly output",
)
parser_zones.add_argument(
"-z", "--zone-name",
help="optional zone name",
)
parser_dns = subparsers.add_parser("dns")
parser_dns.add_argument(
"-z", "--zone-name",
required=True,
help="required zone name",
)
parser_dns.add_argument(
"--pretty",
action='store_true',
help="show user friendly output",
)
args = parser.parse_args()
OBJECT_ENTRYPOINT[args.object](args).run()
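# Illustrative invocations (derived from the argparse setup above):
#   cfbackup show zones --pretty
#   cfbackup show dns -z example.com --pretty
# API credentials are expected to be supplied through the CloudFlare client's
# own configuration; nothing is passed on the command line here.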
| 2.703125 | 3 |
run_http_measurements.py | kosekmi/quic-opensand-emulation | 0 | 12799639 | <gh_stars>0
# Original script: https://github.com/Lucapaulo/web-performance/blob/main/run_measurements.py
import re
import time
import selenium.common.exceptions
from selenium import webdriver
from selenium.webdriver.chrome.options import Options as chromeOptions
import sys
from datetime import datetime
import hashlib
import uuid
import os
import csv
# performance elements to extract
measurement_elements = ('protocol', 'server', 'domain', 'timestamp', 'connectEnd', 'connectStart', 'domComplete',
'domContentLoadedEventEnd', 'domContentLoadedEventStart', 'domInteractive', 'domainLookupEnd',
'domainLookupStart', 'duration', 'encodedBodySize', 'decodedBodySize', 'transferSize',
'fetchStart', 'loadEventEnd', 'loadEventStart', 'requestStart', 'responseEnd', 'responseStart',
'secureConnectionStart', 'startTime', 'firstPaint', 'firstContentfulPaint', 'nextHopProtocol', 'cacheWarming', 'error')
file_elements = ('pep', 'run')
# retrieve input params
try:
protocol = sys.argv[1]
server = sys.argv[2]
chrome_path = sys.argv[3]
output_dir = sys.argv[4]
file_elements_values = sys.argv[5].split(';')
except IndexError:
print("Input params incomplete (protocol, server, chrome_driver, output_dir)")
sys.exit(1)
if len(file_elements) != len(file_elements_values):
print("Number of file elements does not match")
sys.exit(1)
# Chrome options
chrome_options = chromeOptions()
chrome_options.add_argument('--no-sandbox')
chrome_options.add_argument('--headless')
chrome_options.add_argument('--disable-dev-shm-usage')
if protocol == 'quic':
chrome_options.add_argument('--enable-quic')
chrome_options.add_argument('--origin-to-force-quic-on=example.com:443')
chrome_options.add_argument('--allow_unknown_root_cer')
chrome_options.add_argument('--disable_certificate_verification')
chrome_options.add_argument('--ignore-urlfetcher-cert-requests')
chrome_options.add_argument(f"--host-resolver-rules=MAP example.com {server}")
chrome_options.add_argument('--verbose')
chrome_options.add_argument('--disable-http-cache')
# Function to create: openssl x509 -pubkey < "pubkey.pem" | openssl pkey -pubin -outform der | openssl dgst -sha256 -binary | base64 > "fingerprints.txt"
chrome_options.add_argument('--ignore-certificate-errors-spki-list=D29LAH0IMcLx/d7R2JAH5bw/YKYK9uNRYc6W0/GJlS8=')
def create_driver():
return webdriver.Chrome(options=chrome_options, executable_path=chrome_path)
def get_page_performance_metrics(driver, page):
script = """
// Get performance and paint entries
var perfEntries = performance.getEntriesByType("navigation");
var paintEntries = performance.getEntriesByType("paint");
var entry = perfEntries[0];
var fpEntry = paintEntries[0];
var fcpEntry = paintEntries[1];
// Get the JSON and first paint + first contentful paint
var resultJson = entry.toJSON();
resultJson.firstPaint = 0;
resultJson.firstContentfulPaint = 0;
try {
for (var i=0; i<paintEntries.length; i++) {
var pJson = paintEntries[i].toJSON();
if (pJson.name == 'first-paint') {
resultJson.firstPaint = pJson.startTime;
} else if (pJson.name == 'first-contentful-paint') {
resultJson.firstContentfulPaint = pJson.startTime;
}
}
} catch(e) {}
return resultJson;
"""
try:
driver.set_page_load_timeout(60)
if protocol == 'quic':
driver.get(f'https://{page}')
else:
driver.get(f'http://{page}')
return driver.execute_script(script)
except selenium.common.exceptions.WebDriverException as e:
return {'error': str(e)}
def perform_page_load(page, cache_warming=0):
driver = create_driver()
timestamp = datetime.now()
performance_metrics = get_page_performance_metrics(driver, page)
# insert page into database
if 'error' not in performance_metrics:
# Print page source
# print(driver.page_source)
driver.save_screenshot(f'{output_dir}/screenshot.png')
insert_performance(page, performance_metrics, timestamp, cache_warming=cache_warming)
else:
insert_performance(page, {k: 0 for k in measurement_elements}, timestamp, cache_warming=cache_warming,
error=performance_metrics['error'])
driver.quit()
def create_measurements_table():
new = False
global local_csvfile
file_path = f'{output_dir}/http.csv' if file_elements_values[0] == 'false' else f'{output_dir}/http_pep.csv'
if os.path.isfile(file_path):
local_csvfile = open(file_path, mode='a')
else:
local_csvfile = open(file_path, mode='w')
new = True
global csvfile
csvfile = csv.writer(local_csvfile, delimiter=';')
if new == True:
headers = file_elements + measurement_elements
csvfile.writerow(headers)
def insert_performance(page, performance, timestamp, cache_warming=0, error=''):
performance['protocol'] = protocol
performance['server'] = server
performance['domain'] = page
performance['timestamp'] = timestamp
performance['cacheWarming'] = cache_warming
performance['error'] = error
values = file_elements_values.copy()
for m_e in measurement_elements:
values.append(performance[m_e])
csvfile.writerow(values)
create_measurements_table()
# performance measurement
perform_page_load("example.com")
local_csvfile.close()
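# Illustrative invocation (argument order as parsed above; all values are examples only):
#   python run_http_measurements.py quic 203.0.113.10 /usr/bin/chromedriver ./results "false;1"
# The last argument supplies the 'pep' and 'run' file elements separated by ';'.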
| 2.578125 | 3 |
ohsomeTools/OhsomeToolsPlugin.py | GIScience/ohsome-qgis-plugin | 3 | 12799640 | <reponame>GIScience/ohsome-qgis-plugin
# -*- coding: utf-8 -*-
"""
/***************************************************************************
ohsomeTools
A QGIS plugin
QGIS client to query the ohsome API
-------------------
begin : 2021-05-01
git sha : $Format:%H$
copyright : (C) 2021 by <NAME>
email : <EMAIL>
***************************************************************************/
This plugin provides access to the ohsome API (https://api.ohsome.org),
developed and maintained by the Heidelberg Institute for Geoinformation
Technology, HeiGIT gGmbH, Heidelberg, Germany.
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from qgis.core import QgsApplication
from .gui import OhsomeToolsDialog
from .proc import provider
class OhsomeTools:
"""QGIS Plugin Implementation."""
# noinspection PyTypeChecker,PyArgumentList,PyCallByClass
def __init__(self, iface):
"""Constructor.
:param iface: An interface instance that will be passed to this class
which provides the hook by which you can manipulate the QGIS
application at run time.
:type iface: QgsInterface
"""
self.dialog = OhsomeToolsDialog.OhsomeToolsDialogMain(iface)
self.provider = provider.OhsomeToolsProvider()
def initGui(self):
"""Create the menu entries and toolbar icons inside the QGIS GUI."""
QgsApplication.processingRegistry().addProvider(self.provider)
self.dialog.initGui()
def unload(self):
"""remove menu entry and toolbar icons"""
QgsApplication.processingRegistry().removeProvider(self.provider)
self.dialog.unload()
| 1.085938 | 1 |
dashmat/custom/reviews/main.py | realestate-com-au/python-dashing-deploy | 0 | 12799641 | <filename>dashmat/custom/reviews/main.py
from dashmat.core_modules.base import Module
class Reviews(Module):
@classmethod
def dependencies(kls):
yield "dashmat.core_modules.components.main:Components"
| 1.460938 | 1 |
scripts/load_sim.py | ori-goals/lfd-min-human-effort | 1 | 12799642 | <filename>scripts/load_sim.py
#!/usr/bin/env python
from learn_to_manipulate.simulate import Simulation
import rospy
if __name__ == "__main__" :
rospy.init_node('learn_to_manipulate')
sim = Simulation.load_simulation('/home/marcrigter/2019-08-08-11-49_learnt1_key_teleop0.pkl')
case_number = 10
sim.run_new_episode(case_number, controller_type = 'learnt')
| 1.953125 | 2 |
web_api/yonyou/reqparsers/enum_items.py | zhanghe06/flask_restful | 1 | 12799643 | #!/usr/bin/env python
# encoding: utf-8
"""
@author: zhanghe
@software: PyCharm
@file: enum_items.py
@time: 2018-08-23 15:55
"""
from __future__ import unicode_literals
structure_key_item = 'enum_item'
structure_key_items = 'enum_items'
structure_key_item_cn = '枚举类型'
structure_key_items_cn = '枚举类型'
| 1.3125 | 1 |
awesomo.py | buurz-forks/awesomo | 0 | 12799644 | <filename>awesomo.py
print("Hello, AWESOME-O!")
| 1.265625 | 1 |
goto/gotomagic/text/__init__.py | technocake/goto | 10 | 12799645 | <reponame>technocake/goto
# -*- coding: utf-8 -*-
"""
Text used by GOTO to do UX.
"""
from .text import GotoError, GotoWarning, print_text | 1.289063 | 1 |
covid_api/core/__init__.py | NASA-IMPACT/covid-api | 14 | 12799646 | <gh_stars>10-100
"""covid_api.core"""
| 0.859375 | 1 |
setup.py | NewStore-oss/json-encoder | 0 | 12799647 | import os
from setuptools import setup
VERSION = "1.0.4"
NAMESPACE = "newstore"
NAME = "{}.json_encoder".format(NAMESPACE)
def local_text_file(file_name):
path = os.path.join(os.path.dirname(__file__), file_name)
with open(path, "rt") as fp:
file_data = fp.read()
return file_data
setup(
name=NAME,
version=VERSION,
description="JSONEncoder",
long_description=local_text_file("README.md"),
long_description_content_type="text/markdown",
author="NewStore Inc.",
author_email="<EMAIL>",
url="https://github.com/NewStore-oss/json-encoder",
zip_safe=True,
packages=[NAME],
namespace_packages=[NAMESPACE],
python_requires=">=3.6,<3.9",
package_data={NAME: []},
install_requires=["setuptools"],
)
| 1.984375 | 2 |
generalgui/properties/funcs.py | Mandera/generalgui | 1 | 12799648 | from generallibrary import getBaseClassNames, SigInfo, dict_insert, wrapper_transfer
def set_parent_hook(self, parent, _draw=True):
""" :param generalgui.MethodGrouper self:
:param generalgui.MethodGrouper parent: """
if _draw:
for part in self.get_children(depth=-1, include_self=True, gen=True):
part.draw_create()
assert "Contain" in getBaseClassNames(parent) or parent is None
class PartBaseClass:
def draw_create_hook(self, kwargs):
""" Used to decouple properties, called by draw_create which is called by init and set_parent. """
def draw_create_post_hook(self):
""" Called after widget is packed. """
def _deco_draw_queue(func):
""" Append one order to dict for this func call.
Creates a key with id of Part and func's name.
If key exists as an old order then it's removed.
Returns key unless draw_now is True. """
def _wrapper(*args, **kwargs):
sigInfo = SigInfo(func, *args, **kwargs)
methodGrouper = sigInfo["self"]
orders = methodGrouper.orders
key = methodGrouper.get_order_key(func)
if sigInfo["draw_now"]:
orders.pop(key, None) # This allows us to manually call draw_create(draw_now=True) after instantiating a Page instead of passing draw_now to Page.
sigInfo.call()
else:
orders[key] = sigInfo
return key
# Could possibly do something like this to skip queue instead of drawing instantly
# if sigInfo["draw_now"]:
# dict_insert(orders, **{key: sigInfo})
# else:
# orders[key] = sigInfo
return wrapper_transfer(func, _wrapper)
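# Illustrative sketch (added for clarity): the decorator targets Part methods
# that accept a ``draw_now`` keyword, e.g.
#   @_deco_draw_queue
#   def draw_create(self, draw_now=False): ...
# so part.draw_create() queues the call under its order key, while
# part.draw_create(draw_now=True) executes it immediately.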
| 2.3125 | 2 |
src/os_aio_pod/main.py | zanachka/os-aio-pod | 4 | 12799649 | <filename>src/os_aio_pod/main.py
from os_aio_pod.cmdline import execute
def main():
command_packages = ["os_aio_pod.commands"]
execute(command_packages=command_packages)
| 1.34375 | 1 |
MaxiNet/WorkerServer/tst_driver.py | bramamurthy/P4SwitchesInMaxiNet | 1 | 12799650 | <filename>MaxiNet/WorkerServer/tst_driver.py<gh_stars>1-10
#!/usr/bin/python2
#
# This is a sample program to emulate P4 Switches in Distributed environment
# using Maxinet. The skeleton application program should be like this
#
import argparse
import atexit
import logging
import os
import signal
import subprocess
import sys
import tempfile
import time
import Pyro4
import threading
import traceback
import json
import mininet.term
from mininet.topo import Topo
from mininet.node import OVSSwitch
from mininet.node import UserSwitch, OVSSwitch
from mininet.link import Link, TCIntf
from mininet.net import Mininet
# from MaxiNet.Frontend import maxinet
from MaxiNet.tools import Tools, MaxiNetConfig
from MaxiNet.WorkerServer.ssh_manager import SSH_Manager
from run_exercise import ExerciseRunner
from p4_mininet import P4Switch
from shutil import *
import pdb
# Include Project Directory in PYTHONPATH
# This is done to pickup changes done by us in MaxiNet Frontend
curr_path = os.getcwd()
parent_path = os.path.abspath(os.path.join(os.getcwd(), '..'))
parent_dir = os.path.basename(os.path.abspath(parent_path))
sys.path.insert(1,parent_path)
from Frontend import maxinet
# create topology
myglobalTopo = Topo()
parser = argparse.ArgumentParser()
parser.add_argument('--topo', dest="topo_fname", default="/tmp/in_topo.json", help = "Input Topology file for Experiment")
parser.add_argument('--swlog_dir', dest="swlog_dir", default="/tmp", help = "Directory path for Switch Log files ")
parser.add_argument('--pcap_dir', dest="pcap_dir", default="/tmp", help = "Directory path for Switch pcap files ")
parser.add_argument('--switch_json', dest="switch_json", default="/tmp/routernew.json", help = "P4 Switch Parser JSON")
# parser.add_argument('--switch_json', dest="switch_json", default="/home/rbabu/MaxiNet/MaxiNet/WorkerServer/simple_router.json", help = "P4 Switch Parser JSON")
parser.add_argument('--switch_exe', dest="switch_exe",default="/home/rbabu/behavioral-model/targets/simple_router/simple_router", help="P4 Switch Executable")
parser.add_argument('--mininet_cli', dest="cli_opt", default="False", help = "Invoke at Mininet CLI in the Workers")
parser.add_argument('--switch_init', dest="swinit_opt", default="AtStart", help = "Switch Initialization AtStart | ByApp")
parser.add_argument('--num_workers', dest="num_workers", default=1, help = "Number of Workers for the Experiment : (Default 1) ")
args = parser.parse_args()
if args.topo_fname :
topo_fname = str(args.topo_fname)
print "Input Topo File Name is ...", topo_fname
if args.swlog_dir :
swlog_dir = str(args.swlog_dir)
print "Switch Log Dir ...", swlog_dir
if args.pcap_dir :
pcap_dir = str(args.pcap_dir)
print "Pcap Dir ...", pcap_dir
if args.switch_json :
switch_json = str(args.switch_json)
print "Switch Parser JSON File Name ...", switch_json
if args.switch_exe :
switch_exe = str(args.switch_exe)
print "Switch EXE Name ...", switch_exe
if args.cli_opt :
cli_opt = str(args.cli_opt)
print "Mininet CLI Option ...", cli_opt
if args.swinit_opt :
swinit_opt = str(args.swinit_opt)
print "Switch Init Option ...", swinit_opt
if args.num_workers :
num_workers = int(args.num_workers)
print "Number of Workers ...", num_workers
# Now save the Input CLI arguments in experiment.cfg file
# Num workers argument is not saved in experiment.cfg file
f = open("t1_experiment.cfg", "w")
out_line="topo_file_name=/tmp/in_topo.json" # This is going to be hardcoded
print >>f, out_line
out_line="swlog_dir="+str(swlog_dir)
print >>f, out_line
out_line="pcap_dir="+str(pcap_dir)
print >>f, out_line
out_line="p4_switch_json="+str(switch_json)
print >>f, out_line
out_line="bmv2_exe="+str(switch_exe)
print >>f, out_line
out_line="Invoke_mininet_cli="+str(cli_opt)
print >>f, out_line
out_line="p4_switch_initialization="+str(swinit_opt)
print >>f, out_line
f.close()
# Rename the file t1_experiment.cfg -> experiment.cfg
os.rename("t1_experiment.cfg", "experiment.cfg")
# Now also copy the given input topo file as in_top.json in each of worker
copy2(topo_fname,'in_topo.json')
print "File sucessfully copied as in_topo.json..."
with open('in_topo.json') as data_file:
data = json.load(data_file)
hnames = data["hosts"]
hlen = len(hnames)
cnt = 1
for x in range(0,hlen) :
tmp = str(hnames[x])
myglobalTopo.addHost(tmp, ip=Tools.makeIP(cnt), mac=Tools.makeMAC(cnt))
cnt = cnt + 1
my_swlist=[]
for key, value in dict.items(data["switches"]):
my_swlist.append(key) # Add to list of switches in topology
cnt = 1
for value1, value2 in dict.items(data["switches"][key]):
tmp = str(key)
myglobalTopo.addSwitch(tmp, dpid=Tools.makeDPID(cnt))
cnt = cnt + 1
#hnames = data["hosts"]
hnames = data["links"]
hlen = len(hnames)
for x in range(0,hlen) :
tmp = str(hnames[x][0])
tmp1 = str(hnames[x][1])
myglobalTopo.addLink(tmp, tmp1)
print "Finished Loading Topology..."
print "Creating Cluster ..."
# start cluster
cluster = maxinet.Cluster(minWorkers=1, maxWorkers=num_workers)
# start experiment with P4Switch on cluster
exp = maxinet.Experiment(cluster, myglobalTopo, switch=P4Switch)
# We can copy experiment.cfg, in_topo.json files to the respective workers
my_allowed_paths = []
for item in dict.items( data["allowed_paths"] ):
my_allowed_paths.append(item)
allowed_paths_len = len(my_allowed_paths)
my_workers = cluster.workers()
for worker in my_workers :
"Copying to Worker 1...", worker
worker.put_file("experiment.cfg", "/tmp/experiment.cfg")
worker.put_file("in_topo.json", "/tmp/in_topo.json")
if (allowed_paths_len <= 0):
print "No Need to Create switch JSON file..."
worker.put_file("simple_router.json", "/tmp/routernew.json")
else :
print "Create New switch JSON file..."
# Assumption is that the input topo is in file named in_topo.json
os.system('python gen_router_json.py')
worker.put_file("routernew.json", "/tmp/routernew.json")
print "***** Experiment Setup Start *****"
exp.setup()
print "waiting 10 seconds for routing algorithms on the controller to converge"
time.sleep(10)
# Try to do a pingall hosts
hnames = data["hosts"]
hlen = len(hnames)
for host in hnames:
for nxthost in hnames:
if host != nxthost :
print "pinging from ..", host ," -> ", nxthost, " to check network connectivity ..."
nxt_hnum = int(nxthost[1:])
tmp_hname = str(nxt_hnum)
rcmd = "ping -c 1 10.0." + tmp_hname + ".10"
print "Rcmd is ..", rcmd
print exp.get_node(host).cmd(rcmd)
if (swinit_opt == "ByApp") :
break
print "Program Switch objects as per topology ..."
raw_input("[Continue...]")
for sw in my_swlist :
exp.program_myswitch(sw)
for host, my_cmd in data["host_cmnds"] :
print "Execute Command on Host ...", host
print "Command Monitor on Host ...", my_cmd
print exp.get_node(host).cmd(my_cmd)
raw_input("[Continue...]")
# print exp.get_node("h2").cmd("python new_cmd_monitor.py --cmd_file=/tmp/h2_cmnds.txt > /tmp/h2_out & ")
raw_input("[Continue...]")
# exp.get_node("s2").cmd("tc qdisc change dev s2-eth1 root netem delay 200ms")
# exp.get_node("s2").cmd("tc qdisc change dev s2-eth2 root netem delay 200ms")
# exp.get_node("s2").cmd("tc qdisc add dev mn_tun0 root netem delay 500ms")
# exp.get_node("s3").cmd("tc qdisc add dev mn_tun0 root netem delay 500ms")
# exp.get_node("s3").cmd("tc qdisc change dev s3-eth1 root netem delay 300ms")
# exp.get_node("s3").cmd("tc qdisc change dev s3-eth2 root netem delay 300ms")
# exp.get_node("s6").cmd("tc qdisc add dev mn_tun1 root netem delay 600ms")
# exp.get_node("s5").cmd("tc qdisc add dev mn_tun1 root netem delay 600ms")
raw_input("[Continue...]")
print "Switch Class ..."
print exp.switch
raw_input("[Continue...]")
for host in hnames:
for nxthost in hnames:
if host != nxthost :
print "pinging from ..", host ," -> ", nxthost, " to check network connectivity ..."
nxt_hnum = int(nxthost[1:])
tmp_hname = str(nxt_hnum)
rcmd = "ping -c 1 10.0." + tmp_hname + ".10"
print "Rcmd is ..", rcmd
print exp.get_node(host).cmd(rcmd)
exp.CLI(locals(),globals())
raw_input("[Continue...]")
exp.stop()
raw_input("[Continue]") # wait for user to acknowledge network connectivity
| 2.203125 | 2 |