Dataset schema (one record per source file; ⌀ marks a nullable column):

| column | dtype | range / classes |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 2-616 |
| content_id | string | length 40 |
| detected_licenses | sequence | length 0-69 |
| license_type | string | 2 classes |
| repo_name | string | length 5-118 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | length 4-63 |
| visit_date | timestamp[us] | |
| revision_date | timestamp[us] | |
| committer_date | timestamp[us] | |
| github_id | int64 | 2.91k-686M ⌀ |
| star_events_count | int64 | 0-209k |
| fork_events_count | int64 | 0-110k |
| gha_license_id | string | 23 classes |
| gha_event_created_at | timestamp[us] | |
| gha_created_at | timestamp[us] | |
| gha_language | string | 213 classes |
| src_encoding | string | 30 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 2-10.3M |
| extension | string | 246 classes |
| content | string | length 2-10.3M |
| authors | sequence | length 1 |
| author_id | string | length 0-212 |

Each record below lists its metadata fields in the order above, separated by `|`, followed by the file content and the author list.
c53690190a7e8fbbb17c7d9c5df29272f260dfd8 | 7081b6feb13d983f5e4dc78d25e6f85deb951cab | /raw_pillow_opener/__init__.py | 13fd15379a0f8e8fd4f81f5c8972c43acde3e3a6 | ["MIT"] | permissive | samuelduann/raw-pillow-opener | 82e264eaf6a76ac7b8ecf1cd89457a4bcbd257f3 | ad0d7e29a5df25e7bd719fa0560193773a2125bb | refs/heads/main | 2023-02-20T11:15:22.383124 | 2021-01-18T02:49:56 | 2021-01-18T02:49:56 | 329,980,667 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,314 | py |
# -*- coding: utf-8 -*-
import rawpy
from PIL import Image, ImageFile
class RawImageFile(ImageFile.ImageFile):
format = 'RAW'
format_description = "camera raw image"
def _open(self):
raw = rawpy.imread(self.fp)
array = raw.postprocess()
# size in pixels (width, height)
self._size = (array.shape[1], array.shape[0])
# mode setting
typekey = (1, 1) + array.shape[2:], array.__array_interface__["typestr"]
try:
self.mode = Image._fromarray_typemap[typekey][1]
except KeyError as e:
raise TypeError("Cannot handle this data type: %s, %s" % typekey) from e
# TODO extract exif?
offset = self.fp.tell()
self.tile = [
('RAW', (0, 0) + self.size, offset, (array, self.mode,))
]
class RawDecoder(ImageFile.PyDecoder):
_pulls_fd = True
def decode(self, buffer):
(data, mode) = self.args[0], self.args[1]
raw_decoder = Image._getdecoder(mode, 'raw', (mode, data.strides[0]))
raw_decoder.setimage(self.im)
return raw_decoder.decode(data)
def register_raw_opener():
Image.register_open('RAW', RawImageFile)
Image.register_decoder('RAW', RawDecoder)
    Image.register_extensions(RawImageFile.format, ['.nef', '.cr2', '.dng'])  # extensions must include the leading dot to match
| ["[email protected]"] |
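The module above plugs a rawpy-backed RAW decoder into Pillow's plugin registry; a minimal usage sketch (the file name is hypothetical, and rawpy/Pillow are assumed to be installed; this is not part of the original module):

# hypothetical usage of raw_pillow_opener
from raw_pillow_opener import register_raw_opener
from PIL import Image

register_raw_opener()            # registers the RAW format, decoder and extensions
image = Image.open("photo.nef")  # hypothetical RAW file, decoded via rawpy.postprocess()
image.save("photo.jpg")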
727c6dd5a9d6d63154d4df935778852dc73c00fa | c590571d129ead00bd1916025f854a1719d75683 | /zvt/recorders/joinquant/meta/china_stock_meta_recorder.py | fa4a0c4364dd713ab0f74d8b7829a1b6f86f10ac | ["MIT"] | permissive | ming123jew/zvt | f2fb8e157951e9440a6decd5ae0c08ea227a39db | de66a48ad2a3ac2c3fb22b9ea17a85f28e95cc62 | refs/heads/master | 2023-05-28T15:00:52.015084 | 2021-06-13T12:56:18 | 2021-06-13T12:56:18 | 570,070,597 | 1 | 0 | MIT | 2022-11-24T09:16:48 | 2022-11-24T09:16:47 | null | UTF-8 | Python | false | false | 5,733 | py |
# -*- coding: utf-8 -*-
import pandas as pd
from jqdatapy.api import get_all_securities, run_query
from zvt.api.quote import china_stock_code_to_id, portfolio_relate_stock
from zvt.contract.api import df_to_db, get_entity_exchange, get_entity_code
from zvt.contract.recorder import Recorder, TimeSeriesDataRecorder
from zvt.domain import EtfStock, Stock, Etf, StockDetail
from zvt.recorders.joinquant.common import to_entity_id, jq_to_report_period
from zvt.utils.pd_utils import pd_is_not_null
from zvt.utils.time_utils import to_time_str
class BaseJqChinaMetaRecorder(Recorder):
provider = 'joinquant'
def __init__(self, batch_size=10, force_update=True, sleeping_time=10) -> None:
super().__init__(batch_size, force_update, sleeping_time)
def to_zvt_entity(self, df, entity_type, category=None):
df = df.set_index('code')
df.index.name = 'entity_id'
df = df.reset_index()
        # listing date
df.rename(columns={'start_date': 'timestamp'}, inplace=True)
df['timestamp'] = pd.to_datetime(df['timestamp'])
df['list_date'] = df['timestamp']
df['end_date'] = pd.to_datetime(df['end_date'])
df['entity_id'] = df['entity_id'].apply(lambda x: to_entity_id(entity_type=entity_type, jq_code=x))
df['id'] = df['entity_id']
df['entity_type'] = entity_type
df['exchange'] = df['entity_id'].apply(lambda x: get_entity_exchange(x))
df['code'] = df['entity_id'].apply(lambda x: get_entity_code(x))
df['name'] = df['display_name']
if category:
df['category'] = category
return df
class JqChinaStockRecorder(BaseJqChinaMetaRecorder):
data_schema = Stock
def run(self):
        # fetch the stock list
df_stock = self.to_zvt_entity(get_all_securities(code='stock'), entity_type='stock')
df_to_db(df_stock, data_schema=Stock, provider=self.provider, force_update=self.force_update)
# persist StockDetail too
df_to_db(df=df_stock, data_schema=StockDetail, provider=self.provider, force_update=self.force_update)
# self.logger.info(df_stock)
self.logger.info("persist stock list success")
class JqChinaEtfRecorder(BaseJqChinaMetaRecorder):
data_schema = Etf
def run(self):
        # fetch the ETF list
df_index = self.to_zvt_entity(get_all_securities(code='etf'), entity_type='etf', category='etf')
df_to_db(df_index, data_schema=Etf, provider=self.provider, force_update=self.force_update)
# self.logger.info(df_index)
self.logger.info("persist etf list success")
class JqChinaStockEtfPortfolioRecorder(TimeSeriesDataRecorder):
entity_provider = 'joinquant'
entity_schema = Etf
    # data comes from JoinQuant (jq)
provider = 'joinquant'
data_schema = EtfStock
def __init__(self, entity_type='etf', exchanges=['sh', 'sz'], entity_ids=None, codes=None, day_data=True, batch_size=10,
force_update=False, sleeping_time=5, default_size=2000, real_time=False, fix_duplicate_way='add',
start_timestamp=None, end_timestamp=None, close_hour=0, close_minute=0) -> None:
super().__init__(entity_type, exchanges, entity_ids, codes, day_data, batch_size, force_update, sleeping_time,
default_size, real_time, fix_duplicate_way, start_timestamp, end_timestamp, close_hour,
close_minute)
def record(self, entity, start, end, size, timestamps):
df = run_query(table='finance.FUND_PORTFOLIO_STOCK',
conditions=f'pub_date#>=#{to_time_str(start)}&code#=#{entity.code}',
parse_dates=None)
if pd_is_not_null(df):
# id code period_start period_end pub_date report_type_id report_type rank symbol name shares market_cap proportion
# 0 8640569 159919 2018-07-01 2018-09-30 2018-10-26 403003 第三季度 1 601318 中国平安 19869239.0 1.361043e+09 7.09
# 1 8640570 159919 2018-07-01 2018-09-30 2018-10-26 403003 第三季度 2 600519 贵州茅台 921670.0 6.728191e+08 3.50
# 2 8640571 159919 2018-07-01 2018-09-30 2018-10-26 403003 第三季度 3 600036 招商银行 18918815.0 5.806184e+08 3.02
# 3 8640572 159919 2018-07-01 2018-09-30 2018-10-26 403003 第三季度 4 601166 兴业银行 22862332.0 3.646542e+08 1.90
df['timestamp'] = pd.to_datetime(df['pub_date'])
df.rename(columns={'symbol': 'stock_code', 'name': 'stock_name'}, inplace=True)
df['proportion'] = df['proportion'] * 0.01
df = portfolio_relate_stock(df, entity)
df['stock_id'] = df['stock_code'].apply(lambda x: china_stock_code_to_id(x))
df['id'] = df[['entity_id', 'stock_id', 'pub_date', 'id']].apply(lambda x: '_'.join(x.astype(str)), axis=1)
df['report_date'] = pd.to_datetime(df['period_end'])
df['report_period'] = df['report_type'].apply(lambda x: jq_to_report_period(x))
df_to_db(df=df, data_schema=self.data_schema, provider=self.provider, force_update=self.force_update)
# self.logger.info(df.tail())
self.logger.info(f"persist etf {entity.code} portfolio success {df.iloc[-1]['pub_date']}")
return None
if __name__ == '__main__':
# JqChinaEtfRecorder().run()
JqChinaStockEtfPortfolioRecorder(codes=['510050']).run()
# the __all__ is generated
__all__ = ['BaseJqChinaMetaRecorder', 'JqChinaStockRecorder', 'JqChinaEtfRecorder', 'JqChinaStockEtfPortfolioRecorder']
| ["[email protected]"] |
da4e65994020ecec1aae6923a1bd83b3951032e3 | a90ba084b85683f4c52d0e638cfb6108207ced38 | /896.py | 91ca187efe65342ba1e072994842f422f065f605 | [] | no_license | JiayuZhai/leetcode_python3 | 4a9260d00a52cde9ec37e6292e64d04161e66111 | 5755c3edd6d949af18d0247d2103379510dfab85 | refs/heads/master | 2020-04-02T21:22:42.270736 | 2019-03-29T23:28:48 | 2019-03-29T23:28:48 | 154,796,956 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 162 | py |
class Solution:
def isMonotonic(self, A: List[int]) -> bool:
sortA = sorted(A)
return (A == sortA or list(reversed(A)) == sortA)
| ["[email protected]"] |
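The solution above leans on sorting, which costs O(n log n) time and O(n) extra space; an equivalent single-pass O(n) check (a sketch, not part of the original file) could look like:

from typing import List

def is_monotonic(A: List[int]) -> bool:
    # a list is monotonic if adjacent pairs never both increase and decrease
    increasing = decreasing = True
    for prev, cur in zip(A, A[1:]):
        if prev < cur:
            decreasing = False
        elif prev > cur:
            increasing = False
    return increasing or decreasing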
f264cc280e130b88e79446f5aed42501f03d37b9 | 72978a95cc5cedf2cb9e2ba96effa77ced565296 | /zamg/app/app.py | ab4e8b11a1bb286ece7b04f138494986e7fbadb5 | [] | no_license | vadeanu/ULG-PROJEKT-ZAMG | b418b4b0df20801f6f60fa7bc6790f29a8e16a37 | 0f4823da8ed2989f02c985d0740a1bd7e6b40c5b | refs/heads/master | 2022-12-22T21:11:53.098494 | 2020-09-25T16:12:30 | 2020-09-25T16:12:30 | 275,172,915 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,265 | py |
from typing import List, Dict
from flask import Flask
import logging
from flask_mysqldb import MySQL
from crawler import Crawler
from xmlparser import XmlParser
app = Flask(__name__)
logging.basicConfig(level=logging.INFO)
app.config['MYSQL_HOST'] = 'mysql_database'
app.config['MYSQL_USER'] = 'root'
app.config['MYSQL_PASSWORD'] = 'zamg'
app.config['MYSQL_DB'] = 'zamg'
mysql = MySQL(app)
def getThesis():
cursor = mysql.connection.cursor()
cursor.execute('SELECT urn, title, author, supervisor, university FROM thesis')
results = cursor.fetchall()
cursor.close()
return results
def deletethesis():
cursor = mysql.connection.cursor()
cursor.execute('delete from thesis')
mysql.connection.commit()
def crawlthesis():
cursor = mysql.connection.cursor()
serverurls = { 'http://apache_uni1', 'http://apache_uni2', 'http://apache_uni3'}
c = Crawler()
parser = XmlParser()
for url in serverurls:
app.logger.info('crawling ' + url)
c.crawl(url + "/thesis.txt")
if c.wasSuccessful():
for filename in c.getText().splitlines():
thesisurl = url + "/" + filename
app.logger.info('crawling ' + thesisurl)
c.crawl(thesisurl)
if c.wasSuccessful():
thesis = parser.readXML(c.getText())
print(thesis.inspect())
                    # the following set methods populate the object so it is technically valid and can be saved; these lines must of course be removed again
# thesis.setUrn("urn1")
# thesis.setTitle("titel1")
# thesis.setAuthor("author1")
# thesis.setLanguage("language1")
# thesis.setSupervisor("Supervisor1")
# thesis.setGenre("genre1")
# thesis.setUniversity("uni1")
# thesis.setProduction("1980")
if thesis.isValid():
                        # check whether this thesis has already been inserted
sql = "select count(*) from thesis where urn = '" + thesis.getUrn() + "'";
cursor.execute(sql)
result = cursor.fetchone()
numberOfRows = result[0]
if numberOfRows > 0:
app.logger.info("Thesis " + thesis.getUrn() + " befindet sich schon in der Datenbank")
continue
sql = "insert into thesis (urn, title, subtitle, author, language, supervisor, sec_supervisor, genre, university, production, abstract) values (" \
"'" + thesis.getUrn() + "', " \
"'" + thesis.getTitle() + "', " \
"'" + thesis.getSubtitle() + "', " \
"'" + thesis.getAuthor() + "', " \
"'" + thesis.getLanguage() + "', " \
"'" + thesis.getSupervisor() + "', " \
"'" + thesis.getSec_supervisor() + "', " \
"'" + thesis.getGenre() + "', " \
"'" + thesis.getUniversity() + "', " \
"'" + thesis.getProduction() + "', " \
"'" + thesis.getAbstract() + "'" \
")"
cursor.execute(sql)
mysql.connection.commit()
app.logger.info("Thesis " + thesis.getUrn() + " in der Datenbank gespeichert.")
else:
app.logger.info("Thesis kann nicht importiert werden. Nicht valide.")
else:
app.logger.info("crawl von " + thesisurl + " war nicht erfolgreich.")
def generateHtml():
list = getThesis()
html = '<h1>Thesis</h1>'
html = html + '<a href="/">Home</a> | <a href="/crawlthesis">crawl thesis</a> | <a href="/deletethesis">delete thesis</a><p/>'
#SELECT urn, title, author, supervisor, university, production FROM thesis'
if len(list) > 0:
html = html + '<table border="1">'
html = html + '<tr><th>urn</th><th>title</th><th>author</th><th>supervisor</th><th>university</th></tr>'
#html = html + '<tr><th>urn</th><th>title</th><th>author</th><th>supervisor</th><th>university</th><th>production</th></tr>'
for item in list:
html = html + '<tr><td>' + item[0] + '</td><td>' + item[1] + '</td><td>' + item[2] + '</td><td>' + item[3] +'</td><td>' + item[4] + '</td></tr>'
#html = html + '<tr><td>' + item[0] + '</td><td>' + item[1] + '</td><td>' + item[2] + '</td><td>' + item[3] + '</td><td>' + item[4] + '</td><td>' + item[5] + '</td><td>' + item[6] + '</td></tr>'
html = html + '</table>'
else:
        html = html + '<p>no data</p>'
return html
@app.route('/')
def index():
return generateHtml()
@app.route('/deletethesis')
def delete():
deletethesis()
return generateHtml()
@app.route('/crawlthesis')
def crawl():
crawlthesis()
return generateHtml()
if __name__ == '__main__':
    app.run(host='0.0.0.0')
| ["[email protected]"] |
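The crawler above first fetches thesis.txt from each university server and treats every line of it as the name of an XML file to download and parse next; a hypothetical thesis.txt illustrating the expected layout (the file names are assumptions for illustration):

thesis_0001.xml
thesis_0002.xml
thesis_0003.xml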
8a9fcc67b691b3c5273bc8cc75d7b4a59213702e | f44e38367d40b25731b41f13d910e045c5db99b1 | /chapter_1/lesson6_step11_exception.py | c8cf3da310c6dce65b02d216f95d51828fdf1ecb | [] | no_license | RolandMiles/Stepik_AutoTest | 5371512273ed10246c86d541cfb6138a8a4a7e03 | 5b14fed6727a7e9203ce87a0288979ce448e0235 | refs/heads/master | 2023-09-03T18:44:31.329606 | 2021-10-21T14:57:16 | 2021-10-21T14:57:16 | 418,937,443 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,285 | py |
from selenium import webdriver
import time
try:
    # Registration link for the passing test run
    #link = "http://suninjuly.github.io/registration1.html"
    # Registration link for the failing test run
    link = "http://suninjuly.github.io/registration2.html"
browser = webdriver.Chrome()
browser.get(link)
    # Your code that fills in the required fields
# First name
element = browser.find_element_by_css_selector(".first_block .first")
element.send_keys(".first_block .first")
# Last name
element = browser.find_element_by_css_selector(".first_block .second")
element.send_keys(".first_block .second")
# Email
element = browser.find_element_by_css_selector(".first_block .third")
element.send_keys(".first_block .third")
# Phone
element = browser.find_element_by_css_selector(".second_block .first")
element.send_keys(".second_block .first")
# Address
element = browser.find_element_by_css_selector(".second_block .second")
element.send_keys(".second_block .second")
    # Submit the completed form
button = browser.find_element_by_css_selector("button.btn")
button.click()
    # Verify that registration succeeded
    # wait for the page to load
    time.sleep(1)
    # find the element containing the welcome text
    welcome_text_elt = browser.find_element_by_tag_name("h1")
    # store the text of the welcome_text_elt element in the welcome_text variable
    welcome_text = welcome_text_elt.text
    # use assert to check that the expected text matches the text on the page
assert "Congratulations! You have successfully registered!" == welcome_text
finally:
    # wait so the result of the script run can be inspected visually
    time.sleep(10)
    # close the browser after all the actions
    browser.quit()
| ["[email protected]"] |
84f15ba18a56d28a2f932f51a09526125c3bd6cc | 04830e72ec246706a943aeea7ac5e45a705f4c3e | /haddScript.py | 30a29313adf8f3b073a96c702e2830be6e1fc82b | [] | no_license | dmajumder/X_YH_4b | e72a8ef0351860550e955b62756297a51754108a | ad98a38af012cf8beaf8b2532753b2873f40fbbf | refs/heads/master | 2023-02-02T08:24:40.521990 | 2020-12-22T13:19:40 | 2020-12-22T13:19:40 | 272,053,599 | 0 | 0 | null | 2020-06-13T17:18:12 | 2020-06-13T17:18:11 | null | UTF-8 | Python | false | false | 166 | py |
import os
directories=[d for d in os.listdir(os.getcwd()) if os.path.isdir(d)]
for d in directories:
cmd = "hadd -f {0}.root {0}/*root".format(d)
print(cmd)
| ["[email protected]"] |
79270ecfc3449f0f37b2c5ef62acac1dda9275cf | ee51d2ca3ff763aa29e1d105ce817ad926960948 | /code_samples/tsp.py | 8e3cc0b8a7e00f625b3e73ad951433c84adfa80d | [] | no_license | robmfulton/travelingCA | 9ca9082eefd9e1125ce5ea80ecf5d12942bbfa45 | 59b5838ba63cddf23d2f875f284c2ea0e779f6ac | refs/heads/master | 2021-01-01T17:08:12.762172 | 2017-07-20T06:56:59 | 2017-07-20T06:56:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 949 | py |
import random, numpy, math, copy, matplotlib.pyplot as plt
# 15 random cities on an 18x18 grid and a random starting tour
cities = [random.sample(range(18), 2) for x in range(15)]
tour = random.sample(range(15), 15)
# simulated annealing over a cooling schedule; note that num=1 yields a single
# temperature, so only one swap is ever attempted in this version
for temperature in numpy.logspace(0, 5, num=1)[::-1]:
    # pick two tour positions and build a candidate tour with them swapped
    [i, j] = sorted(random.sample(range(15), 2))
    newTour = tour[:i] + tour[j:j + 1] + tour[i + 1:j] + tour[i:i + 1] + tour[j + 1:]
    # only the four edges around positions i and j change, so compare just those;
    # accept with probability exp((old_length - new_length) / temperature)
    if math.exp((sum(
            [math.sqrt(sum([(cities[tour[(k + 1) % 15]][d] - cities[tour[k % 15]][d]) ** 2 for d in [0, 1]])) for k in
             [j, j - 1, i, i - 1]]) - sum(
            [math.sqrt(sum([(cities[newTour[(k + 1) % 15]][d] - cities[newTour[k % 15]][d]) ** 2 for d in [0, 1]])) for
             k in [j, j - 1, i, i - 1]])) / temperature) > random.random():
        tour = copy.copy(newTour)
# plot the resulting tour as a closed loop
plt.plot(list(zip(*[cities[tour[i % 15]] for i in range(16)]))[0],
         list(zip(*[cities[tour[i % 15]] for i in range(16)]))[1],
         'xb-')
plt.show()
| ["[email protected]"] |
90a5ad57cf62d7082f693f949d412f2d773b647a | 844c7f8fb8d6bfab912583c71b93695167c59764 | /fixação/Seção06/51-60/Sec06Ex51v2.py | 35580169e28f8bc9bc58b28718531dd96aa9d948 | ["Apache-2.0"] | permissive | gugajung/guppe | 2be10656cd9aa33be6afb8e86f20df82662bcc59 | a0ee7b85e8687e8fb8243fbb509119a94bc6460f | refs/heads/main | 2023-05-28T08:08:24.963356 | 2021-06-07T16:56:11 | 2021-06-07T16:56:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 624 | py |
from datetime import date
anoAtual = 1995
salarioAtual = 2000
percentAumento = 1.5
dataAtual = date.today()
anoReal = dataAtual.year
while anoAtual <= anoReal:
salarioAtual = salarioAtual + ((salarioAtual*percentAumento)/100)
print("----------------------------------------")
print(" --- debug")
print(f" --- > Ano Atual : {anoAtual}")
print(f" --- > Salario Atual : {salarioAtual:.2f}")
print(f" --- > Percente de Aumento : {percentAumento:.4f}")
anoAtual += 1
percentAumento *= 2
print("=================================================")
print("Final de O programas") | [
"[email protected]"
] | |
1649e8979efe95fc57fba8536d507cc3fe6de5dc | 0d1c96738f67b63b3e05659f71e995fd26306432 | /Linked List/linkedList1.py | c2f120b561650836528dc7685e67297cbef0b679 | [] | no_license | taufikfathurahman/DS_python | b7364e22bd92faeeb39034c4141c9e39e0e05730 | 2142c2d5554823c2cd7a5dbf11f1cf0a2432252b | refs/heads/master | 2020-04-27T23:31:32.694292 | 2019-03-10T04:52:23 | 2019-03-10T04:52:23 | 174,779,310 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,557 | py |
# Node class
class Node:
# Function to initialise the node object
def __init__(self, data):
self.data = data # Assign data
self.next = None # Initialize next as null
class LinkedList:
# Function to initialize head
def __init__(self):
self.head = None
# This function prints content of linked list
# starting from head
def printList(self):
temp = self.head
while (temp):
print(temp.data)
temp = temp.next
# Code execution starts here
if __name__=='__main__':
# Start with the empty list
llist = LinkedList()
llist.head = Node(1)
second = Node(2)
third = Node(3)
'''
Three nodes have been created.
We have references to these three blocks as first,
second and third
llist.head second third
| | |
| | |
+----+------+ +----+------+ +----+------+
| 1 | None | | 2 | None | | 3 | None |
+----+------+ +----+------+ +----+------+
'''
llist.head.next = second # Link first node with second
second.next = third # Link second node with third
'''
Now next of second Node refers to third. So all three
nodes are linked.
llist.head second third
| | |
| | |
+----+------+ +----+------+ +----+------+
| 1 | o-------->| 2 | o-------->| 3 | null |
+----+------+ +----+------+ +----+------+
'''
    llist.printList()
| ["[email protected]"] |
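The lesson above wires the three nodes together by hand; a natural next step in the same style (a sketch, not part of the original lesson) is an O(1) insertion at the head of the list:

def push(llist, new_data):
    new_node = Node(new_data)    # 1. allocate a node holding the data
    new_node.next = llist.head   # 2. point it at the current head
    llist.head = new_node        # 3. move head to the new node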
97463545ddff2a48e01f278692300f45fc2c3acb | f61e88315c7e83e4f9ab430fc7e8db6fc964fccd | /chapter4/lessons/plot_multiple_function.py | ec2e349f946ddadd1e4997746be8ab46a55aed28 | [] | no_license | KentFujii/doing_math | 6a08fbe892ee1045cc75fe02c9c75307463d1f65 | baa84e8b37e45802c2c4fcd4300b915725f3a34a | refs/heads/master | 2021-01-19T05:53:37.948603 | 2016-07-10T11:07:57 | 2016-07-10T11:07:57 | 61,952,634 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 275 | py |
# from sympy.plotting import plot
# from sympy import Symbol
# x = Symbol('x')
# plot(2*x+3, 3*x+1)
from sympy.plotting import plot
from sympy import Symbol
x = Symbol('x')
p = plot(2*x+3, 3*x+1, legend=True, show=False)
p[0].line_color = 'b'
p[1].line_color = 'r'
p.show()
| ["[email protected]"] |
aaebcd30e1283732990421e052eb0d5cecb7a098 | f2abbeb892780b584feb2fd94e7ec5da8ecdc763 | /exporter/opentelemetry-exporter-otlp-proto-http/setup.py | 510eceba6c5abfb14c1de8ec0b03b368df4c4f0c | ["Apache-2.0"] | permissive | SigNoz/opentelemetry-python | 6fa5fd92584d2fb3ca71c958004cd56332c764a7 | 9e397c895797891b709a9f1c68345e9a1c357ad8 | refs/heads/main | 2023-07-15T10:43:17.064286 | 2021-09-02T12:25:18 | 2021-09-02T12:25:18 | 401,617,913 | 1 | 0 | Apache-2.0 | 2021-08-31T07:49:24 | 2021-08-31T07:49:24 | null | UTF-8 | Python | false | false | 943 | py |
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import setuptools
BASE_DIR = os.path.dirname(__file__)
VERSION_FILENAME = os.path.join(
BASE_DIR,
"src",
"opentelemetry",
"exporter",
"otlp",
"proto",
"http",
"version.py",
)
PACKAGE_INFO = {}
with open(VERSION_FILENAME) as f:
exec(f.read(), PACKAGE_INFO)
setuptools.setup(version=PACKAGE_INFO["__version__"])
| ["[email protected]"] |
8a6da7cba87dcb36e8601794d88de49835fd7a3b | 151359bf8f14964849a313585dcaee3698a4b6b7 | /bin/painter.py | 3771af86ba457bce640a01094b034b01566e2de0 | [] | no_license | deepak9807/blog-api | 28b3c41cea76a04eeb0c395fc1d5c96bee58396f | 11669ead6fdb2e1c10a3c98314a3605d6bf9318d | refs/heads/master | 2021-09-09T17:35:56.575272 | 2018-03-18T13:03:14 | 2018-03-18T13:03:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,124 | py |
#!/home/deepak/blog-api/bin/python
#
# The Python Imaging Library
# $Id$
#
# this demo script illustrates pasting into an already displayed
# photoimage. note that the current version of Tk updates the whole
# image every time we paste, so to get decent performance, we split
# the image into a set of tiles.
#
try:
from tkinter import Tk, Canvas, NW
except ImportError:
from Tkinter import Tk, Canvas, NW
from PIL import Image, ImageTk
import sys
#
# painter widget
class PaintCanvas(Canvas):
def __init__(self, master, image):
Canvas.__init__(self, master, width=image.size[0], height=image.size[1])
# fill the canvas
self.tile = {}
self.tilesize = tilesize = 32
xsize, ysize = image.size
for x in range(0, xsize, tilesize):
for y in range(0, ysize, tilesize):
box = x, y, min(xsize, x+tilesize), min(ysize, y+tilesize)
tile = ImageTk.PhotoImage(image.crop(box))
self.create_image(x, y, image=tile, anchor=NW)
self.tile[(x, y)] = box, tile
self.image = image
self.bind("<B1-Motion>", self.paint)
def paint(self, event):
xy = event.x - 10, event.y - 10, event.x + 10, event.y + 10
im = self.image.crop(xy)
# process the image in some fashion
im = im.convert("L")
self.image.paste(im, xy)
self.repair(xy)
def repair(self, box):
# update canvas
dx = box[0] % self.tilesize
dy = box[1] % self.tilesize
for x in range(box[0]-dx, box[2]+1, self.tilesize):
for y in range(box[1]-dy, box[3]+1, self.tilesize):
try:
xy, tile = self.tile[(x, y)]
tile.paste(self.image.crop(xy))
except KeyError:
pass # outside the image
self.update_idletasks()
#
# main
if len(sys.argv) != 2:
print("Usage: painter file")
sys.exit(1)
root = Tk()
im = Image.open(sys.argv[1])
if im.mode != "RGB":
im = im.convert("RGB")
PaintCanvas(root, im).pack()
root.mainloop()
| ["[email protected]"] |
d3e7e9dae606fe6dc77d9c43997e9c592fbcd477 | 982bc95ab762829c8b6913e44504415cdd77241a | /account_easy_reconcile/base_reconciliation.py | b50c06b9eed699d96da272f0fb9dd9613177c235 | [] | no_license | smart-solution/natuurpunt-finance | 6b9eb65be96a4e3261ce46d7f0c31de3589e1e0d | 6eeb48468792e09d46d61b89499467a44d67bc79 | refs/heads/master | 2021-01-23T14:42:05.017263 | 2020-11-03T15:56:35 | 2020-11-03T15:56:35 | 39,186,046 | 0 | 1 | null | 2020-11-03T15:56:37 | 2015-07-16T08:36:54 | Python | UTF-8 | Python | false | false | 7,776 | py |
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright 2012-2013 Camptocamp SA (Guewen Baconnier)
# Copyright (C) 2010 Sébastien Beau
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, orm
from operator import itemgetter, attrgetter
class easy_reconcile_base(orm.AbstractModel):
"""Abstract Model for reconciliation methods"""
_name = 'easy.reconcile.base'
_inherit = 'easy.reconcile.options'
_columns = {
'account_id': fields.many2one(
'account.account', 'Account', required=True),
'partner_ids': fields.many2many(
'res.partner', string="Restrict on partners"),
# other columns are inherited from easy.reconcile.options
}
def automatic_reconcile(self, cr, uid, ids, context=None):
""" Reconciliation method called from the view.
:return: list of reconciled ids, list of partially reconciled items
"""
if isinstance(ids, (int, long)):
ids = [ids]
assert len(ids) == 1, "Has to be called on one id"
rec = self.browse(cr, uid, ids[0], context=context)
return self._action_rec(cr, uid, rec, context=context)
def _action_rec(self, cr, uid, rec, context=None):
""" Must be inherited to implement the reconciliation
:return: list of reconciled ids
"""
raise NotImplementedError
def _base_columns(self, rec):
""" Mandatory columns for move lines queries
An extra column aliased as ``key`` should be defined
in each query."""
aml_cols = (
'id',
'debit',
'credit',
'date',
'period_id',
'ref',
'name',
'partner_id',
'account_id',
'move_id')
return ["account_move_line.%s" % col for col in aml_cols]
def _select(self, rec, *args, **kwargs):
return "SELECT %s" % ', '.join(self._base_columns(rec))
def _from(self, rec, *args, **kwargs):
return "FROM account_move_line"
def _where(self, rec, *args, **kwargs):
where = ("WHERE account_move_line.account_id = %s "
"AND account_move_line.reconcile_id IS NULL ")
# it would be great to use dict for params
# but as we use _where_calc in _get_filter
# which returns a list, we have to
# accomodate with that
params = [rec.account_id.id]
if rec.partner_ids:
where += " AND account_move_line.partner_id IN %s"
params.append(tuple([l.id for l in rec.partner_ids]))
return where, params
def _get_filter(self, cr, uid, rec, context):
ml_obj = self.pool.get('account.move.line')
where = ''
params = []
if rec.filter:
dummy, where, params = ml_obj._where_calc(
cr, uid, eval(rec.filter), context=context).get_sql()
if where:
where = " AND %s" % where
return where, params
def _below_writeoff_limit(self, cr, uid, rec, lines,
writeoff_limit, context=None):
precision = self.pool.get('decimal.precision').precision_get(
cr, uid, 'Account')
keys = ('debit', 'credit')
sums = reduce(
lambda line, memo:
dict((key, value + memo[key])
for key, value
in line.iteritems()
if key in keys), lines)
debit, credit = sums['debit'], sums['credit']
writeoff_amount = round(debit - credit, precision)
return bool(writeoff_limit >= abs(writeoff_amount)), debit, credit
def _get_rec_date(self, cr, uid, rec, lines,
based_on='end_period_last_credit', context=None):
period_obj = self.pool.get('account.period')
def last_period(mlines):
period_ids = [ml['period_id'] for ml in mlines]
periods = period_obj.browse(
cr, uid, period_ids, context=context)
return max(periods, key=attrgetter('date_stop'))
def last_date(mlines):
return max(mlines, key=itemgetter('date'))
def credit(mlines):
return [l for l in mlines if l['credit'] > 0]
def debit(mlines):
return [l for l in mlines if l['debit'] > 0]
if based_on == 'end_period_last_credit':
return last_period(credit(lines)).date_stop
if based_on == 'end_period':
return last_period(lines).date_stop
elif based_on == 'newest':
return last_date(lines)['date']
elif based_on == 'newest_credit':
return last_date(credit(lines))['date']
elif based_on == 'newest_debit':
return last_date(debit(lines))['date']
# reconcilation date will be today
# when date is None
return None
def _reconcile_lines(self, cr, uid, rec, lines, allow_partial=False, context=None):
""" Try to reconcile given lines
:param list lines: list of dict of move lines, they must at least
contain values for : id, debit, credit
:param boolean allow_partial: if True, partial reconciliation will be
created, otherwise only Full
reconciliation will be created
:return: tuple of boolean values, first item is wether the items
have been reconciled or not,
the second is wether the reconciliation is full (True)
or partial (False)
"""
if context is None:
context = {}
ml_obj = self.pool.get('account.move.line')
writeoff = rec.write_off
line_ids = [l['id'] for l in lines]
below_writeoff, sum_debit, sum_credit = self._below_writeoff_limit(
cr, uid, rec, lines, writeoff, context=context)
date = self._get_rec_date(
cr, uid, rec, lines, rec.date_base_on, context=context)
rec_ctx = dict(context, date_p=date)
if below_writeoff:
if sum_credit < sum_debit:
writeoff_account_id = rec.account_profit_id.id
else:
writeoff_account_id = rec.account_lost_id.id
period_id = self.pool.get('account.period').find(
cr, uid, dt=date, context=context)[0]
ml_obj.reconcile(
cr, uid,
line_ids,
type='auto',
writeoff_acc_id=writeoff_account_id,
writeoff_period_id=period_id,
writeoff_journal_id=rec.journal_id.id,
context=rec_ctx)
return True, True
elif allow_partial:
ml_obj.reconcile_partial(
cr, uid,
line_ids,
type='manual',
context=rec_ctx)
return True, False
return False, False
| ["[email protected]"] |
ae535fe72253b6c574f7196c75a3b64e003c3ea3 | ccb6918eff9624bc890c4318462b3d04fe01ab25 | /d02/for/for/settings.py | 763917cea83d3de15fae9c387027213bdac3fd6e | [] | no_license | shchliu/19django | 431202f3b4a71fb2614f3f113174df327a338413 | 63af6aeff279a83fb170c1b5385d0804d96fafad | refs/heads/master | 2020-08-15T08:53:36.707823 | 2019-10-16T08:26:41 | 2019-10-16T08:28:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,121 | py |
"""
Django settings for for project.
Generated by 'django-admin startproject' using Django 2.0.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'n$s!ww49p_&vb4(^$4-n#s(98qsu+(61j_2w2)&7pbx+3(k_x+'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'for.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'for.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
| ["[email protected]"] |
91f4700c570703a70f381ca3bc0990abbefa67fc | 0ff0150c61bec1768db86f1b9f7ef82b8f743f28 | /EX45/mansion.py | af8eee14a4587e4d26d772dc802816e1aa23c190 | [] | no_license | VanessaTan/LPTHW | 31a330ba304d3736ca87ce7943d8efe1f2db74d2 | b615c1d65b483a5bf64d8a0ec5462a365e609134 | refs/heads/master | 2021-01-12T17:22:53.414654 | 2016-12-20T19:02:41 | 2016-12-20T19:02:41 | 71,553,945 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,778 | py |
class Scene(object):
def enter(self):
print "She found you..."
exit(1)
import mansionupstairs
class Mansion(Scene):
def __init__(self):
super(Mansion, self).__init__()
self.Mansion = Mansion
def insertnamehere(self):
print "You are scared. But you gather your shit together and approach catiously."
print "You tip-toe through the back door and sneak in."
print "You hear footsteps upstairs and the sound of something being dragged."
print "You then hear someone rush down the stairs."
print "Where do you hide?!?!"
print "There's a closet or you can hide behind the door."
action = raw_input("> ")
if action == "closet":
print "You rush in the closet as quietly as possible."
print "You close the doors after you but leave a crack open."
print "Through the crack, you see a monster walk past."
print "You are scared shitless."
print "You start breathing heavily."
print "The monster suddenly turns his head towards your direction."
print "She smiles and opens the closet door."
print " 'I found you... ' "
return 'death'
elif action == "door":
print "You rush behind the door."
print "You hold your breath as you hear the footsteps stop nearby."
print "You hear a female voice say 'I'll find you..."
print "The footsteps stomp away outside."
print "You emerge from hiding."
mansionupstairs.MansionUpstairs().insertnamehere()
else:
print "Just choose between the door or closet. It's not that hard."
            self.insertnamehere()  # was mansion.Mansion().insertnamehere(), but no 'mansion' module is imported
| ["[email protected]"] |
b6e187de710d37037dd7c0d830a50e7eaee1aa28 | 786027545626c24486753351d6e19093b261cd7d | /ghidra9.2.1_pyi/ghidra/app/util/bin/format/xcoff/XCoffSectionHeaderFlags.pyi | 43a745532a3157885655ec9c25a175e6ac3df2ec | ["MIT"] | permissive | kohnakagawa/ghidra_scripts | 51cede1874ef2b1fed901b802316449b4bf25661 | 5afed1234a7266c0624ec445133280993077c376 | refs/heads/main | 2023-03-25T08:25:16.842142 | 2021-03-18T13:31:40 | 2021-03-18T13:31:40 | 338,577,905 | 14 | 1 | null | null | null | null | UTF-8 | Python | false | false | 772 | pyi |
import java.lang
class XCoffSectionHeaderFlags(object):
STYP_BSS: int = 128
STYP_DATA: int = 64
STYP_DEBUG: int = 8192
STYP_EXCEPT: int = 128
STYP_INFO: int = 512
STYP_LOADER: int = 4096
STYP_OVRFLO: int = 32768
STYP_PAD: int = 8
STYP_TEXT: int = 32
STYP_TYPCHK: int = 16384
def __init__(self): ...
def equals(self, __a0: object) -> bool: ...
def getClass(self) -> java.lang.Class: ...
def hashCode(self) -> int: ...
def notify(self) -> None: ...
def notifyAll(self) -> None: ...
def toString(self) -> unicode: ...
@overload
def wait(self) -> None: ...
@overload
def wait(self, __a0: long) -> None: ...
@overload
def wait(self, __a0: long, __a1: int) -> None: ...
| ["[email protected]"] |
34d5caf6ffdd22eaa6b316c1cc63b54cb3cd702a | d6c718a6738edcdd2b54502c8a4a0896500a51bb | /Day-06/有效互动比函数化.py | bd66eeebc2a51a36dae32eb3e6e566de216394c8 | [] | no_license | ChangxingJiang/Python-DM-Homework-W1-W3 | f895cf0fa7a2eb10df7f6693b47a02a2e107f122 | 5fd118a974ce37fb88dedd18581bf2f4b2d5a71a | refs/heads/master | 2022-09-21T18:56:13.775894 | 2020-05-25T21:55:24 | 2020-05-25T21:55:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 139 | py |
def ratio(a,b):
return a/b
num1 = int(input("请输入粉丝量:"))
num2 = int(input("请输入互动量:"))
print(ratio(num2,num1)) | [
"[email protected]"
] | |
ce42ed7f15ab68df41c64c17c877f642173d66a2 | a7cca49626a3d7100e9ac5c2f343c351ecb76ac7 | /upydev/argcompleter.py | 3751c0a54657cd37a9a63de43d2f4f77ad8882e7 | ["MIT"] | permissive | Carglglz/upydev | 104455d77d64300074bda54d86bd791f19184975 | 529aa29f3e1acf8160383fe410b5659110dc96de | refs/heads/master | 2023-05-24T18:38:56.242500 | 2022-10-21T14:03:17 | 2022-10-21T14:03:17 | 199,335,165 | 49 | 9 | MIT | 2022-10-21T14:03:18 | 2019-07-28T20:42:00 | Python | UTF-8 | Python | false | false | 57,893 | py |
import os
from upydev import __path__
UPYDEV_PATH = __path__[0]
# SHELL_CMD_PARSER
shell_commands = ['cd', 'mkdir', 'cat', 'head', 'rm', 'rmdir', 'pwd',
'run', 'mv']
custom_sh_cmd_kw = ['df', 'datetime', 'ifconfig', 'net',
'ap', 'mem', 'install', 'touch',
'exit', 'lpwd', 'lsl', 'lcd', 'put', 'get', 'ls',
'set', 'tree', 'dsync', 'reload', 'docs',
'du', 'ldu', 'upip', 'uping',
'timeit', 'i2c',
'upy-config', 'jupyterc', 'pytest', 'rssi',
'info', 'id', 'uhelp', 'modules', 'shasum', 'vim',
'update_upyutils', 'mdocs', 'ctime', 'enable_sh',
'diff', 'config', 'fw', 'mpyx', 'sd', 'uptime', 'cycles', 'play']
LS = dict(help="list files or directories",
subcmd=dict(help='indicate a file/dir or pattern to see', default=[],
metavar='file/dir/pattern', nargs='*'),
options={"-a": dict(help='list hidden files', required=False,
default=False,
action='store_true'),
"-d": dict(help='depth level', required=False,
default=0,
type=int)})
HEAD = dict(help="display first lines of a file",
subcmd=dict(help='indicate a file or pattern to see', default=[],
metavar='file/pattern', nargs='*'),
options={"-n": dict(help='number of lines to print', required=False,
default=10,
type=int)})
CAT = dict(help="concatenate and print files",
subcmd=dict(help='indicate a file or pattern to see', default=[],
metavar='file/pattern', nargs='*'),
options={"-d": dict(help='depth level', required=False,
default=0,
type=int)})
MKDIR = dict(help="make directories",
subcmd=dict(help='indicate a dir/pattern to create', default=[],
metavar='dir', nargs='*'),
options={})
CD = dict(help="change current working directory",
subcmd=dict(help='indicate a dir to change to', default='/',
metavar='dir', nargs='?'),
options={})
MV = dict(help="move/rename a file",
subcmd=dict(help='indicate a file to rename', default=[],
metavar='file', nargs=2),
options={})
PWD = dict(help="print current working directory",
subcmd={},
options={})
RM = dict(help="remove file or pattern of files",
subcmd=dict(help='indicate a file/pattern to remove', default=[],
metavar='file/dir/pattern', nargs='+'),
options={"-rf": dict(help='remove recursive force a dir or file',
required=False,
default=False,
action='store_true'),
"-d": dict(help='depth level search', required=False,
default=0,
type=int),
"-dd": dict(help='filter for directories only', required=False,
default=False,
action='store_true')})
RMDIR = dict(help="remove directories or pattern of directories",
subcmd=dict(help='indicate a dir/pattern to remove', default=[],
metavar='dir', nargs='+'),
options={"-d": dict(help='depth level search', required=False,
default=0,
type=int)})
DU = dict(help="display disk usage statistics",
subcmd=dict(help='indicate a dir to see usage', default='',
metavar='dir', nargs='?'),
options={"-d": dict(help='depth level', required=False,
default=0,
type=int),
"-p": dict(help='pattern to match', required=False,
default=[],
nargs='*')})
TREE = dict(help="list contents of directories in a tree-like format",
subcmd=dict(help='indicate a dir to see', default='',
metavar='dir', nargs='?'),
options={"-a": dict(help='list hidden files', required=False,
default=False,
action='store_true')})
DF = dict(help="display free disk space",
subcmd={},
options={})
MEM = dict(help="show ram usage info",
subcmd=dict(help='{info , dump}; default: info',
default='info',
metavar='action', choices=['info', 'dump'], nargs='?'),
options={})
EXIT = dict(help="exit upydev shell",
subcmd={},
options={"-r": dict(help='soft-reset after exit', required=False,
default=False,
action='store_true'),
"-hr": dict(help='hard-reset after exit', required=False,
default=False,
action='store_true')})
VIM = dict(help="use vim to edit files",
subcmd=dict(help='indicate a file to edit', default='',
metavar='file', nargs='?'),
options={"-rm": dict(help='remove local copy after upload', required=False,
default=False,
action='store_true'),
"-e": dict(help='execute script after upload', required=False,
default=False,
action='store_true'),
"-r": dict(help='reload script so it can run again',
required=False,
default=False,
action='store_true'),
"-o": dict(help='override local copy if present',
required=False,
default=False,
action='store_true'),
"-d": dict(help=('use vim diff between device and local files'
', if same file name device file is ~file'),
required=False,
default=[],
nargs='+')})
DIFF = dict(help=("use git diff between device's [~file/s] and local file/s"),
subcmd=dict(help='indicate files to compare or pattern', default=['*', '*'],
metavar='fileA fileB', nargs='+'),
options={"-s": dict(help='switch file comparison',
required=False,
default=False,
action='store_true')})
RUN = dict(help="run device's scripts",
subcmd=dict(help='indicate a file/script to run', default='',
metavar='file'),
options={"-r": dict(help='reload script so it can run again',
required=False,
default=False,
action='store_true'),
})
RELOAD = dict(help="reload device's scripts",
subcmd=dict(help='indicate a file/script to reload', default='',
metavar='file', nargs=1),
options={})
LCD = dict(help="change local current working directory",
subcmd=dict(help='indicate a dir to change to', default='',
metavar='dir', nargs='?'),
options={})
LSL = dict(help="list local files or directories",
subcmd=dict(help='indicate a file/dir or pattern to see', default=[],
metavar='file/dir/pattern', nargs='*'),
options={"-a": dict(help='list hidden files', required=False,
default=False,
action='store_true')})
LPWD = dict(help="print local current working directory",
subcmd={},
options={})
LDU = dict(help="display local disk usage statistics",
subcmd=dict(help='indicate a dir to see usage', default='',
metavar='dir', nargs='?'),
options={"-d": dict(help='depth level', required=False,
default=0,
type=int)})
INFO = dict(help="prints device's info",
subcmd={},
options={})
ID = dict(help="prints device's unique id",
subcmd={},
options={})
UHELP = dict(help="prints device's help info",
subcmd={},
options={})
MODULES = dict(help="prints device's frozen modules",
subcmd={},
options={})
UPING = dict(help="device send ICMP ECHO_REQUEST packets to network hosts",
subcmd=dict(help='indicate an IP address to ping; default: host IP',
default='host',
metavar='IP', nargs='?'),
options={})
RSSI = dict(help="prints device's RSSI (WiFi or BLE)",
subcmd={},
options={})
NET = dict(help="manage network station interface (STA._IF)",
desc="enable/disable station inteface, config and connect to or scan APs",
subcmd=dict(help='{status, on, off, config, scan}; default: status',
default='status',
metavar='command',
choices=['status', 'on', 'off', 'config', 'scan'],
nargs='?'),
options={"-wp": dict(help='ssid, password for config command',
required=False,
nargs=2)})
IFCONFIG = dict(help="prints network interface configuration (STA._IF)",
subcmd={},
options={"-t": dict(help='print info in table format',
required=False,
default=False,
action='store_true')})
AP = dict(help="manage network acces point interface (AP._IF)",
desc="enable/disable ap inteface, config an AP or scan connected clients",
subcmd=dict(help='{status, on, off, scan, config}; default: status',
default='status',
metavar='command',
choices=['status', 'on', 'off', 'config', 'scan'],
nargs='?'),
options={"-ap": dict(help='ssid, password for config command',
required=False,
nargs=2),
"-t": dict(help='print info in table format',
required=False,
default=False,
action='store_true')})
I2C = dict(help="manage I2C interface",
subcmd=dict(help='{config, scan}; default: config',
default='config',
metavar='action',
choices=['config', 'scan'],
nargs='?'),
options={"-i2c": dict(help='[scl] [sda] for config command',
required=False,
default=[22, 23],
nargs=2)})
SET = dict(help="set device's configuration {rtc, hostname, localname}",
subcmd=dict(help=('set parameter configuration {rtc localtime, rtc ntptime,'
' hostname, localname}; default: rtc localtime'),
default=['rtc'],
metavar='parameter', nargs='+'),
options={"-utc": dict(help='[utc] for "set ntptime" '
'command', required=False, nargs=1, type=int)},
alt_ops=['rtc', 'localtime', 'ntptime', 'hostname', 'localname'])
DATETIME = dict(help="prints device's RTC time",
subcmd={},
options={})
UPTIME = dict(help=("prints device's uptime since latest boot, "
"(requires uptime.py and uptime.settime()"
" at boot.py/main.py)"),
subcmd={},
options={})
CYCLES = dict(help=("prints device's cycle count"
"(requires cycles.py and cycles.set()"
" at boot.py/main.py)"),
subcmd={},
options={})
SHASUM = dict(help="shasum SHA-256 tool",
subcmd=dict(help='Get the hash of a file or check a shasum file',
default=[],
metavar='file/pattern',
nargs='*'),
options={"-c": dict(help='check a shasum file',
required=False,
default='')})
TOUCH = dict(help="create a new file",
subcmd=dict(help='indicate a new file/pattern to create',
default=[],
metavar='file/pattern',
nargs='*'),
options={})
UPIP = dict(help="install or manage MicroPython libs",
subcmd=dict(help='indicate a lib/module to {install, info, find}',
default=[],
metavar='file/pattern',
nargs='*'),
options={},
alt_ops=['install', 'info', 'find'])
TIMEIT = dict(help="measure execution time of a script/function",
subcmd=dict(help='indicate a script/function to measure',
default=[],
metavar='script/function',
nargs='*'),
options={})
UPDATE_UPYUTILS = dict(help="update upyutils scripts",
subcmd=dict(help=("filter to match one/multiple "
"upyutils; default: all"),
default=['*'],
nargs='*',
metavar='name/pattern'),
options={},
alt_ops=os.listdir(os.path.join(UPYDEV_PATH,
'upyutils_dir')))
ENABLE_SHELL = dict(help="upload required files so shell is fully operational",
subcmd={},
options={})
DOCS = dict(help="see upydev docs at https://upydev.readthedocs.io/en/latest/",
subcmd=dict(help='indicate a keyword to search',
metavar='keyword', nargs='?'),
options={})
MDOCS = dict(help="see MicroPython docs at docs.micropython.org",
subcmd=dict(help='indicate a keyword to search',
metavar='keyword', nargs='?'),
options={})
CTIME = dict(help="measure execution time of a shell command",
subcmd=dict(help='indicate a command to measure',
default='info',
choices=shell_commands+custom_sh_cmd_kw,
metavar='command'),
options={})
CONFIG = dict(help="set or check config (from *_config.py files)#",
desc="* needs config module\n* to set config --> [config]: "
"[parameter]=[value]",
subcmd=dict(help='indicate parameter to set or check ',
default=[],
metavar='parameter',
nargs='*'),
options={"-y": dict(help='print config in YAML format',
required=False,
default=False,
action='store_true')})
SD = dict(help="commands to manage an sd",
desc='enable an sd module, mount/unmount an sd or auto mount/unmount sd\n\n'
'* auto command needs SD_AM.py in device',
subcmd=dict(help='actions to mount/unmount sd : {enable, init, deinit, auto}',
default='enable',
choices=['enable', 'init', 'deinit', 'auto'],
metavar='command'),
options={"-po": dict(help='pin of LDO 3.3V regulator to enable',
default=15,
type=int),
"-sck": dict(help='sck pin for sd SPI',
default=5,
type=int),
"-mosi": dict(help='mosi pin for sd SPI',
default=18,
type=int),
"-miso": dict(help='miso pin for sd SPI',
default=19,
type=int),
"-cs": dict(help='cs pin for sd SPI',
default=21,
type=int)})
LOAD = dict(help="run local script in device",
desc="load a local script in device buffer and execute it.",
subcmd=dict(help='indicate a file/script to load', default='',
metavar='file',
nargs='*'),
options={})
SHELL_CMD_DICT_PARSER = {"ls": LS, "head": HEAD, "cat": CAT, "mkdir": MKDIR,
"touch": TOUCH, "cd": CD, "mv": MV, "pwd": PWD,
"rm": RM, "rmdir": RMDIR, "du": DU,
"tree": TREE, "df": DF, "mem": MEM, "exit": EXIT,
"vim": VIM, "run": RUN, "reload": RELOAD,
"info": INFO, "id": ID, "uhelp": UHELP, "modules": MODULES,
"uping": UPING, "rssi": RSSI, "net": NET, "ifconfig": IFCONFIG,
"ap": AP, "i2c": I2C, "set": SET, "datetime": DATETIME,
"shasum": SHASUM, "upip": UPIP, "timeit": TIMEIT,
"update_upyutils": UPDATE_UPYUTILS,
"lcd": LCD,
"lsl": LSL, "lpwd": LPWD, "ldu": LDU, "docs": DOCS,
"mdocs": MDOCS, "ctime": CTIME, "enable_sh": ENABLE_SHELL,
"diff": DIFF, "config": CONFIG, "sd": SD, 'uptime': UPTIME,
"cycles": CYCLES, "load": LOAD}
# DEBUGGING
PING = dict(help="ping the device to test if device is"
" reachable, CTRL-C to stop.",
desc="this sends ICMP ECHO_REQUEST packets to device",
subcmd={},
options={"-t": dict(help="device target address",
required=True),
"-p": dict(help='device password or baudrate',
required=True),
"-zt": dict(help='internal flag for zerotierone device',
required=False,
default=False,
action='store_true')})
PROBE = dict(help="to test if a device is reachable",
desc="ping, scan serial ports or ble scan depending on device type",
subcmd={},
options={"-t": dict(help="device target address",
required=True),
"-p": dict(help='device password or baudrate',
required=True),
"-zt": dict(help='internal flag for zerotierone device',
required=False,
default=False,
action='store_true'),
"-G": dict(help='internal flag for group mode',
required=False,
default=None),
"-gg": dict(help='flag for global group',
required=False,
default=False,
action='store_true'),
"-devs": dict(help='flag for filtering devs in global group',
required=False,
nargs='*')})
SCAN = dict(help="to scan for available devices, use a flag to filter for device type",
desc="\ndefault: if no flag provided will do all three scans.",
subcmd={},
options={"-sr": dict(help="scan for SerialDevice",
required=False,
default=False,
action='store_true'),
"-nt": dict(help='scan for WebSocketDevice',
required=False,
default=False,
action='store_true'),
"-bl": dict(help='scan for BleDevice',
required=False,
default=False,
action='store_true')})
RUN = dict(help="run a script in device, CTRL-C to stop",
desc="this calls 'import [script]' in device and reloads it at the end",
subcmd=dict(help=('indicate a script to run'),
metavar='script'),
options={"-t": dict(help="device target address",
required=True),
"-p": dict(help='device password or baudrate',
required=True),
"-wss": dict(help='use WebSocket Secure',
required=False,
default=False,
action='store_true'),
"-s": dict(help='indicate the path of the script if in external fs'
' e.g. an sd card.',
required=False)})
PLAY = dict(help="play custom tasks in ansible playbook style",
desc="task must be yaml file with name, hosts, tasks, name, command\n"
"structure",
subcmd=dict(help=('indicate a task file to play.'),
metavar='task',
choices=["add", "rm", "list"]),
options={"-t": dict(help="device target address",
required=True),
"-p": dict(help='device password or baudrate',
required=True),
"-wss": dict(help='use WebSocket Secure',
required=False,
default=False,
action='store_true')})
TIMEIT = dict(help="to measure execution time of a module/script",
desc="source: https://github.com/peterhinch/micropython-samples"
"/tree/master/timed_function",
subcmd=dict(help=('indicate a script to run'),
metavar='script'),
options={"-t": dict(help="device target address",
required=True),
"-p": dict(help='device password or baudrate',
required=True),
"-wss": dict(help='use WebSocket Secure',
required=False,
default=False,
action='store_true'),
"-s": dict(help='indicate the path of the script if in external'
' fs e.g. an sd card.',
required=False)})
STREAM_TEST = dict(help="to test download speed (from device to host)",
desc="default: 10 MB of random bytes are sent in chunks of 20 kB "
"and received in chunks of 32 kB.\n\n*(sync_tool.py required)",
subcmd={},
options={"-t": dict(help="device target address",
required=True),
"-p": dict(help='device password or baudrate',
required=True),
"-wss": dict(help='use WebSocket Secure',
required=False,
default=False,
action='store_true'),
"-chunk_tx": dict(help='chunk size of data packets in kB to'
' send',
required=False, default=20, type=int),
"-chunk_rx": dict(help='chunk size of data packets in kB to'
' receive',
required=False, default=32, type=int),
"-total_size": dict(help='total size of data packets in MB',
required=False, default=10, type=int)})
SYSCTL = dict(help="to start/stop a script without following the output",
desc="to follow initiate repl",
mode=dict(help='indicate a mode {start,stop}',
metavar='mode',
choices=['start', 'stop']),
subcmd=dict(help='indicate a script to start/stop',
metavar='script'),
options={"-t": dict(help="device target address",
required=True),
"-p": dict(help='device password or baudrate',
required=True),
"-wss": dict(help='use WebSocket Secure',
required=False,
default=False,
action='store_true')})
LOG = dict(help="to log the output of a script running in device",
desc="log levels (sys.stdout and file), run modes (normal, daemon) are"
"available through following options",
subcmd=dict(help=('indicate a script to run and log'),
metavar='script'),
options={"-t": dict(help="device target address",
required=True),
"-p": dict(help='device password or baudrate',
required=True),
"-wss": dict(help='use WebSocket Secure',
required=False,
default=False,
action='store_true'),
"-s": dict(help='indicate the path of the script if in external fs'
' e.g. an sd card.',
required=False),
"-dflev": dict(help='debug file mode level; default: error',
default='error',
choices=['debug', 'info', 'warning', 'error',
'critical']),
"-dslev": dict(help='debug sys.stdout mode level; default: debug',
default='debug',
choices=['debug', 'info', 'warning', 'error',
'critical']),
"-daemon": dict(help='enable "daemon mode", uses nohup so this '
'means running in background, output if any is'
' redirected to [SCRIPT_NAME]_daemon.log',
default=False, action='store_true'),
"-stopd": dict(help='To stop a log daemon script',
default=False, action='store_true'),
"-F": dict(help='To follow a daemon log script file',
action='store_true',
default=False)})
PYTEST = dict(help="run tests on device with pytest (use pytest setup first)",
subcmd=dict(help='indicate a test script to run, any optional '
'arg is passed to pytest',
default=[''],
metavar='test',
nargs='*'),
options={"-t": dict(help="device target address",
required=True),
"-p": dict(help='device password or baudrate',
required=True),
"-wss": dict(help='use WebSocket Secure',
required=False,
default=False,
action='store_true')})
DB_CMD_DICT_PARSER = {"ping": PING, "probe": PROBE, "scan": SCAN, "run": RUN,
"timeit": TIMEIT, "stream_test": STREAM_TEST, "sysctl": SYSCTL,
"log": LOG, "pytest": PYTEST, "play": PLAY}
# DEVICE MANAGEMENT
CONFIG = dict(help="to save device settings",
desc="this will allow set default device configuration or \n"
"target a specific device in a group.\n"
"\ndefault: a configuration file 'upydev_.config' is saved in\n"
"current working directory, use -[options] for custom configuration",
subcmd={},
options={"-t": dict(help="device target address"),
"-p": dict(help='device password or baudrate'),
"-g": dict(help='save configuration in global path',
required=False,
default=False,
action='store_true'),
"-gg": dict(help='save device configuration in global group',
required=False,
default=False,
action='store_true'),
"-@": dict(help='specify a device name',
required=False),
"-zt": dict(help='zerotierone device configuration',
required=False),
"-sec": dict(help='introduce password with no echo',
required=False,
default=False,
action='store_true')})
CHECK = dict(help='to check device information',
desc='shows current device information or specific device\n'
'indicated with -@ option if it is stored in the global group.',
subcmd={},
options={"-@": dict(help='specify device/s name',
required=False,
nargs='+'),
"-i": dict(help='if device is online/connected gets device info',
required=False,
default=False,
action='store_true'),
"-g": dict(help='looks for configuration in global path',
required=False,
default=False,
action='store_true'),
"-wss": dict(help='use WebSocket Secure',
required=False,
default=False,
action='store_true'),
"-G": dict(help='specify a group, default: global group',
required=False)})
SET = dict(help='to set current device configuration',
subcmd={},
options={"-@": dict(help='specify device name',
required=False),
"-g": dict(help='looks for configuration in global path',
required=False,
default=False,
action='store_true'),
"-G": dict(help='specify a group, default: global group',
required=False)})
REGISTER = dict(help='to register a device/group as a shell function so it is callable',
subcmd=dict(help='alias for device/s or group',
metavar='alias',
nargs='*'),
options={"-@": dict(help='specify device name',
required=False,
nargs='+'),
"-gg": dict(help='register a group of devices',
required=False,
default=False,
action='store_true'),
"-s": dict(help='specify a source file, default: ~/.profile',
required=False),
"-g": dict(help='looks for configuration in global path',
required=False,
default=False,
action='store_true')})
LSDEVS = dict(help='to see registered devices or groups',
desc='this also defines a shell function with the same name in the source'
' file',
subcmd={},
options={"-s": dict(help='specify a source file, default: ~/.profile',
required=False),
"-G": dict(help='specify a group, default: global group',
required=False)})
MKG = dict(help='make a group of devices',
           desc='this saves a config file with device settings so they can be targeted'
' all together',
subcmd=dict(help='group name',
metavar='group'),
options={"-g": dict(help='save configuration in global path',
required=False,
default=False,
action='store_true'),
"-devs": dict(help='device configuration [name] [target] '
'[password]',
required=False,
nargs='+')})
GG = dict(help='to see global group of devices',
subcmd={},
options={"-g": dict(help='looks for configuration in global path',
required=False,
default=False,
action='store_true')})
SEE = dict(help='to see a group of devices',
subcmd=dict(help='indicate a group name',
metavar='group'),
options={"-g": dict(help='looks for configuration in global path',
required=False,
default=False,
action='store_true')})
MGG = dict(help='manage a group of devices',
desc='add/remove one or more devices to/from a group',
subcmd=dict(help='group name',
metavar='group',
default='UPY_G',
nargs='?'),
options={"-g": dict(help='looks for configuration in global path',
required=False,
default=False,
action='store_true'),
"-add": dict(help='add device/s name',
required=False,
nargs='*'),
"-rm": dict(help='remove device/s name',
required=False,
nargs='*'),
"-gg": dict(help='manage global group',
required=False,
default=False,
action='store_true')})
MKSG = dict(help='manage a subgroup of devices',
            desc='make a group from another group with a subset of devices',
subcmd=dict(help='group name',
metavar='group',
default='UPY_G',
nargs='?'),
sgroup=dict(help='subgroup name',
metavar='subgroup'),
options={"-g": dict(help='looks for configuration in global path',
required=False,
default=False,
action='store_true'),
"-devs": dict(help='add device/s name',
required=True,
nargs='*'),
"-gg": dict(help='manage global group',
required=False,
default=False,
action='store_true')})
DM_CMD_DICT_PARSER = {"config": CONFIG, "check": CHECK,
"register": REGISTER, "lsdevs": LSDEVS, "mkg": MKG, "gg": GG,
"see": SEE, "mgg": MGG, "mksg": MKSG}
# FW
MPYX = dict(help="freeze .py files using mpy-cross. (must be available in $PATH)",
subcmd=dict(help='indicate a file/pattern to '
'compile',
default=[],
metavar='file/pattern',
nargs='+'),
options={})
FW = dict(help="list or get available firmware from micropython.org",
subcmd=dict(help=('{list, get, update}'
'; default: list'),
default=['list'],
metavar='action', nargs='*'),
options={"-b": dict(help='to indicate device platform',
required=False),
"-n": dict(help='to indicate keyword for filter search',
required=False),
"-t": dict(help="device target address",
required=True),
"-p": dict(help='device password or baudrate',
required=True),
"-wss": dict(help='use WebSocket Secure',
required=False,
default=False,
action='store_true')},
alt_ops=['list', 'get', 'update', 'latest'])
FLASH = dict(help="to flash a firmware file using available serial tools "
"(esptool.py, pydfu.py)",
subcmd=dict(help=('indicate a firmware file to flash'),
metavar='firmware file'),
options={"-i": dict(help='to check wether device platform and '
'firmware file name match',
required=False,
action='store_true'),
"-t": dict(help="device target address",
required=True),
"-p": dict(help='device baudrate',
required=True),
})
OTA = dict(help="to flash a firmware file using OTA system (ota.py, otable.py)",
subcmd=dict(help=('indicate a firmware file to flash'),
metavar='firmware file'),
options={"-i": dict(help='to check wether device platform and '
'firmware file name match',
required=False,
action='store_true'),
"-sec": dict(help='to enable OTA TLS',
required=False,
default=False,
action='store_true'),
"-t": dict(help="device target address",
required=True),
"-p": dict(help='device password',
required=True),
"-wss": dict(help='use WebSocket Secure',
required=False,
default=False,
action='store_true'),
"-zt": dict(help='zerotierone host IP',
required=False,
default=False)})
FW_CMD_DICT_PARSER = {"mpyx": MPYX, "fwr": FW, "flash": FLASH, "ota": OTA}
# GC
RESET = dict(help="reset device",
subcmd={},
options={"-hr": dict(help='to do hard reset',
required=False,
default=False,
action='store_true')})
CONFIG = dict(help="set or check config (from *_config.py files)#",
desc="to set config --> [config]: [parameter]=[value]",
subcmd=dict(help='indicate parameter to set or check ',
default=[],
metavar='parameter',
nargs='*'),
options={"-y": dict(help='print config in YAML format',
required=False,
default=False,
action='store_true')})
KBI = dict(help="to send KeyboardInterrupt to device",
subcmd={},
options={})
UPYSH = dict(help="import upysh",
subcmd={},
options={})
GC_CMD_DICT_PARSER = {"reset": RESET, "uconfig": CONFIG, "kbi": KBI, "upysh": UPYSH}
# KG
KG = dict(help="to generate a key pair (RSA) or key & certificate (ECDSA) for ssl",
desc="generate key pair and exchange with device, or refresh WebREPL "
"password",
mode=dict(help='indicate a key {rsa, ssl, wr}',
metavar='mode',
choices=['rsa', 'ssl', 'wr'],
nargs='?'),
subcmd=dict(help='- gen: generate a ECDSA key/cert (default)'
'\n- rotate: To rotate CA key/cert pair old->new or'
' new->old'
'\n- add: add a device cert to upydev path verify location.'
'\n- export: export CA or device cert to cwd.',
metavar='subcmd',
# just for arg completion
choices=['gen', 'add', 'export', 'rotate', 'dev', 'host', 'CA',
'status'],
default='gen',
nargs='?'),
dst=dict(help='indicate a subject: {dev, host, CA}, default: dev',
metavar='dest',
choices=['dev', 'host', 'CA'],
default='dev',
nargs='?'),
options={"-t": dict(help="device target address",
required=True),
"-p": dict(help='device password or baudrate',
required=True),
"-wss": dict(help='use WebSocket Secure',
required=False,
default=False,
action='store_true'),
"-zt": dict(help='internal flag for zerotierone device',
required=False,
default=False,
action='store_true'),
"-rst": dict(help='internal flag for reset',
required=False,
default=False,
action='store_true'),
"-key_size": dict(help="RSA key size, default:2048",
default=2048,
required=False,
type=int),
"-show_key": dict(help='show generated RSA key',
required=False,
default=False,
action='store_true'),
"-tfkey": dict(help='transfer keys to device',
required=False,
default=False,
action='store_true'),
"-rkey": dict(help='option to remove private device key from host',
required=False,
default=False,
action='store_true'),
"-g": dict(help='option to store new WebREPL password globally',
required=False,
default=False,
action='store_true'),
"-to": dict(help='serial device name to upload to',
required=False),
"-f": dict(help='cert name to add to verify locations',
required=False),
"-a": dict(
help="show all devs ssl cert status",
required=False,
default=False,
action="store_true",
), })
RSA = dict(help="to perform operations with RSA key pair as sign, verify or "
"authenticate",
desc="sign files, verify signatures or authenticate devices with "
"RSA challenge\nusing device keys or host keys",
mode=dict(help='indicate an action {sign, verify, auth}',
metavar='mode',
choices=['sign', 'verify', 'auth']),
subcmd=dict(help='indicate a file to sign/verify',
metavar='file/signature',
nargs='?'),
options={"-t": dict(help="device target address",
required=True),
"-p": dict(help='device password or baudrate',
required=True),
"-wss": dict(help='use WebSocket Secure',
required=False,
default=False,
action='store_true'),
"-host": dict(help='to use host keys',
required=False,
default=False,
action='store_true'),
"-rst": dict(help='internal flag for reset',
required=False,
default=False,
action='store_true')})
KG_CMD_DICT_PARSER = {"kg": KG, "rsa": RSA}
# SHELL-REPL
SHELLREPLS = dict(help="enter shell-repl",
subcmd={},
options={"-t": dict(help="device target address",
required=True),
"-p": dict(help='device password or baudrate',
required=True),
"-wss": dict(help='use WebSocket Secure',
required=False,
default=False,
action='store_true'),
"-rkey": dict(help='generate new password after exit '
'(WebSocketDevices)',
required=False,
action='store_true'),
"-nem": dict(help='force no encryption mode'
' (WebSocketDevices)',
required=False,
action='store_true')})
SHELL_CONFIG = dict(help="configure shell prompt colors",
desc='see\nhttps://python-prompt-toolkit.readthedocs.io/en/master/'
'pages/asking_for_input.html#colors\nfor color options',
subcmd={},
options={"--userpath": dict(help='user path color; default:'
' ansimagenta bold',
required=False,
default='ansimagenta bold'),
"--username": dict(help='user name color; default:'
' ansigreen bold',
required=False,
default='ansigreen bold'),
"--at": dict(help='@ color; default: ansigreen bold',
required=False,
default='ansigreen bold'),
"--colon": dict(help='colon color; default: white',
required=False,
default='#ffffff'),
"--pound": dict(help='pound color; default: ansiblue bold',
required=False,
default='ansiblue bold'),
"--host": dict(help='host color; default: ansigreen bold',
required=False,
default='ansigreen bold'),
"--path": dict(help='path color; default: ansiblue bold',
required=False,
default='ansiblue bold')})
SET_WSS = dict(help="toggle between WebSecREPL and WebREPL",
subcmd={},
options={"-t": dict(help="device target address",
required=True),
"-p": dict(help='device password',
required=True),
"-wss": dict(help='use WebSocket Secure',
required=False,
default=False,
action='store_true'),
})
JUPYTER = dict(help="MicroPython upydevice kernel for jupyter console, CTRL-D to exit",
subcmd={},
options={})
SHELLREPL_CMD_DICT_PARSER = {"shl": SHELLREPLS, "shl-config": SHELL_CONFIG,
"set_wss": SET_WSS,
"jupyterc": JUPYTER}
# REPL
REPLS = dict(help="enter REPL",
subcmd={},
options={"-t": dict(help="device target address",
required=True),
"-p": dict(help='device password or baudrate',
required=True),
"-wss": dict(help='use WebSocket Secure',
required=False,
default=False,
action='store_true'),
"-rkey": dict(help='generate new password after exit '
'(WebSocketDevices)',
required=False,
action='store_true')})
REPL_CMD_DICT_PARSER = {"rpl": REPLS}
# FIO
PUT = dict(help="upload files to device",
subcmd=dict(help='indicate a file/pattern/dir to '
'upload',
default=[],
metavar='file/pattern/dir',
nargs='+'),
options={"-dir": dict(help='path to upload to',
required=False,
default=''),
"-rst": dict(help='to soft reset after upload',
required=False,
default=False,
action='store_true')})
GET = dict(help="download files from device",
subcmd=dict(help='indicate a file/pattern/dir to '
'download',
default=[],
metavar='file/pattern/dir',
nargs='+'),
options={"-dir": dict(help='path to download from',
required=False,
default=''),
"-d": dict(help='depth level search for pattrn', required=False,
default=0,
type=int),
"-fg": dict(help='switch off faster get method',
required=False,
default=True,
action='store_false'),
"-b": dict(help='read buffer for faster get method', required=False,
default=512,
type=int)})
DSYNC = dict(help="recursively sync a folder from/to device's filesystem",
desc="* needs shasum.py in device",
subcmd=dict(help='indicate a dir/pattern to '
'sync',
default=['.'],
metavar='dir/pattern',
nargs='*'),
options={"-rf": dict(help='remove recursive force a dir or file deleted'
' in local/device directory',
required=False,
default=False,
action='store_true'),
"-d": dict(help='sync from device to host', required=False,
default=False,
action='store_true'),
"-fg": dict(help='switch off faster get method',
required=False,
default=True,
action='store_false'),
"-b": dict(help='read buffer for faster get method',
required=False,
default=512,
type=int),
"-t": dict(help='show tree of directory to sync', required=False,
default=False,
action='store_true'),
"-f": dict(help='force sync, no hash check', required=False,
default=False,
action='store_true'),
"-p": dict(help='show diff', required=False,
default=False,
action='store_true'),
"-n": dict(help='dry-run and save stash', required=False,
default=False,
action='store_true'),
"-i": dict(help='ignore file/dir or pattern', required=False,
default=[],
nargs='*')})
UPDATE_UPYUTILS = dict(help="update upyutils scripts",
subcmd=dict(help=("filter to match one/multiple "
"upyutils; default: all"),
default=['*'],
nargs='*',
metavar='name/pattern'),
options={},
alt_ops=os.listdir(os.path.join(UPYDEV_PATH,
'upyutils_dir')))
INSTALL = dict(help="install libraries or modules with upip to ./lib",
subcmd=dict(help='indicate a lib/module to install',
metavar='module'),
options={})
FIO_CMD_DICT_PARSER = {"put": PUT, "get": GET, "dsync": DSYNC,
"update_upyutils": UPDATE_UPYUTILS, "install": INSTALL}
ALL_PARSER = {}
ALL_PARSER.update(SHELL_CMD_DICT_PARSER)
ALL_PARSER.update(DB_CMD_DICT_PARSER)
ALL_PARSER.update(DM_CMD_DICT_PARSER)
ALL_PARSER.update(FW_CMD_DICT_PARSER)
ALL_PARSER.update(GC_CMD_DICT_PARSER)
ALL_PARSER.update(KG_CMD_DICT_PARSER)
ALL_PARSER.update(SHELLREPL_CMD_DICT_PARSER)
ALL_PARSER.update(REPL_CMD_DICT_PARSER)
ALL_PARSER.update(FIO_CMD_DICT_PARSER)
def argopts_complete(option):
if option in ALL_PARSER.keys():
opt_args = []
if ALL_PARSER[option]['subcmd']:
choices = ALL_PARSER[option]['subcmd'].get('choices')
if choices:
opt_args += choices
if 'mode' in ALL_PARSER[option].keys():
choices = ALL_PARSER[option]['mode'].get('choices')
if choices:
opt_args += choices
alt_ops = ALL_PARSER[option].get('alt_ops')
if alt_ops:
opt_args += alt_ops
kw_args = ALL_PARSER[option].get('options')
if kw_args:
opt_args += list(kw_args.keys())
return opt_args
else:
return []
def get_opts_dict(option):
kw_args = ALL_PARSER[option].get('options')
if kw_args:
return kw_args
else:
return {}
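# Minimal illustration of the two helpers above (the __main__ guard and the
# sample command names are an ad-hoc assumption for a quick sanity check, not
# part of upydev itself):
if __name__ == '__main__':
    print(argopts_complete('put'))       # option names for "put", e.g. ['-dir', '-rst']
    print(sorted(get_opts_dict('get')))  # option names for "get", e.g. ['-b', '-d', '-dir', '-fg']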
| [
"[email protected]"
] | |
a0e264d5e2ba260f7857655633539fe991807ccb | 99e4d9226e124215aaf66945cfaa5c42d18cc19f | /typings/aiohttp/helpers.pyi | 27b377a309d31a7788ba093977ec22ca796c313b | [
"CC0-1.0",
"LicenseRef-scancode-public-domain"
] | permissive | mathieucaroff/oxowlbot | d826423a1a4cca8a38c90383d0a71dbb40052f35 | a10c12b7c94b3e7030cef2f57c567bbd3034c8c9 | refs/heads/master | 2022-04-18T14:06:29.049957 | 2020-04-22T14:44:57 | 2020-04-22T14:44:57 | 255,177,595 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,994 | pyi | """
This type stub file was generated by pyright.
"""
import asyncio
import datetime
import functools
import netrc
import os
import re
import sys
import async_timeout
import attr
from collections import namedtuple
from types import TracebackType
from typing import Any, Callable, ContextManager, Dict, Iterable, Iterator, Mapping, Optional, Pattern, Set, Tuple, Type, TypeVar, Union
from yarl import URL
"""Various helper functions"""
__all__ = ('BasicAuth', 'ChainMapProxy')
PY_36 = sys.version_info >= (3, 6)
PY_37 = sys.version_info >= (3, 7)
PY_38 = sys.version_info >= (3, 8)
if not PY_37:
...
def all_tasks(loop: Optional[asyncio.AbstractEventLoop] = ...) -> Set[asyncio.Task[Any]]:
...
if PY_37:
all_tasks = getattr(asyncio, 'all_tasks')
_T = TypeVar('_T')
sentinel = object()
NO_EXTENSIONS = bool(os.environ.get('AIOHTTP_NO_EXTENSIONS'))
DEBUG = getattr(sys.flags, 'dev_mode', False) or not sys.flags.ignore_environment and bool(os.environ.get('PYTHONASYNCIODEBUG'))
CHAR = set(chr(i) for i in range(0, 128))
CTL = set(chr(i) for i in range(0, 32)) | chr(127)
SEPARATORS = '(', ')', '<', '>', '@', ',', ';', ':', '\\', '"', '/', '[', ']', '?', '=', '{', '}', ' ', chr(9)
TOKEN = CHAR ^ CTL ^ SEPARATORS
coroutines = asyncio.coroutines
old_debug = coroutines._DEBUG
@asyncio.coroutine
def noop(*args, **kwargs):
...
async def noop2(*args: Any, **kwargs: Any) -> None:
...
class BasicAuth(namedtuple('BasicAuth', ['login', 'password', 'encoding'])):
"""Http basic authentication helper."""
def __new__(cls, login: str, password: str = ..., encoding: str = ...) -> BasicAuth:
...
@classmethod
def decode(cls, auth_header: str, encoding: str = ...) -> BasicAuth:
"""Create a BasicAuth object from an Authorization HTTP header."""
...
@classmethod
def from_url(cls, url: URL, *, encoding: str = ...) -> Optional[BasicAuth]:
"""Create BasicAuth from url."""
...
def encode(self) -> str:
"""Encode credentials."""
...
def strip_auth_from_url(url: URL) -> Tuple[URL, Optional[BasicAuth]]:
...
def netrc_from_env() -> Optional[netrc.netrc]:
"""Attempt to load the netrc file from the path specified by the env-var
NETRC or in the default location in the user's home directory.
Returns None if it couldn't be found or fails to parse.
"""
...
@attr.s(frozen=True, slots=True)
class ProxyInfo:
proxy = ...
proxy_auth = ...
def proxies_from_env() -> Dict[str, ProxyInfo]:
...
def current_task(loop: Optional[asyncio.AbstractEventLoop] = ...) -> asyncio.Task:
...
def get_running_loop(loop: Optional[asyncio.AbstractEventLoop] = ...) -> asyncio.AbstractEventLoop:
...
def isasyncgenfunction(obj: Any) -> bool:
...
@attr.s(frozen=True, slots=True)
class MimeType:
type = ...
subtype = ...
suffix = ...
parameters = ...
@functools.lru_cache(maxsize=56)
def parse_mimetype(mimetype: str) -> MimeType:
"""Parses a MIME type into its components.
mimetype is a MIME type string.
Returns a MimeType object.
Example:
>>> parse_mimetype('text/html; charset=utf-8')
MimeType(type='text', subtype='html', suffix='',
parameters={'charset': 'utf-8'})
"""
...
def guess_filename(obj: Any, default: Optional[str] = ...) -> Optional[str]:
...
def content_disposition_header(disptype: str, quote_fields: bool = ..., **params: str) -> str:
"""Sets ``Content-Disposition`` header.
disptype is a disposition type: inline, attachment, form-data.
Should be valid extension token (see RFC 2183)
params is a dict with disposition params.
"""
...
class reify:
"""Use as a class method decorator. It operates almost exactly like
the Python `@property` decorator, but it puts the result of the
method it decorates into the instance dict after the first call,
effectively replacing the function it decorates with an instance
variable. It is, in Python parlance, a data descriptor.
"""
def __init__(self, wrapped: Callable[..., Any]) -> None:
self.wrapped = ...
self.__doc__ = ...
self.name = ...
def __get__(self, inst: Any, owner: Any) -> Any:
...
def __set__(self, inst: Any, value: Any) -> None:
...
reify_py = reify
_ipv4_pattern = r'^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}' r'(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$'
_ipv6_pattern = r'^(?:(?:(?:[A-F0-9]{1,4}:){6}|(?=(?:[A-F0-9]{0,4}:){0,6}' r'(?:[0-9]{1,3}\.){3}[0-9]{1,3}$)(([0-9A-F]{1,4}:){0,5}|:)' r'((:[0-9A-F]{1,4}){1,5}:|:)|::(?:[A-F0-9]{1,4}:){5})' r'(?:(?:25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9]?[0-9])\.){3}' r'(?:25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9]?[0-9])|(?:[A-F0-9]{1,4}:){7}' r'[A-F0-9]{1,4}|(?=(?:[A-F0-9]{0,4}:){0,7}[A-F0-9]{0,4}$)' r'(([0-9A-F]{1,4}:){1,7}|:)((:[0-9A-F]{1,4}){1,7}|:)|(?:[A-F0-9]{1,4}:){7}' r':|:(:[A-F0-9]{1,4}){7})$'
_ipv4_regex = re.compile(_ipv4_pattern)
_ipv6_regex = re.compile(_ipv6_pattern, flags=re.IGNORECASE)
_ipv4_regexb = re.compile(_ipv4_pattern.encode('ascii'))
_ipv6_regexb = re.compile(_ipv6_pattern.encode('ascii'), flags=re.IGNORECASE)
def _is_ip_address(regex: Pattern[str], regexb: Pattern[bytes], host: Optional[Union[str, bytes]]) -> bool:
...
is_ipv4_address = functools.partial(_is_ip_address, _ipv4_regex, _ipv4_regexb)
is_ipv6_address = functools.partial(_is_ip_address, _ipv6_regex, _ipv6_regexb)
def is_ip_address(host: Optional[Union[str, bytes, bytearray, memoryview]]) -> bool:
...
def next_whole_second() -> datetime.datetime:
"""Return current time rounded up to the next whole second."""
...
_cached_current_datetime = None
_cached_formatted_datetime = ""
def rfc822_formatted_time() -> str:
...
def _weakref_handle(info):
...
def weakref_handle(ob, name, timeout, loop, ceil_timeout: bool = ...):
...
def call_later(cb, timeout, loop):
...
class TimeoutHandle:
""" Timeout handle """
def __init__(self, loop: asyncio.AbstractEventLoop, timeout: Optional[float]) -> None:
...
def register(self, callback: Callable[..., None], *args: Any, **kwargs: Any) -> None:
...
def close(self) -> None:
...
def start(self) -> Optional[asyncio.Handle]:
...
def timer(self) -> BaseTimerContext:
...
def __call__(self) -> None:
...
class BaseTimerContext(ContextManager['BaseTimerContext']):
...
class TimerNoop(BaseTimerContext):
def __enter__(self) -> BaseTimerContext:
...
def __exit__(self, exc_type: Optional[Type[BaseException]], exc_val: Optional[BaseException], exc_tb: Optional[TracebackType]) -> Optional[bool]:
...
class TimerContext(BaseTimerContext):
""" Low resolution timeout context manager """
def __init__(self, loop: asyncio.AbstractEventLoop) -> None:
...
def __enter__(self) -> BaseTimerContext:
...
def __exit__(self, exc_type: Optional[Type[BaseException]], exc_val: Optional[BaseException], exc_tb: Optional[TracebackType]) -> Optional[bool]:
...
def timeout(self) -> None:
...
class CeilTimeout(async_timeout.timeout):
def __enter__(self) -> async_timeout.timeout:
...
class HeadersMixin:
ATTRS = ...
_content_type = ...
_content_dict = ...
_stored_content_type = ...
def _parse_content_type(self, raw: str) -> None:
...
@property
def content_type(self) -> str:
"""The value of content part for Content-Type HTTP header."""
...
@property
def charset(self) -> Optional[str]:
"""The value of charset part for Content-Type HTTP header."""
...
@property
def content_length(self) -> Optional[int]:
"""The value of Content-Length HTTP header."""
...
def set_result(fut: asyncio.Future[_T], result: _T) -> None:
...
def set_exception(fut: asyncio.Future[_T], exc: BaseException) -> None:
...
class ChainMapProxy(Mapping[str, Any]):
__slots__ = ...
def __init__(self, maps: Iterable[Mapping[str, Any]]) -> None:
...
def __init_subclass__(cls) -> None:
...
def __getitem__(self, key: str) -> Any:
...
def get(self, key: str, default: Any = ...) -> Any:
...
def __len__(self) -> int:
...
def __iter__(self) -> Iterator[str]:
...
def __contains__(self, key: object) -> bool:
...
def __bool__(self) -> bool:
...
def __repr__(self) -> str:
...
| [
"[email protected]"
] | |
ac2cbb0b731b97e581da7a9f035b4ce7209d5dbf | f08336ac8b6f8040f6b2d85d0619d1a9923c9bdf | /223-rectangleArea.py | b77b9c32e8858d4b5b81adab6076c7a69ecfadeb | [] | no_license | MarshalLeeeeee/myLeetCodes | fafadcc35eef44f431a008c1be42b1188e7dd852 | 80e78b153ad2bdfb52070ba75b166a4237847d75 | refs/heads/master | 2020-04-08T16:07:47.943755 | 2019-02-21T01:43:16 | 2019-02-21T01:43:16 | 159,505,231 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 975 | py | '''
223.Rectangle Area
Find the total area covered by two rectilinear rectangles in a 2D plane.
Each rectangle is defined by its bottom left corner and top right corner as shown in the figure.
Example:
Input: A = -3, B = 0, C = 3, D = 4, E = 0, F = -1, G = 9, H = 2
Output: 45
Note:
Assume that the total area is never beyond the maximum possible value of int.
'''
class Solution:
def computeArea(self, A, B, C, D, E, F, G, H):
"""
:type A: int
:type B: int
:type C: int
:type D: int
:type E: int
:type F: int
:type G: int
:type H: int
:rtype: int
"""
X = [[A,0],[C,0],[E,1],[G,1]]
Y = [[B,0],[D,0],[F,1],[H,1]]
X.sort(key=lambda k: k[0])
Y.sort(key=lambda k: k[0])
#print(X,Y)
common = (X[2][0]-X[1][0])*(Y[2][0]-Y[1][0]) if X[0][1] ^ X[1][1] and Y[0][1] ^ Y[1][1] else 0
return (C-A)*(D-B) + (G-E)*(H-F) - common
| [
"[email protected]"
] | |
4f6a6b2e849a7f5c1d5613269719a4be0e1c765b | 9ec9e6b4a52ff4eca2af1285653faea3ba568439 | /3_feature_TFIDF_GI.py | 8485f986aea8d95cc809e315c85cfcd1d68323fa | [] | no_license | Chihyuuu/News-Stance-Detection | eaf34404c42faec432774a4cc11aeab291e7c934 | 3548db5557c373200a9382890d1de2d4ae39fb0f | refs/heads/master | 2023-03-03T20:13:38.367058 | 2021-02-09T09:45:44 | 2021-02-09T09:45:44 | 337,347,275 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,076 | py | import pandas as pd
import json
from sklearn.feature_extraction import DictVectorizer
# from sklearn.naive_bayes import MultinomialNB
# from sklearn.naive_bayes import GaussianNB
# from sklearn.tree import DecisionTreeClassifier
# from sklearn.ensemble import RandomForestClassifier
# from sklearn.linear_model import LogisticRegression
# from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.metrics import accuracy_score, confusion_matrix, precision_recall_fscore_support
CSHeadline_TFIDF = json.load(open('./txt_CosineSimilarity/TFIDF_forCS_headline.txt','r',encoding="UTF-8"))
CSBody_TFIDF = json.load(open('./txt_CosineSimilarity/TFIDF_forCS_body.txt','r',encoding="UTF-8"))
GI = json.load(open('feature_body_GI.txt','r',encoding="UTF-8"))
data = pd.read_csv("merged_traintest.csv")
# y = data['Stance_2cat']
y = data['Stance_4cat'] #(75385,1)
# print ("y",y)
dict_vec = DictVectorizer(sparse=True)
X1 = dict_vec.fit_transform(CSHeadline_TFIDF.values())
X1 = pd.DataFrame(X1.toarray(),columns = dict_vec.get_feature_names())
# print (X1) # (75385 rows x 3293 columns)
X2 = dict_vec.fit_transform(CSBody_TFIDF.values())
X2 = pd.DataFrame(X2.toarray(),columns = dict_vec.get_feature_names())
# print (X2) # (75385 rows x 4207 columns)
X = pd.concat([X1,X2],axis=1)
del X1
del X2
# print ("X",X) # (75385 rows x 7500 columns)
X3 = dict_vec.fit_transform(GI.values())
X3 = pd.DataFrame(X3.toarray(),columns = dict_vec.get_feature_names())
X = pd.concat([X,X3],axis=1) # (75385 rows x 7516 columns)
del X3
# print (X)# (75385 rows x 7516 columns)
X_train = X.iloc[:49972]
y_train = y.iloc[:49972]
X_test = X.iloc[49972:75385]
y_test = y.iloc[49972:75385]
# ## ------------------MultinomialNB-------------------------
# multiNB = MultinomialNB()
# multiNB.fit(X_train,y_train)
# y_Pred = multiNB.predict(X_test)
# print ("MultinomialNB")
# print ("accuracy:",multiNB.score(X_test,y_test))
# print ("confusion_matrix:\n",confusion_matrix(y_test, y_Pred))
# print ("precision" , "recall", "fscore", "support")
# print ("0 unrelated: ",precision_recall_fscore_support(y_test, y_Pred, average='micro', labels=[0]))
# print ("1 agree: ",precision_recall_fscore_support(y_test, y_Pred, average='micro', labels=[1]))
# # print ("2 disagree: ",precision_recall_fscore_support(y_test, y_Pred, average='micro', labels=[2]))
# # print ("3 discuss: ",precision_recall_fscore_support(y_test, y_Pred, average='micro', labels=[3]))
# ## ------------------GaussianNB-------------------------
# GaussianNB = GaussianNB()
# GaussianNB.fit(X_train,y_train)
# y_Pred = GaussianNB.predict(X_test)
# print ("GaussianNB")
# print ("accuracy:",GaussianNB.score(X_test,y_test))
# print ("confusion_matrix:\n",confusion_matrix(y_test, y_Pred))
# print ("precision" , "recall", "fscore", "support")
# print ("0 unrelated: ",precision_recall_fscore_support(y_test, y_Pred, average='micro', labels=[0]))
# print ("1 agree: ",precision_recall_fscore_support(y_test, y_Pred, average='micro', labels=[1]))
# print ("2 disagree: ",precision_recall_fscore_support(y_test, y_Pred, average='micro', labels=[2]))
# print ("3 discuss: ",precision_recall_fscore_support(y_test, y_Pred, average='micro', labels=[3]))
# ## ------------------DecisionTree--------------------------
# DecisionTree = DecisionTreeClassifier()
# DecisionTree.fit(X_train,y_train)
# y_Pred = DecisionTree.predict(X_test)
# print ("DecisionTree")
# print ("accuracy:",DecisionTree.score(X_test,y_test))
# print ("confusion_matrix:\n",confusion_matrix(y_test, y_Pred))
# print ("precision" , "recall", "fscore", "support")
# print ("0 unrelated: ",precision_recall_fscore_support(y_test, y_Pred, average='micro', labels=[0]))
# print ("1 agree: ",precision_recall_fscore_support(y_test, y_Pred, average='micro', labels=[1]))
# # print ("2 disagree: ",precision_recall_fscore_support(y_test, y_Pred, average='micro', labels=[2]))
# # print ("3 discuss: ",precision_recall_fscore_support(y_test, y_Pred, average='micro', labels=[3]))
# ## ------------------RandomForestClassifier-----------------
# RandomForest = RandomForestClassifier()
# RandomForest.fit(X_train,y_train)
# y_Pred = RandomForest.predict(X_test)
# print ("RandomForest")
# print ("accuracy:",RandomForest.score(X_test,y_test))
# print ("confusion_matrix:\n",confusion_matrix(y_test, y_Pred))
# print ("precision" , "recall", "fscore", "support")
# print ("0 unrelated: ",precision_recall_fscore_support(y_test, y_Pred, average='micro', labels=[0]))
# print ("1 agree: ",precision_recall_fscore_support(y_test, y_Pred, average='micro', labels=[1]))
# print ("2 disagree: ",precision_recall_fscore_support(y_test, y_Pred, average='micro', labels=[2]))
# print ("3 discuss: ",precision_recall_fscore_support(y_test, y_Pred, average='micro', labels=[3]))
## -------------------LinearDiscriminant-------------------
# LinearDiscriminant = LinearDiscriminantAnalysis()
# LinearDiscriminant.fit(X_train,y_train)
# y_Pred = LinearDiscriminant.predict(X_test)
# print ("LinearDiscriminant")
# print ("accuracy:",LinearDiscriminant.score(X_test,y_test))
# print ("confusion_matrix:\n",confusion_matrix(y_test, y_Pred))
# print ("precision" , "recall", "fscore", "support")
# print ("0 unrelated: ",precision_recall_fscore_support(y_test, y_Pred, average='micro', labels=[0]))
# print ("1 agree: ",precision_recall_fscore_support(y_test, y_Pred, average='micro', labels=[1]))
# print ("2 disagree: ",precision_recall_fscore_support(y_test, y_Pred, average='micro', labels=[2]))
# print ("3 discuss: ",precision_recall_fscore_support(y_test, y_Pred, average='micro', labels=[3]))
# ### -------------------LogisticRegression-------------------
# LogisticRegression = LogisticRegression()
# LogisticRegression.fit(X_train,y_train)
# y_Pred = LogisticRegression.predict(X_test)
# print ("LogisticRegression")
# print ("accuracy:",LogisticRegression.score(X_test,y_test))
# print ("confusion_matrix:\n",confusion_matrix(y_test, y_Pred))
# print ("precision" , "recall", "fscore", "support")
# print ("0 unrelated: ",precision_recall_fscore_support(y_test, y_Pred, average='micro', labels=[0]))
# print ("1 related: ",precision_recall_fscore_support(y_test, y_Pred, average='micro', labels=[1]))
## -------------------- GradientBoosting --------------------
GradientBoosting = GradientBoostingClassifier()
GradientBoosting.fit(X_train,y_train)
y_Pred = GradientBoosting.predict(X_test)
print ("GradientBoosting")
print ("accuracy:",GradientBoosting.score(X_test,y_test))
print ("confusion_matrix:\n",confusion_matrix(y_test, y_Pred))
print ("precision" , "recall", "fscore", "support")
print ("0 unrelated: ",precision_recall_fscore_support(y_test, y_Pred, average='micro', labels=[0]))
print ("1 related: ",precision_recall_fscore_support(y_test, y_Pred, average='micro', labels=[1]))
print ("2 disagree: ",precision_recall_fscore_support(y_test, y_Pred, average='micro', labels=[2]))
print ("3 discuss: ",precision_recall_fscore_support(y_test, y_Pred, average='micro', labels=[3])) | [
"[email protected]"
] | |
5575a34bb47b7f44bc2177357c0b7f8fb5fef18c | 6260fd806b3bf82a601c86c8a903b49c983d9dda | /w3resource/7.py | 03955a8d513c09e32bafc6d84f5fc6e5dfef3e0a | [] | no_license | skybohannon/python | 6162077e4f18d0ed273d47c342620942e531031b | b78ac8ff1758826d9dd9c969096fb1f10783a4be | refs/heads/master | 2021-09-05T07:09:23.844665 | 2018-01-25T02:58:59 | 2018-01-25T02:58:59 | 106,215,285 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 300 | py | # 7. Write a Python program to accept a filename from the user and print the extension of that. Go to the editor
# Sample filename : abc.java
# Output : java
user_file = input("Please enter a filename: ")
user_ext = user_file.split(".")
print("The file extension is .{}".format(repr(user_ext[-1]))) | [
"[email protected]"
] | |
ababc65892593091f0d609f3a7ffab6cd76e7776 | 8d05bbf82987cad712d642433a9c70e934fce288 | /89_ListaComposto.py | cbcb2a68e45f3735261f50c071117b9e5b517786 | [] | no_license | Graziele-Rodrigues/100-exercicios-resolvidos-em-python | e84bcbf61820302951a2a1ce90bf95dd6ca6115c | 550cfb52ac4cc9728b6985965be5d30b0b77feaf | refs/heads/main | 2023-08-22T12:21:50.085543 | 2021-10-21T00:35:50 | 2021-10-21T00:35:50 | 419,531,361 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 789 | py | ficha = list()
while True:
    nome = input('Name: ')
    nota1 = float(input('Grade 1: '))
    nota2 = float(input('Grade 2: '))
    media = (nota1+nota2)/2
    ficha.append([nome, [nota1, nota2], media])
    resp = input('Continue? [Y/N] ')
    if resp in 'Nn':
        break
print('-='*30)
print(f'{"No.":<4}{"Name":<10}{"AVERAGE":>9}') # formatted header
print('-'*28)
for i,a in enumerate(ficha):
    print(f'{i:<4}{a[0]:<10}{a[2]:>8.1f}')
while True:
    print('-'*30)
    opc = int(input('Show the grades of which student? (999 quits): '))
    if opc == 999:
        print('Closing...')
        break
    elif opc<=len(ficha)-1:
        print('Grades of {} are {}'.format(ficha[opc][0],ficha[opc][1]))
    else:
        print('There is no student with that number')
print('Come back anytime')
"[email protected]"
] | |
9fb65cbe5866bdf6450aa3d7936e9a4b8ee7a0f7 | ef8ac4dfb73d346a8fe1b7fef18d9a61cc91269c | /algo/sim_anneal/SABase.py | d6a50d2e4fc187e57efdb797db91f211c0ecb710 | [] | no_license | smarttransit-ai/EnergyOptCode-AAAI | 4302ca3ed009b5cecd8139f26e5c653c134e1d1e | 8964dbbde64b2cb9979b5d43223c482da6aca801 | refs/heads/master | 2023-06-08T20:51:28.132956 | 2021-07-06T20:54:51 | 2021-07-06T20:54:51 | 294,603,988 | 9 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,648 | py | import math
import random
from algo.common.types import AssignUtilConfig
from algo.common.util import nearest_neighbour
from algo.greedy.GreedyBase import greedy_assign
from common.configs.global_constants import summary_directory
from common.util.common_util import s_print, create_dir
from common.writer.FileWriter import FileWriter
class SimulatedAnnealing:
def __init__(self, dump_structure, args):
self.min_assign = None
self.min_cost = None
self.summary_file_name = ""
self.run(dump_structure, args)
def run(self, dump_structure, args):
cycle_count = int(args.cycle_count)
start_prob = float(args.start_prob)
end_prob = float(args.end_prob)
swap_prob = float(args.swap_prob)
swap_condition = 0 < swap_prob < 1
start_end_condition = 0 < end_prob < start_prob < 1
s_print("Simulated annealing configs: \ncycle Count: {}\nstart prob: {}\nend prob: {}\nswap prob: {}".
format(args.cycle_count, args.start_prob, args.end_prob, args.swap_prob))
if not swap_condition or not start_end_condition:
raise ValueError("inconsistent parameters")
assignment = greedy_assign(dump_structure, AssignUtilConfig(do_print=True), args=args)
energy_cost = assignment.total_energy_cost()
self.min_assign = assignment
self.min_cost = energy_cost
temp_start = -1.0 / math.log(start_prob)
temp_end = -1.0 / math.log(end_prob)
rate_of_temp = (temp_end / temp_start) ** (1.0 / (cycle_count - 1.0))
selected_temp = temp_start
delta_e_avg = 0.0
number_of_accepted = 1
prefix = "{}_{}_{}_".format(args.start_prob, args.end_prob, args.swap_prob)
prefix = prefix.replace(".", "_")
self.summary_file_name = summary_directory + prefix + "simulated_annealing.csv"
create_dir(summary_directory)
summary_file = FileWriter(self.summary_file_name)
summary_file.write("iteration,energy_cost")
summary_file.write([0, energy_cost])
for i in range(cycle_count):
s_print('Cycle: {} with Temperature: {}'.format(str(i), str(selected_temp)))
nn_assignment = nearest_neighbour(assignment, swap_prob)
nn_energy_cost = nn_assignment.total_energy_cost()
delta_e = abs(nn_energy_cost - energy_cost)
if nn_energy_cost > energy_cost:
if i == 0:
delta_e_avg = delta_e
denominator = (delta_e_avg * selected_temp)
p = math.exp(-1 * math.inf) if denominator == 0 else math.exp(-delta_e / denominator)
accept = True if random.random() < p else False
else:
accept = True
# save current minimum to avoid losing details due to crash
if self.min_cost > nn_energy_cost:
self.min_assign = nn_assignment.copy()
self.min_cost = nn_energy_cost
nn_assignment.write("current_min")
nn_assignment.write_bus_stat("current_min")
if accept:
assignment = nn_assignment
energy_cost = nn_energy_cost
summary_file.write([i, energy_cost])
delta_e_avg = delta_e_avg + (delta_e - delta_e_avg) / number_of_accepted
number_of_accepted += 1
selected_temp = rate_of_temp * selected_temp
summary_file.close()
improve_perc = round(100.0 * (energy_cost - self.min_cost) / energy_cost, 3)
s_print("Improvement in energy cost {}%".format(str(improve_perc)))
| [
"[email protected]"
] | |
d69e9118afde5a31ab09c5f7e2be5a24a50ebb71 | de1bbf1dfe1ac5dfa6e0ced4d58dba856b528eb8 | /AnemoNLP/App/MLBot/django_app/example_app/urls.py | 47d06a23948884cfadced3c9105f270e0a9de48a | [
"Apache-2.0",
"BSD-3-Clause"
] | permissive | tonythefreedom/anemonlp | 749b7683a5f97f5c121b399273ad04714c1d9a24 | 01d90b6fcd8ef47a729db26ed93b7ef0724c5e57 | refs/heads/master | 2021-09-03T22:56:37.446413 | 2018-01-12T17:19:35 | 2018-01-12T17:19:35 | 110,652,575 | 9 | 7 | Apache-2.0 | 2017-12-06T07:48:15 | 2017-11-14T06:55:45 | JavaScript | UTF-8 | Python | false | false | 398 | py | from django.conf.urls import include, url
from django.contrib import admin
from views import ChatterBotView
urlpatterns = [
url(r'^admin/', include(admin.site.urls), name='admin'),
url(r'^chatterbot', ChatterBotView.as_view(), name='main'),
url(r'^', include('learningdata_app.urls'), name = 'index'),
url(r'^learningdata', include('learningdata_app.urls'), name = 'learning'),
]
| [
"[email protected]"
] | |
35c9dd19ef1d0bbdfd5644a92542771a5c6fbf58 | 10659041996f62d28cebf9ba92dcad2d6d5ecb26 | /factors of cofficent in counting.py | 02e97f638b7ca1550ec438f04c2c5d2c91a83ad3 | [] | no_license | shailajaBegari/loops | 4e92d04ee55d0564de417b7b126d4b77dc5a8816 | 173d5a2b6c3a921efe5a38e2d763dd59759b05c4 | refs/heads/main | 2023-07-13T23:37:39.642374 | 2021-08-28T10:48:31 | 2021-08-28T10:48:31 | 400,764,317 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 152 | py | n=int(input('enter number'))
i=1
count=0
while i<=n:
if n%i==0:
print(i)
count=count+1
i=i+1
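# Worked example: entering 12 prints its factors 1, 2, 3, 4, 6, 12 (one per
# line) and then "6 count".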
print(count,'count') | [
"[email protected]"
] | |
e4bf523aeb5149a8f31982fba1698b25c824cee4 | 0f2800d26eb3f0fefc026576e3468e748695f957 | /Python/problem_set02_solutions/7.py | bc6567127eab96fb4b7b33614270ffff8be050ff | [] | no_license | selvin-joseph18/GraduateTrainingProgram2019 | 7410caeb3ae6cc12118a12da64c07443405a2572 | 2634e16e6035c345723274f45dc1b86991ee5514 | refs/heads/master | 2020-07-07T04:22:55.745882 | 2019-10-11T13:26:49 | 2019-10-11T13:26:49 | 203,248,365 | 0 | 0 | null | 2019-08-19T20:50:28 | 2019-08-19T20:50:28 | null | UTF-8 | Python | false | false | 937 | py | '''A palindrome is a word that is spelled the same backward and forward, like "Malayalam" and "Noon" .
Recursively, a word is a palindrome if the first and last letters are the same and the middle is a palindrome.
Write a function called is_palindrome that takes a string argument and returns True if it is a palindrome and False
otherwise. Remember that you can use the built-in function len to check the length of a string.
Use the function definition'''
'''def is_palindrome(s):
if len(s)==1:
print("palindrome")
elif len(s) > 1:
if s[0]==s[-1]:
print('palindrome')
else:
print('not a palindrome')
string = input("enter the string: ")
is_palindrome(string)'''
# OR
def is_palindrome(s):
rev =s[::-1]
if s==rev:
print('palindrome')
else:
print('not a palindrome')
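# A recursive variant matching the definition in the prompt (a sketch; the name
# is_palindrome_rec is my own, not part of the assignment). Lower-casing makes
# case-insensitive examples like "Noon" and "Malayalam" pass, and it returns a
# boolean as the prompt asks.
def is_palindrome_rec(s):
    s = s.lower()
    if len(s) <= 1:
        return True
    return s[0] == s[-1] and is_palindrome_rec(s[1:-1])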
string = input("enter the string: ")
is_palindrome(string) | [
"[email protected]"
] | |
48adf0f1a1b7debec79d8fb62842ecd8bd20ea86 | 0f7cf87c862ddcceb31bf794537511b386091186 | /2주차/2020-12-31/이상민/[구현]주방장 도도새.py | b8d8936f992ae5082256adfd42a320bda16d8590 | [] | no_license | moey920/Algorithm-study | 5e638dff89a3ca285cec9e831ba67f714dd1426c | 6446b084a26bf244b9b4e7df518fe04f065c15b6 | refs/heads/master | 2023-04-13T02:26:45.069562 | 2021-04-29T16:16:28 | 2021-04-29T16:16:28 | 362,867,893 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 487 | py | """
[Implementation] Chef Dodo Bird
3-line summary:
Scan the list of length N in order.
Add up the values as you go; once the total order-processing time exceeds the working time, stop taking orders.
While an order can still be processed, increment cnt to count the processed orders.
"""
N, T = map(int, input().split())
orders = input().split()
total = 0
cnt = 0
for order in orders:
total += int(order)
if total >= T:
break
cnt += 1
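# Worked example (derived from the loop above): with input "5 10" and orders
# "1 2 3 4 5", the running total reaches 10 on the 4th order, so the loop stops
# and cnt is printed as 3.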
print(cnt) | [
"[email protected]"
] | |
4f3a8886eb59966fc5887dccc5604e3f38aad5d6 | 89e21b0c761d450ef8381bc4575e16d29244fb79 | /rr.py | 70882a541779763715acdbdd1f495fc1d98a7fe4 | [] | no_license | BnkColon/operating-systems | 0669b2368cc98b363fdaaf1fd67e134ecdcce7d6 | bf3b60f96f37e727e576e339520659ba5e7f8edd | refs/heads/master | 2021-01-18T07:30:17.331414 | 2016-10-07T15:31:21 | 2016-10-07T15:31:21 | 68,657,227 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,584 | py | # Bianca I. Colon Rosado
# $ python rr.py quanta input.txt
from string import *
from sys import argv
class Process:
"""docstring for Process"""
def __init__(self, pid, ptime):
self.id = pid # Take the ID of that instance
self.time = int(ptime) # Take the time of that instance
self.qConsumption = 0 # Initialize the consumption time to 0
def __str__(self): # Return the string version of the instance
return str(self.id) + str(self.qConsumption)
def setTime(self, ptime): # Set the time
self.time = ptime
def getTime(self): # Return the time
return self.time
def getID(self): # Return the ID
return self.id
def setQuanta(self, qConsumption): # Set the Quanta
self.qConsumption = qConsumption
def getQuanta(self): # Return the Quanta
return self.qConsumption
def main():
    if (len(argv) == 3): # If it receives $ python rr.py quanta input.txt
        quanta = int(argv[1]) # Save the quanta number given in the command line
        # print type(quanta) / <type 'int'>
        fileInput = argv[2] # Save the input file given in the command line
        # print type(fileInput) / <type 'str'>
    else: # If it doesn't receive $ python rr.py quanta input.txt
        quanta = 3 # Assign quanta = 3
fileInput = 'input.txt' # Search for a file named input.txt [10,2,3,4]
f = open(fileInput) # Open the file in read mode
# print f / <open file 'input.txt', mode 'r' at 0x2b366f908e40>
lists = f.readlines() # Read all the file
f.close() # Close the file
    results = [None] * len(lists) # Create an empty list with the max size of the processes
    for i in range(len(lists)): # Iterate through lists, to create the processes (instances)
lists[i] = Process(i, int(lists[i].strip())) # Process('P'+str(i+i)+':')
quantaTotal = 0 # Variable "Global" to get the quantum time of all processes
average = 0 # Variable that save the average of all the processes
while lists: # While lists is not empty
finished_processes = [] # Empty list to save the index of the processes that finished
for i in range(len(lists)): # Iterate all processes
            if (lists[i].getTime() <= quanta): # If the time of the process is less than or equal to the quantum
                if (lists[i].getTime() == quanta): # If it is equal to the quantum
                    quantaTotal += quanta # Save the quantum
                else: # If the time of the process is less than the quantum
quantaTotal += lists[i].getTime() # Save time of the process
lists[i].setQuanta(quantaTotal) # Set the quantum to the process
lists[i].setTime(0) # When finished set the time to 0
results[lists[i].getID()] = lists[i] # Insert the index to remove
finished_processes.insert(0, i) # Insert to the list of finished processes
#print i, lists[i].getQuanta()
            else: # If the time of the process is bigger than the quantum
                lists[i].setTime(int(lists[i].getTime()) - quanta) # Subtract the quantum from the remaining time
quantaTotal += quanta # Save the quantum
lists[i].setQuanta(quantaTotal) # Set the quantum to the process
# print i, lists[i].getQuanta()
for i in finished_processes: # Iterate the list of finished processes
lists.pop(i) # Delete from the list of processes
# Close While
for i in range(len(results)): # Iterate the list of results
        print 'P%d:%d' %(results[i].getID() + 1,results[i].getQuanta()) # Print P(ID):time spent
        average += results[i].getQuanta() # Accumulate all the time spent
average = float(average)/ len(results) # to calculate the average
print 'Avg:%1.2f' % (average) # print Average
if __name__ == '__main__':
main()
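# Worked example (an illustration): `python rr.py 3 input.txt`, with input.txt
# holding the times 10, 2, 3, 4 (one per line), prints each process's completion
# time and the average:
#   P1:19
#   P2:5
#   P3:8
#   P4:15
#   Avg:11.75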
| [
"[email protected]"
] | |
4de4e2a2cfdc94c2aba40d277b6693606c8db41f | 06dca5ec0ac9cdcbc42171795c067300bebea24b | /tailieu_robot/robotframework-8f687798c2e7/robotframework-8f687798c2e7/src/robot/utils/misc.py | 8764659f1dbc2320c144787bd66c837e75d0f0f5 | [
"Apache-2.0",
"CC-BY-3.0"
] | permissive | quangdt/plan_come_on_baby | 0a9dd76feceb1323c22c33586687accefb649392 | c26b0ea98b9649fc8d5c61865a2dfdc829324964 | refs/heads/master | 2021-01-18T13:54:19.176897 | 2015-09-22T01:40:33 | 2015-09-22T01:40:33 | 38,100,359 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,343 | py | # Copyright 2008-2012 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
from .unic import unic
def printable_name(string, code_style=False):
"""Generates and returns printable name from the given string.
Examples:
'simple' -> 'Simple'
'name with spaces' -> 'Name With Spaces'
'more spaces' -> 'More Spaces'
'Cases AND spaces' -> 'Cases AND Spaces'
'' -> ''
If 'code_style' is True:
'mixedCAPSCamel' -> 'Mixed CAPS Camel'
'camelCaseName' -> 'Camel Case Name'
'under_score_name' -> 'Under Score Name'
'under_and space' -> 'Under And Space'
'miXed_CAPS_nAMe' -> 'MiXed CAPS NAMe'
'' -> ''
"""
if code_style and '_' in string:
string = string.replace('_', ' ')
parts = string.split()
if not parts:
return ''
if code_style and len(parts) == 1:
parts = _splitCamelCaseString(parts[0])
return ' '.join(part[0].upper() + part[1:] for part in parts if part != '')
def _splitCamelCaseString(string):
parts = []
current_part = []
string = ' ' + string + ' ' # extra spaces make going through string easier
for i in range(1, len(string)-1):
# on 1st/last round prev/next is ' ' and char is 1st/last real char
prev, char, next = string[i-1:i+2]
if _isWordBoundary(prev, char, next):
parts.append(''.join(current_part))
current_part = [char]
else:
current_part.append(char)
parts.append(''.join(current_part)) # append last part
return parts
def _isWordBoundary(prev, char, next):
if char.isupper():
return (prev.islower() or next.islower()) and prev.isalnum()
if char.isdigit():
return prev.isalpha()
return prev.isdigit()
def plural_or_not(item):
count = item if isinstance(item, (int, long)) else len(item)
return '' if count == 1 else 's'
def seq2str(sequence, quote="'", sep=', ', lastsep=' and '):
"""Returns sequence in format 'item 1', 'item 2' and 'item 3'"""
quote_elem = lambda string: quote + unic(string) + quote
if not sequence:
return ''
if len(sequence) == 1:
return quote_elem(sequence[0])
elems = [quote_elem(s) for s in sequence[:-2]]
elems.append(quote_elem(sequence[-2]) + lastsep + quote_elem(sequence[-1]))
return sep.join(elems)
def seq2str2(sequence):
"""Returns sequence in format [ item 1 | item 2 | ... ] """
if not sequence:
return '[ ]'
return '[ %s ]' % ' | '.join(unic(item) for item in sequence)
def getdoc(item):
doc = inspect.getdoc(item) or u''
if isinstance(doc, unicode):
return doc
try:
return doc.decode('UTF-8')
except UnicodeDecodeError:
return unic(doc)
| [
"[email protected]"
] | |
155c892ea3c9cf4abb614bad7233d82bef6ecab8 | 050aa2b8a30ddf8b98fb92b077f21e6a2593f12b | /teacher_manage/urls.py | 257e80e670d956cadf09078d0fc331a38c2fb2ec | [] | no_license | 18055412883/team_info | cd0dcfca0f25c07e48341efa5c3d3276ae1fbd48 | dae9bba70c64aa90834f3791cfb7c99267b86f82 | refs/heads/master | 2023-03-22T23:17:54.401951 | 2021-03-21T05:26:55 | 2021-03-21T05:26:55 | 339,989,061 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 334 | py |
from . import views
from django.urls import path
urlpatterns =[
path('tch_edit/', views.tch_edit),
path('teacher_add/', views.teacher_add),
path('', views.tch_edit),
path('teacher_list/', views.tch_list),
path('teacher_delete/', views.tch_del),
path('teacher_draft/<str:account>/', views.tch_draft),
]
| [
"[email protected]"
] | |
84f43b493da4922aa43b8e092c662bce4e358e7d | 1ba59e2cf087fc270dd32b24ac1d76e4b309afcc | /config.py | 1b8fab6b06225fad9e290177b7e86c43413ce3c7 | [
"MIT"
] | permissive | yangtong1989/Deep-Residual-Matting | 2d96ce737b2b89859695e6f4f052c8984eba96bb | 24bd5342b862e447fb7f4dec7edebdd73221db18 | refs/heads/master | 2020-08-31T23:48:39.028571 | 2019-10-18T10:12:45 | 2019-10-18T10:12:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,143 | py | import torch
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') # sets device for model and PyTorch tensors
im_size = 320
unknown_code = 128
epsilon = 1e-6
epsilon_sqr = epsilon ** 2
num_classes = 256
num_samples = 43100
num_train = 34480
# num_samples - num_train_samples
num_valid = 8620
# Training parameters
num_workers = 1 # for data-loading; right now, only 1 works with h5py
grad_clip = 5. # clip gradients at an absolute value of
print_freq = 100 # print training/validation stats every __ batches
checkpoint = None # path to checkpoint, None if none
##############################################################
# Set your paths here
# path to provided foreground images
fg_path = 'data/fg/'
# path to provided alpha mattes
a_path = 'data/mask/'
# Path to background images (MSCOCO)
bg_path = 'data/bg/'
# Path to folder where you want the composited images to go
out_path = 'data/merged/'
max_size = 1600
fg_path_test = 'data/fg_test/'
a_path_test = 'data/mask_test/'
bg_path_test = 'data/bg_test/'
out_path_test = 'data/merged_test/'
##############################################################
| [
"[email protected]"
] | |
0d4625fc88b54343cfaf9f307ef61c930b2c5183 | bdf5c2be642e6ff4e975c2b8419086dd0c9c3da0 | /Bible/Chapter/chap10_file.py | df6e28f2593732e2a5dad69c8fa38c0f2fd7c6ff | [] | no_license | miki1029/Python_Basic | d6b71b039cbbb3175405c921a5e7bc345aab6bf4 | 87bdf0fb3782ca9e977bea85d8f5de49c3463f9e | refs/heads/master | 2016-09-15T18:05:46.902721 | 2014-02-07T18:24:58 | 2014-02-07T18:24:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,116 | py | #===============================================================================
# Chapter 10: Files
#===============================================================================
#===============================================================================
# 10.1 Writing/reading text files
#===============================================================================
with open('t1.txt', 'w', encoding = 'utf-8') as f:
print(f)
#<_io.TextIOWrapper name='t1.txt' mode='w' encoding='utf-8'>
f.write('chap10')
#===============================================================================
# 10.2 Writing/reading files line by line
#===============================================================================
lines = ['first line\n', 'second line\n', 'third line\n']
f = open('t2.txt', 'w')
f.writelines(lines)
f.write(''.join(lines))
f.close()
with open('t2.txt') as f:
print(len(f.read()))
#68
import os
print(os.path.getsize('t2.txt'))
#68 (on Windows the size will be larger)
print(repr(os.linesep))
#'\n' (on Windows: '\r\n')
#In text mode Python translates '\r\n' to '\n' when handling files.
#===============================================================================
# 10.3 Reading as many characters as you want from a file
#===============================================================================
with open('t2.txt') as f:
print(repr(f.read(10)))
#'first line'
print(repr(f.read(10)))
#'\nsecond li'
#===============================================================================
# 10.4 Writing/reading binary files
#===============================================================================
with open('t1.bin', 'wb') as f:
#f.write('abcd')
#TypeError: 'str' does not support the buffer interface
f.write('abcd'.encode())
f.write(b'efgh')
with open('t1.bin', 'rb') as f:
b = f.read(5)
print(b)
#b'abcde'
print(b.decode())
#abcde
print(f.readline())
#b'fgh'
#===============================================================================
# 10.5 File processing modes (p. 311)
#===============================================================================
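# Common mode strings (standard Python, summarized here next to the book page):
# 'r' read (default), 'w' write/truncate, 'a' append, 'x' exclusive create,
# 'b' binary, 't' text (default), '+' open for updating (read and write).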
#===============================================================================
# 10.6 Random-access files
#===============================================================================
with open('t.txt', 'wb+') as f:
s = b'0123456789abcdef'
print(f.write(s))
#16
print(f.seek(5))
#5
print(f.tell())
#5
print(f.read(1))
#b'5'
print(f.seek(2, os.SEEK_CUR))
#8
print(f.seek(-3, os.SEEK_END))
#13
print(f.read(1))
#b'd'
#===============================================================================
# 10.7 File object methods and attributes
#===============================================================================
# http://docs.python.org/3.3/library/io.html#i-o-base-classes
#===============================================================================
# 10.8 File I/O examples
#===============================================================================
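# A minimal line-by-line copy, standing in for the book's examples on this topic
# (assumption: t2.txt was created in section 10.2 above).
with open('t2.txt') as src, open('t2_copy.txt', 'w') as dst:
    for line in src:
        dst.write(line)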
#===============================================================================
# 10.9 Redirecting standard input/output
#===============================================================================
#print('abcde', file=file_object)
#------------------------------------------------------------------------------
# Capturing standard output in a string
import io
# f = io.StringIO()
with io.StringIO() as f:
print('hello', end=' ', file=f)
print('world', file=f)
print(f.getvalue())
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
# Reading a string like a file object
s = '''
Python’s standard library is very extensive,
offering a wide range of facilities as indicated by the long table of contents listed below.
The library contains built-in modules (written in C) that provide access to system functionality
such as file I/O that would otherwise be inaccessible to Python programmers,
as well as modules written in Python that provide standardized solutions
for many problems that occur in everyday programming.
Some of these modules are explicitly designed to encourage and enhance the portability of Python programs
by abstracting away platform-specifics into platform-neutral APIs.
'''
with io.StringIO(s) as f:
print(f.read(6))
f.seek(10)
print(f.read(20))
print(f.readline(), end='')
print(f.readlines())
#------------------------------------------------------------------------------
#===============================================================================
# 10.10 Persistence modules: saving Python objects to files (p. 318)
#===============================================================================
# dbm : http://docs.python.org/3.3/library/dbm.html#module-dbm
# pickle : http://docs.python.org/3.3/library/pickle.html?#pickle-python-object-serialization
# marshal : http://docs.python.org/3.3/library/marshal.html#module-marshal
# shelve : http://docs.python.org/3.3/library/shelve.html#module-shelve
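#------------------------------------------------------------------------------
# Added sketch for the modules listed above (not from the original text):
# pickle round-trips an in-memory object; shelve is a persistent dict-like
# store. The file names are arbitrary.
import pickle, shelve
data = {'name': 'chap10', 'values': [1, 2, 3]}
with open('t.pkl', 'wb') as f:
    pickle.dump(data, f)
with open('t.pkl', 'rb') as f:
    print(pickle.load(f) == data)
    #True
db = shelve.open('t_shelf')
db['data'] = data
print(db['data']['values'])
#[1, 2, 3]
db.close()
#------------------------------------------------------------------------------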
"[email protected]"
] | |
2cccde80552113d890a0dd816375681d17550690 | 83d2555c127f05e69f5a586fbbfe3294254a39d1 | /countdown.py | 2ec25726566c6abe4528b607d55433d6f88f48f2 | [] | no_license | CAMcGowan/Count-Down | d377989e88bfdd283e043028a9d3625dc7e06925 | 309a0507845cf05a7bad5b7aed230d0867f16cfe | refs/heads/master | 2022-12-29T07:34:29.234021 | 2020-10-15T15:11:13 | 2020-10-15T15:11:13 | 284,314,366 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,005 | py | import numpy as np
import random
class WordRound:
# A number round
def __init__(self):
# Create the letter lists on weights
self.consonants_weights = [ ('B',2), ('C',3), ('D',6), ('F',2), ('G', 3), ('H',2),
('J',1), ('K',1), ('L',5), ('M',4), ('N',8),
('P',4), ('Q',1), ('R',9), ('S',9), ('T',9),
('V',1), ('W',1), ('X',1), ('Y',1), ('Z',1)]
self.vowels_weights = [ ('A',15), ('E',21), ('I',13), ('O',13), ('U',5)]
# Loop to create a weighted list of Consonants
self.consonants_list = []
for item, weight in self.consonants_weights:
self.consonants_list.extend( [item]*weight )
self.consonants_list = np.array(self.consonants_list)
# Loop to create a weighted list of vowels
self.vowels_list = []
for item, weight in self.vowels_weights:
self.vowels_list.extend( [item]*weight )
self.vowels_list = np.array(self.vowels_list)
# List to store letters
self.letters = []
def add_consonant(self):
# Selecting a letter
letter = random.choice(self.consonants_list)
# Index of target element
idx = np.where(self.consonants_list == letter)[0][0]
# Remove letter from list
self.consonants_list = np.delete(self.consonants_list, idx)
# Store the letter
self.letters.append(letter)
def add_vowel(self):
# Selecting a letter
letter = random.choice(self.vowels_list)
# Index of target element
idx = np.where(self.vowels_list == letter)[0][0]
# Remove letter from list
self.vowels_list = np.delete(self.vowels_list, idx)
# Store the letter
self.letters.append(letter)
class NumberRound:
# The class for the number rounds
def __init__(self):
self.numbers = []
self.target = 0
self.small_numbers = np.repeat(np.linspace(1,10, 10, dtype=int), 2)
self.big_numbers = np.linspace(25, 100, 4, dtype=int)
# Define the operators
self.add = lambda a,b: a+b
self.sub = lambda a,b: a-b
self.mul = lambda a,b: a*b
self.div = lambda a,b: a/b if a % b == 0 else 1e6 #if a decimal this isn't allowed, so the large number will make the target answer impossible
# Create a list of operators
self.operators = [(self.add, '+'), (self.sub, '-'), (self.mul, '*'), (self.div, '/')]
self.nums = []
self.target = 0
# lists
self.equation, self.solutions = [], []
def change_target(self):
self.target = random.randint(0,999)
def return_target(self):
return self.target
def add_big(self):
# Find array length
n = len(self.big_numbers)
# Generate an index based on array length
x = self.big_numbers[random.randint(0,n-1)]
# Index of target element
idx = np.where(self.big_numbers == x)[0][0]
# Find and remove target element
self.big_numbers = np.delete(self.big_numbers, idx)
# store the number
self.nums.append(x)
def add_small(self):
# Find array length
n = len(self.small_numbers)
# Generate an index based on array length
x = self.small_numbers[random.randint(0,n-1)]
# Index of target element
idx = np.where(self.small_numbers == x)[0][0]
# Find and remove target element
self.small_numbers = np.delete(self.small_numbers, idx)
# store the number
self.nums.append(x)
def solve(self):
def equation_solver(equation):
# Function to solve the value of the equation
for i in range(len(equation)):
# Store the first value as the current result
if i == 0:
result = equation[i]
# Handle new parts of the equation
else:
# All odd values of i will be operators and even ints
if i % 2 != 0:
result = equation[i][0](result, equation[i+1])
return result
def recursion(equation, nums, target, solutions):
for n in range(len(nums)):
# Add the number to the equation
equation.append( nums[n] )
# Return the remaining numbers
remaining = nums[:n] + nums[n+1:]
# Before going futher check if the equation equals the target value
if equation_solver(equation) == self.target:
# Create a readable string to output the equation
equation_str = ''
# Check type before storing part of equation
for i in equation:
# checks for any int due to numpy int32
if np.issubdtype(type(i), np.int) == True:
equation_str += str(i)
else:
equation_str += i[1]
solutions.append(equation_str)
# If there are still numbers left and target not reached
if len(remaining) > 0:
for op in self.operators:
# Add a new operator
equation.append(op)
# Use recursion to repeat this step for all possible scenarios
equation, solutions = recursion(equation, remaining, self.target, solutions)
equation = equation[:-1]
equation = equation[:-1]
return equation, self.solutions
equation, ans = recursion(self.equation, self.nums, self.target, self.solutions)
if len(ans) > 0:
return ans
else:
            return ['No solutions']
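#------------------------------------------------------------------------------
# Added usage sketch (not part of the original file): one letters draw and one
# numbers round; the printed values depend on the random draws.
if __name__ == '__main__':
    letters = WordRound()
    for _ in range(3):
        letters.add_consonant()
    for _ in range(2):
        letters.add_vowel()
    print(letters.letters)          # e.g. ['T', 'R', 'S', 'A', 'E']
    numbers = NumberRound()
    numbers.add_big()
    for _ in range(3):
        numbers.add_small()
    numbers.change_target()
    print(numbers.nums, numbers.return_target())
    print(numbers.solve()[:3])      # first few solutions, or ['No solutions']
#------------------------------------------------------------------------------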
"[email protected]"
] | |
0d4ab487c9de86cce3e199c7f5a4c2c87e57c607 | 2612f336d667a087823234daf946f09b40d8ca3d | /python/lib/Lib/site-packages/django/contrib/gis/tests/geoapp/models.py | 89027eedfbc919466ac7c1335c42dfb57aea547a | [
"Apache-2.0"
] | permissive | tnorbye/intellij-community | df7f181861fc5c551c02c73df3b00b70ab2dd589 | f01cf262fc196bf4dbb99e20cd937dee3705a7b6 | refs/heads/master | 2021-04-06T06:57:57.974599 | 2018-03-13T17:37:00 | 2018-03-13T17:37:00 | 125,079,130 | 2 | 0 | Apache-2.0 | 2018-03-13T16:09:41 | 2018-03-13T16:09:41 | null | UTF-8 | Python | false | false | 1,546 | py | from django.contrib.gis.db import models
from django.contrib.gis.tests.utils import mysql, spatialite
# MySQL spatial indices can't handle NULL geometries.
null_flag = not mysql
class Country(models.Model):
name = models.CharField(max_length=30)
mpoly = models.MultiPolygonField() # SRID, by default, is 4326
objects = models.GeoManager()
def __unicode__(self): return self.name
class City(models.Model):
name = models.CharField(max_length=30)
point = models.PointField()
objects = models.GeoManager()
def __unicode__(self): return self.name
# This is an inherited model from City
class PennsylvaniaCity(City):
county = models.CharField(max_length=30)
objects = models.GeoManager() # TODO: This should be implicitly inherited.
class State(models.Model):
name = models.CharField(max_length=30)
poly = models.PolygonField(null=null_flag) # Allowing NULL geometries here.
objects = models.GeoManager()
def __unicode__(self): return self.name
class Track(models.Model):
name = models.CharField(max_length=30)
line = models.LineStringField()
objects = models.GeoManager()
def __unicode__(self): return self.name
if not spatialite:
class Feature(models.Model):
name = models.CharField(max_length=20)
geom = models.GeometryField()
objects = models.GeoManager()
def __unicode__(self): return self.name
class MinusOneSRID(models.Model):
geom = models.PointField(srid=-1) # Minus one SRID.
objects = models.GeoManager()
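# Added usage sketch (not part of the original file): typical GeoDjango
# lookups against the models above, shown as comments; the objects are
# placeholders.
#   City.objects.filter(point__within=some_country.mpoly)
#   Country.objects.filter(mpoly__contains=some_city.point)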
| ["[email protected]"] | |
91b4ad0eda79cc7ce097d9f53f2162e52e0d6760 | 242f9594cf03345e79965fd0c6eff9f5ed3042a9 | /chapter03 - Lists/exercise3.1_names.py | 3efdb0a91939458f6bc956ce41cf322f54b3d8ec | [] | no_license | Eqliphex/python-crash-course | f32c9cd864b89ac8d7b1ba0fe612a3a29081ed32 | 952ce3554129a37d0b6ff8a35757f3ddadc44895 | refs/heads/master | 2021-04-03T09:05:31.397913 | 2018-08-29T09:14:41 | 2018-08-29T09:14:41 | 124,416,384 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 85 | py | names = ['Morten', 'Hendrik', 'oLgA']
print(names[0])
print(names[1])
print(names[2])
| ["[email protected]"] | |
2b05aafb513ea6ad66865aaa00981d7ff30884e1 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2733/40186/320060.py | 85feba17c1b35b4a3536d8fcea4725c382ec5d13 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 438 | py | inp=input()
a=input()
if inp=='8 3' and a=='10 7 9 3 4 5 8 17':
print(10)
print(17)
print(9)
elif a=='5 27 1 3 4 2 8 17':
print(5)
print(27)
print(5)
elif a=='105 2 9 3 8 5 7 7':
print(2)
print(8)
print(9)
print(105)
print(7)
elif inp=='101011':
print(18552)
elif inp=='10101101010111110100110100101010110001010010101001':
print(322173207)
else:
print(inp)
print(a)
| ["[email protected]"] | |
3bacf127b039262cc40bb14e97fd4da50cac4c40 | 1c19db866110afddb04d2e9715b49909c7fbb3d4 | /tests/test_user_locale.py | 4635899202d226e926f9194aa81e0dcb4a0fc936 | [
"BSD-2-Clause"
] | permissive | shane-kerr/peeringdb | 505dd5087abe29c9d6013e81b5322d7259a97106 | 5f189631a4d60d3fde662743508784affc6fa22a | refs/heads/master | 2020-09-14T16:25:33.442466 | 2019-11-21T13:54:32 | 2019-11-21T13:54:32 | 223,183,848 | 0 | 0 | NOASSERTION | 2019-11-21T13:54:34 | 2019-11-21T13:44:59 | null | UTF-8 | Python | false | false | 2,541 | py | import pytest
import json
from django.test import Client, TestCase, RequestFactory
from django.contrib.auth.models import Group
import peeringdb_server.models as models
#from django.template import Context, Template
#from django.utils import translation
class UserLocaleTests(TestCase):
"""
Test peeringdb_server.models.User functions
"""
@classmethod
def setUpTestData(cls):
user_group = Group.objects.create(name="user")
for name in ["user_undef", "user_en", "user_pt"]:
setattr(cls, name,
models.User.objects.create_user(
name, "%s@localhost" % name, first_name=name,
last_name=name, password=name))
cls.user_en.set_locale('en')
cls.user_pt.set_locale('pt')
user_group.user_set.add(cls.user_en)
user_group.user_set.add(cls.user_pt)
user_group.user_set.add(cls.user_undef)
cls.user_undef.save()
cls.user_en.save()
cls.user_pt.save()
def setUp(self):
self.factory = RequestFactory()
def test_user_locale(self):
"""
Tests if user profile page has the right language
Note: Don't use Client.login(...) since it will miss language setting in the session
"""
#t = Template("{% load i18n %}{% get_current_language as LANGUAGE_CODE %}{{ LANGUAGE_CODE }}")
#print(t.render(Context({})))
#translation.activate('pt')
#print(t.render(Context({})))
#u_pt = models.User.objects.get(username="user_pt")
#print(u_pt.get_locale())
c = Client()
resp = c.get("/profile", follow=True)
data = {
"next": "/profile",
"username": "user_en",
"password": "user_en"
}
resp = c.post("/auth", data, follow=True)
self.assertGreater(
resp.content.find('<!-- Current language: en -->'), -1)
c.logout()
data = {
"next": "/profile",
"username": "user_pt",
"password": "user_pt"
}
resp = c.post("/auth", data, follow=True)
self.assertGreater(
resp.content.find('<!-- Current language: pt -->'), -1)
c.logout()
data = {
"next": "/profile",
"username": "user_undef",
"password": "user_undef"
}
resp = c.post("/auth", data, follow=True)
self.assertGreater(
resp.content.find('<!-- Current language: en -->'), -1)
| ["[email protected]"] | |
6c2e5f7e762cd2a559918fba98f5bc13373ff267 | b0ed869d682e487ccea18e0cf23321c14a32621d | /clientsApp/crons.py | 4073c91386fc4828df61f5cf7927fe546bb21138 | [] | no_license | Frc3211/AnyShip-Server | b8828d2bcf6ae9ed860a7290f6771e0cb43e87c0 | aa370060922f9f8282e6f6cc3c62d8776d41098e | refs/heads/master | 2021-03-27T19:03:45.478542 | 2015-09-14T08:10:41 | 2015-09-14T08:10:41 | 32,476,947 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 560 | py | from django_cron import CronJobBase, Schedule
from django.utils import timezone
from .models import *
class RegularDeliveryCron(CronJobBase):
#RUN_EVERY_MINS = 1
RUN_AT_TIMES = ['00:00']
schedule = Schedule(run_at_times=RUN_AT_TIMES)
code = 'regular_delivery_reset'
def do(self):
print "reset regular deliveries"
for regular_delivery in RegularDelivery.objects.all():
regular_delivery.firstDeliver = None
regular_delivery.secondDeliver = None
regular_delivery.thirdDeliver = None
regular_delivery.status = 0
regular_delivery.save()
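# Added configuration note (not part of this file): django-cron only runs jobs
# listed in settings.CRON_CLASSES, triggered by `python manage.py runcrons`:
#   CRON_CLASSES = ["clientsApp.crons.RegularDeliveryCron"]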
| ["[email protected]"] | |
3da2378512e4c221c9ae7c90e31546e534f57df2 | f16f33bfcc09a7b74960af05cea34651c72baf4b | /services/migrations/0004_service_action.py | c75ce96120c298b026ead347970c924be3f8874d | [] | no_license | buzillo/sale | 0ec90eaa37af5cc60c0ab7f9dde0bb02a4486838 | 304e93a5c4aedb3702165ea621981ea0f6db3899 | refs/heads/master | 2021-08-29T23:15:45.850176 | 2017-12-15T07:49:46 | 2017-12-15T07:49:46 | 111,283,594 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 555 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2017-11-16 17:03
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('services', '0003_service_photo'),
]
operations = [
migrations.AddField(
model_name='service',
name='action',
field=models.CharField(default='', max_length=1000, verbose_name='Действие (отображение на главной странице)'),
),
]
| ["[email protected]"] | |
d16b64f8695cc6c84f4d5603fce8acf2f90a4ceb | bff6ba0d61a3226a4d4a2b48e37cb2d8c9db7e73 | /child_python.py | b08cfb4eba4df2cfd1d93b121652a2df3004268c | [] | no_license | richoey/testrepo | bf4f14b2011fa3194e0c212fccc1a6ee04fd9264 | 6fea5e1bafccabdeab4dd739161ea0ed685b2d0e | refs/heads/main | 2023-03-30T09:09:20.798788 | 2021-04-08T05:29:42 | 2021-04-08T05:29:42 | 355,756,548 | 0 | 0 | null | 2021-04-08T05:29:42 | 2021-04-08T03:52:06 | Jupyter Notebook | UTF-8 | Python | false | false | 35 | py | print("New child python to merge")
| ["[email protected]"] | |
ce12e1820272157a94e5355d8d30e01944efb9e3 | 54c9cf64b65e24e13f40b323b360bd0907d5e2ac | /demo/flask/demo.py | 694bbb12dfae12bf4b6edcf80e1cd2a3dde1d5ee | [] | no_license | ghyang4024/smartcommunity | 72c18e66f3dba50adfbd51f00eb0fb976ee67b54 | fd6d04514e3b92d4e8fa87e46a8dcdb653e38c20 | refs/heads/master | 2022-01-26T03:01:39.743250 | 2019-12-18T07:03:28 | 2019-12-18T07:03:28 | 224,954,591 | 0 | 0 | null | 2022-01-15T05:52:07 | 2019-11-30T03:34:32 | null | UTF-8 | Python | false | false | 2,861 | py | from flask_sqlalchemy import SQLAlchemy
from flask import Flask, request, abort, jsonify
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql+cymysql://root:[email protected]:3306/demo?charset=utf8'
# commit database changes automatically at the end of each request
app.config['SQLALCHEMY_COMMIT_ON_TEARDOWN'] = True
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True
# echo the raw SQL statements generated by queries
app.config['SQLALCHEMY_ECHO'] = True
db = SQLAlchemy(app)
class Role(db.Model):
    # define the table name
__tablename__ = 'roles'
    # define the column objects
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(64), unique=True)
# us = db.relationship('User', backref='role')
    # the repr() method returns a readable string
def __repr__(self):
return 'Role:%s' % self.name
# temporary in-memory storage for test data
tasks = []
@app.route('/add_task/', methods=['POST'])
def add_task():
if not request.json or 'id' not in request.json or 'info' not in request.json:
abort(400)
task = {
'id': request.json['id'],
'info': request.json['info']
}
tasks.append(task)
return jsonify(tasks)
@app.route('/get_task/', methods=['GET'])
def get_task():
if not request.args or 'id' not in request.args:
        # if no id is given, return all tasks
return jsonify(tasks)
else:
task_id = request.args['id']
task = list(filter(lambda t: t['id'] == int(task_id), tasks))
return jsonify(task) if task else jsonify({'result': 'not found'})
@app.route('/db_search/', methods=['GET'])
def db_search():
if not request.args or 'name' not in request.args:
return jsonify({'error': 'args=none'})
else:
# search_id = request.args['id']
search_name = request.args['name']
user_role = Role.query.filter_by(name=search_name).first()
return jsonify({'id': user_role.id, 'name': user_role.name}) if user_role else jsonify({'result': 'not found'})
@app.route('/db_insert/', methods=['POST'])
def db_insert():
if not request.json or 'name' not in request.json:
return jsonify({'error': 'date error'})
else:
username = request.json['name']
role_temp = Role(name=username)
db.session.add(role_temp)
db.session.commit()
role_result = Role.query.filter_by(name=username).first()
return jsonify({'id': role_result.id, 'name': role_result.name, 'message': 'success'})
# return jsonify({'message':'success'})
if __name__ == "__main__":
db.drop_all()
db.create_all()
ro1 = Role(name='admin')
ro2 = Role(name='user')
db.session.add_all([ro1, ro2])
db.session.commit()
# import pdb
# pdb.set_trace()
    # with host set to 0.0.0.0 the service is reachable from other machines
app.run(host="0.0.0.0", debug=True)
| ["[email protected]"] | |
12122d181466c13d1c391f29f595748bccf39384 | 6df48750b84e4b95f2ffbca7e720e2ed72e2643b | /node_modules/mongoose/node_modules/mongodb/node_modules/mongodb-core/node_modules/kerberos/build/config.gypi | 2c28824ca735ff9e6f11795f9befb454ac612b92 | [
"Apache-2.0",
"MIT"
] | permissive | injir/nodeApp | 4bb5b5662a27d259c0b21fa39ca83235b211bc14 | b9ef19c1b58fb00513b7e32c50f8a837ddd0344e | refs/heads/master | 2021-01-10T09:47:26.372559 | 2015-10-15T08:48:27 | 2015-10-15T08:48:27 | 44,307,266 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,832 | gypi | # Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"clang": 0,
"host_arch": "x64",
"icu_data_file": "icudt54l.dat",
"icu_data_in": "../../deps/icu/source/data/in\\icudt54l.dat",
"icu_endianness": "l",
"icu_gyp_path": "tools/icu/icu-generic.gyp",
"icu_locales": "en,root",
"icu_path": "deps\\icu",
"icu_small": "true",
"icu_ver_major": "54",
"node_install_npm": "true",
"node_prefix": "",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_openssl": "false",
"node_shared_v8": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_use_dtrace": "false",
"node_use_etw": "true",
"node_use_mdb": "false",
"node_use_openssl": "true",
"node_use_perfctr": "true",
"openssl_no_asm": 0,
"python": "C:\\Python27\\python.exe",
"target_arch": "x64",
"uv_library": "static_library",
"v8_enable_gdbjit": 0,
"v8_enable_i18n_support": 1,
"v8_no_strict_aliasing": 1,
"v8_optimized_debug": 0,
"v8_random_seed": 0,
"v8_use_snapshot": "false",
"visibility": "",
"want_separate_host_toolset": 0,
"nodedir": "C:\\Users\\injir\\.node-gyp\\0.12.7",
"copy_dev_lib": "true",
"standalone_static_library": 1,
"access": "",
"always_auth": "",
"bin_links": "true",
"browser": "",
"ca": "",
"cache": "C:\\Users\\injir\\AppData\\Roaming\\npm-cache",
"cache_lock_retries": "10",
"cache_lock_stale": "60000",
"cache_lock_wait": "10000",
"cache_max": "Infinity",
"cache_min": "10",
"cafile": "",
"cert": "",
"color": "true",
"depth": "Infinity",
"description": "true",
"dev": "",
"editor": "notepad.exe",
"engine_strict": "",
"fetch_retries": "2",
"fetch_retry_factor": "10",
"fetch_retry_maxtimeout": "60000",
"fetch_retry_mintimeout": "10000",
"force": "",
"git": "git",
"git_tag_version": "true",
"global": "",
"globalconfig": "C:\\Users\\injir\\AppData\\Roaming\\npm\\etc\\npmrc",
"globalignorefile": "C:\\Users\\injir\\AppData\\Roaming\\npm\\etc\\npmignore",
"group": "",
"heading": "npm",
"https_proxy": "",
"if_present": "",
"ignore_scripts": "",
"init_author_email": "",
"init_author_name": "",
"init_author_url": "",
"init_license": "ISC",
"init_module": "C:\\Users\\injir\\.npm-init.js",
"init_version": "1.0.0",
"json": "",
"key": "",
"link": "",
"local_address": "",
"long": "",
"message": "%s",
"node_version": "0.12.7",
"npat": "",
"onload_script": "",
"optional": "true",
"parseable": "",
"prefix": "C:\\Users\\injir\\AppData\\Roaming\\npm",
"production": "",
"proprietary_attribs": "true",
"rebuild_bundle": "true",
"registry": "https://registry.npmjs.org/",
"rollback": "true",
"save": "",
"save_bundle": "",
"save_dev": "",
"save_exact": "",
"save_optional": "",
"save_prefix": "^",
"scope": "",
"searchexclude": "",
"searchopts": "",
"searchsort": "name",
"shell": "C:\\Windows\\system32\\cmd.exe",
"shrinkwrap": "true",
"sign_git_tag": "",
"spin": "true",
"strict_ssl": "true",
"tag": "latest",
"tag_version_prefix": "v",
"tmp": "C:\\Users\\injir\\AppData\\Local\\Temp",
"umask": "0000",
"unicode": "true",
"unsafe_perm": "true",
"usage": "",
"user": "",
"userconfig": "C:\\Users\\injir\\.npmrc",
"user_agent": "npm/2.11.3 node/v0.12.7 win32 x64",
"version": "",
"versions": "",
"viewer": "browser"
}
}
| ["[email protected]"] | |
33877bf7341e29b7edab2e7b7919f5bd03bfdc76 | 9507ff9e9bca2ca8104369c9e25acd74d308e9b3 | /sta8100_upload/upload.py | 6d962eeda6a0d7bd66233d1d52e6df9d0cd024bf | [] | no_license | yangkang411/python_tool | 03e483c7ec7e1e76284f93cf5b9086fdf98af826 | 713071a9fbabfabcbc3c16ce58d1382c410a7ea3 | refs/heads/master | 2023-03-17T16:14:03.332332 | 2020-09-10T02:37:05 | 2020-09-10T02:37:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 231 | py | #!/usr/bin/python
import os
if __name__ == '__main__':
cmd = "TeseoProgrammer_v2.9.0.exe program -f t5 -i sta.bin -o log.txt -c com53 -b 230400 -m SQI -d 0x10000400 -e TRUE -r TRUE";
print ("cmd = %s" % cmd);
os.system(cmd)
| ["[email protected]"] | |
21f2f4e1b507c77f7803e4fdea9993120f580e01 | 987b53acb2c189d574a6d724cbb5be563616efca | /BaseTypes/master.py | f57c70992c1b031141efd48df70e77b771a461f6 | [] | no_license | ofekzaza/DistributedComputingProject | 7e288a0db2df18a574486e947493edd901777257 | 2c595f175c86846886d88ba0cd6cccb97d44d9f5 | refs/heads/master | 2020-03-17T06:58:18.709295 | 2018-07-09T13:40:54 | 2018-07-09T13:40:54 | 133,376,863 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,083 | py | import communicator
class Master:
coms: [communicator.Communicator]
workersIds = []
workersPort = {}
workersMission = []
id: int = 'master'
typ: str = "worker"
    state: int = 1 #state machine: 1 - getting information, 2 - processing and sending results
    returnTarget: str = -1 #default
mission:str
curTarger:str
program:str
workersState = {}
listenTarget: int
answer:str
    def __init__(self, ports:[int] = [], workers:str = [], program = -1, id: str = "master"): # if program is -1, the master doesn't distribute a program
port = ports
print('master %s have been created' % id)
print("")
self.coms = []
if len(port) != len(workers):
raise ValueError("the number of the ports does not equal the number of the workers")
for p in port:
self.coms.append(communicator.Communicator(int(p), 'm'))
self.id = id
for w in workers:
self.workersIds.append(w)
for i in range(0, len(workers)):
self.workersPort[self.workersIds[i]] = port[i]
self.workersState[self.workersIds[i]] = 1
self.program = program
self.setMissions()
#self.run()
def run(self, state: int = -1):
"""man function of the master"""
if state == -1:
state = self.state
if state == 1:
self.setWorkers()
if state == 3:
self.listen()
return True
"""
def setMaster(self, option: int = 1): #option 1 is wait, 2 is dont wait, you can add more
"gives the master the base program from an outside code"
if option == 1:
self.curReciver, errorCheck, self.returnTarget, self.mission = self.com.waitRecive(self.id)
if errorCheck != self.name:
print('Worker number %s got the wrong input' % (str(self.id)))
elif option == 2:
self.curReciver, errorCheck, self.returnTarget, self.mission = self.com.recive(self.id)
if errorCheck != self.name:
print('Worker number %s got the wrong input' % (str(self.id)))
"""
def setMissions(self):
pass
    def setWorkers(self):
        """set all the workers in the list"""
        if len(self.coms) != len(self.workersIds) or len(self.coms) != len(self.workersMission):
            print("mismatch: %d communicators, %d worker ids, %d missions"
                  % (len(self.coms), len(self.workersIds), len(self.workersMission)))
        for i in range(0, len(self.workersIds)):
            self.coms[i].send(self.id, self.workersIds[i], -1, self.workersMission[i])
            print("message for worker %s has been sent" % str(self.workersIds[i]))
    def setProgram(self):  # doesn't work yet
        """doesn't work yet"""
if self.program == -1:
pass
pass
def addWorker(self, id:int, port:int):
"""add a new worker to this current master"""
idd = id
if idd == -1 or idd in self.workersIds:
for i in range(len(self.workersIds)):
if i != self.workersIds[i]:
idd = i
if idd == -1 or id in self.workersIds:
idd = len(self.workersIds)+1
print("the id is not the original givven id, the new id is " +idd)
self.workersIds.append(idd)
self.workersPort[idd] = port
self.coms.append(communicator.Communicator(port))
print("new worker number: %s have been created" %(str(idd)))
def printWorkers(self):
print("Workers of master number "+str(self.id)+" are:")
for i in self.workersIds:
print("worker number '%s' have port number '%s'" % (str(i), str(self.workersPort[i])))
print("")
def listenAll(self):
"""function auto search any input from workers"""
returnValue = [[],[],[],[]]
i = 0
for c in self.coms:
returnValue[0][i], returnValue[1][i], returnValue[2][i], returnValue[3][i] = c.recive(self.id)
i += 1
return returnValue[0], returnValue[1], returnValue[2], returnValue[3]
    def listenPort(self, port):
        # communicators were appended in the same order the ports were registered
        ports = [self.workersPort[i] for i in self.workersIds]
        return self.coms[ports.index(port)]
def listen(self):
return self.coms[self.workersIds.index(self.listenTarget)]
def killWorker(self, id):
if id in self.workersIds:
self.coms[self.workersIds.index(id)].kill(id)
print("worker number %s have been commanded to die" % str(id))
else:
print("Illegal input to killWorker")
def setStage(self, stage):
self.state = stage
print("master stage have been changed")
def __del__(self):
for id in self.workersIds:
self.killWorker(id)
        print('master number %s has died' % (str(self.id)))
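# Added usage sketch (not part of the original file): wiring a master to two
# workers, shown as comments; the `communicator` module and worker endpoints
# are assumed to exist, and the ports/ids are placeholders.
#   m = Master(ports=[5001, 5002], workers=['w1', 'w2'])
#   m.workersMission = ['mission-a', 'mission-b']   # one mission per worker
#   m.printWorkers()
#   m.run(state=1)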
| ["[email protected]"] | |
8edcd266e14b62bb5053d6369487e7c9726e0dda | 38c10c01007624cd2056884f25e0d6ab85442194 | /chrome/chrome_resources.gyp | 492536ca0787a392f82c67762f4eb395a3eb7c79 | [
"BSD-3-Clause"
] | permissive | zenoalbisser/chromium | 6ecf37b6c030c84f1b26282bc4ef95769c62a9b2 | e71f21b9b4b9b839f5093301974a45545dad2691 | refs/heads/master | 2022-12-25T14:23:18.568575 | 2016-07-14T21:49:52 | 2016-07-23T08:02:51 | 63,980,627 | 0 | 2 | BSD-3-Clause | 2022-12-12T12:43:41 | 2016-07-22T20:14:04 | null | UTF-8 | Python | false | false | 25,319 | gyp | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'variables': {
'grit_out_dir': '<(SHARED_INTERMEDIATE_DIR)/chrome',
'additional_modules_list_file': '<(SHARED_INTERMEDIATE_DIR)/chrome/browser/internal/additional_modules_list.txt',
},
'targets': [
{
# GN version: //chrome:extra_resources
'target_name': 'chrome_extra_resources',
'type': 'none',
# These resources end up in resources.pak because they are resources
# used by internal pages. Putting them in a separate pak file makes
# it easier for us to reference them internally.
'actions': [
{
# GN version: //chrome/browser/resources:memory_internals_resources
'action_name': 'generate_memory_internals_resources',
'variables': {
'grit_grd_file': 'browser/resources/memory_internals_resources.grd',
},
'includes': [ '../build/grit_action.gypi' ],
},
{
# GN version: //chrome/browser/resources:net_internals_resources
'action_name': 'generate_net_internals_resources',
'variables': {
'grit_grd_file': 'browser/resources/net_internals_resources.grd',
},
'includes': [ '../build/grit_action.gypi' ],
},
{
# GN version: //chrome/browser/resources:invalidations_resources
'action_name': 'generate_invalidations_resources',
'variables': {
'grit_grd_file': 'browser/resources/invalidations_resources.grd',
},
'includes': ['../build/grit_action.gypi' ],
},
{
# GN version: //chrome/browser/resources:password_manager_internals_resources
'action_name': 'generate_password_manager_internals_resources',
'variables': {
'grit_grd_file': 'browser/resources/password_manager_internals_resources.grd',
},
'includes': [ '../build/grit_action.gypi' ],
},
{
# GN version: //chrome/browser/resources:signin_internals_resources
'action_name': 'generate_signin_internals_resources',
'variables': {
'grit_grd_file': 'browser/resources/signin_internals_resources.grd',
},
'includes': ['../build/grit_action.gypi' ],
},
{
# GN version: //chrome/browser/resources:translate_internals_resources
'action_name': 'generate_translate_internals_resources',
'variables': {
'grit_grd_file': 'browser/resources/translate_internals_resources.grd',
},
'includes': [ '../build/grit_action.gypi' ],
},
],
'includes': [ '../build/grit_target.gypi' ],
'conditions': [
['OS != "ios"', {
'dependencies': [
'../components/components_resources.gyp:components_resources',
'../content/browser/devtools/devtools_resources.gyp:devtools_resources',
'../content/browser/tracing/tracing_resources.gyp:tracing_resources',
'browser/devtools/webrtc_device_provider_resources.gyp:webrtc_device_provider_resources',
],
'actions': [
{
# GN version: //chrome/browser/resources:component_extension_resources
'action_name': 'generate_component_extension_resources',
'variables': {
'grit_grd_file': 'browser/resources/component_extension_resources.grd',
},
'includes': [ '../build/grit_action.gypi' ],
},
{
# GN version: //chrome/browser/resources:options_resources
'action_name': 'generate_options_resources',
'variables': {
'grit_grd_file': 'browser/resources/options_resources.grd',
},
'includes': [ '../build/grit_action.gypi' ],
},
{
# GN version: //chrome/browser/resources:settings_resources
'action_name': 'generate_settings_resources',
'variables': {
'grit_grd_file': 'browser/resources/settings/settings_resources.grd',
},
'includes': [ '../build/grit_action.gypi' ],
},
],
'copies': [
{
# GN version: //chrome/browser/resources:extension_resource_demo
'destination': '<(PRODUCT_DIR)/resources/extension/demo',
'files': [
'browser/resources/extension_resource/demo/library.js',
],
},
],
}],
['chromeos==1 and disable_nacl==0 and disable_nacl_untrusted==0', {
'dependencies': [
'browser/resources/chromeos/chromevox/chromevox.gyp:chromevox',
],
}],
['enable_extensions==1', {
'actions': [
{
# GN version: //chrome/browser/resources:quota_internals_resources
'action_name': 'generate_quota_internals_resources',
'variables': {
'grit_grd_file': 'browser/resources/quota_internals_resources.grd',
},
'includes': [ '../build/grit_action.gypi' ],
},
{
# GN version: //chrome/browser/resources:sync_file_system_internals_resources
'action_name': 'generate_sync_file_system_internals_resources',
'variables': {
'grit_grd_file': 'browser/resources/sync_file_system_internals_resources.grd',
},
'includes': [ '../build/grit_action.gypi' ],
},
],
}],
],
},
{
# GN version: //chrome/browser:chrome_internal_resources_gen
'target_name': 'chrome_internal_resources_gen',
'type': 'none',
'conditions': [
['branding=="Chrome"', {
'actions': [
{
'action_name': 'generate_transform_additional_modules_list',
'variables': {
'additional_modules_input_path':
'browser/internal/resources/additional_modules_list.input',
'additional_modules_py_path':
'browser/internal/transform_additional_modules_list.py',
},
'inputs': [
'<(additional_modules_input_path)',
],
'outputs': [
'<(additional_modules_list_file)',
],
'action': [
'python',
'<(additional_modules_py_path)',
'<(additional_modules_input_path)',
'<@(_outputs)',
],
'message': 'Transforming additional modules list',
}
],
}],
],
},
{
# TODO(mark): It would be better if each static library that needed
# to run grit would list its own .grd files, but unfortunately some
# of the static libraries currently have circular dependencies among
# generated headers.
#
# GN version: //chrome:resources
'target_name': 'chrome_resources',
'type': 'none',
'dependencies': [
'chrome_internal_resources_gen',
'chrome_web_ui_mojo_bindings.gyp:web_ui_mojo_bindings',
],
'actions': [
{
# GN version: //chrome/browser:resources
'action_name': 'generate_browser_resources',
'variables': {
'grit_grd_file': 'browser/browser_resources.grd',
'grit_additional_defines': [
'-E', 'additional_modules_list_file=<(additional_modules_list_file)',
'-E', 'root_gen_dir=<(SHARED_INTERMEDIATE_DIR)',
],
},
'includes': [ '../build/grit_action.gypi' ],
},
{
# GN version: //chrome/common:resources
'action_name': 'generate_common_resources',
'variables': {
'grit_grd_file': 'common/common_resources.grd',
},
'includes': [ '../build/grit_action.gypi' ],
},
{
# GN version: //chrome/renderer:resources
'action_name': 'generate_renderer_resources',
'variables': {
'grit_grd_file': 'renderer/resources/renderer_resources.grd',
},
'includes': [ '../build/grit_action.gypi' ],
},
],
'conditions': [
['enable_extensions==1', {
'actions': [
{
# GN version: //chrome/common:extensions_api_resources
'action_name': 'generate_extensions_api_resources',
'variables': {
'grit_grd_file': 'common/extensions_api_resources.grd',
},
'includes': [ '../build/grit_action.gypi' ],
}
],
}],
],
'includes': [ '../build/grit_target.gypi' ],
},
{
# TODO(mark): It would be better if each static library that needed
# to run grit would list its own .grd files, but unfortunately some
# of the static libraries currently have circular dependencies among
# generated headers.
#
# GN version: //chrome:strings
'target_name': 'chrome_strings',
'type': 'none',
'actions': [
{
# GN version: //chrome/app/resources:locale_settings
'action_name': 'generate_locale_settings',
'variables': {
'grit_grd_file': 'app/resources/locale_settings.grd',
},
'includes': [ '../build/grit_action.gypi' ],
},
{
# GN version: //chrome/app:chromium_strings
'action_name': 'generate_chromium_strings',
'variables': {
'grit_grd_file': 'app/chromium_strings.grd',
},
'includes': [ '../build/grit_action.gypi' ],
},
{
# GN version: //chrome/app:generated_resources
'action_name': 'generate_generated_resources',
'variables': {
'grit_grd_file': 'app/generated_resources.grd',
},
'includes': [ '../build/grit_action.gypi' ],
},
{
# GN version: //chrome/app:google_chrome_strings
'action_name': 'generate_google_chrome_strings',
'variables': {
'grit_grd_file': 'app/google_chrome_strings.grd',
},
'includes': [ '../build/grit_action.gypi' ],
},
{
# GN version: //chrome/app:settings_strings
'action_name': 'generate_settings_strings',
'variables': {
'grit_grd_file': 'app/settings_strings.grd',
},
'includes': [ '../build/grit_action.gypi' ],
},
{
# GN version: //chrome/app:settings_chromium_strings
'action_name': 'generate_settings_chromium_strings',
'variables': {
'grit_grd_file': 'app/settings_chromium_strings.grd',
},
'includes': [ '../build/grit_action.gypi' ],
},
{
# GN version: //chrome/app:settings_google_chrome_strings
'action_name': 'generate_settings_google_chrome_strings',
'variables': {
'grit_grd_file': 'app/settings_google_chrome_strings.grd',
},
'includes': [ '../build/grit_action.gypi' ],
},
],
},
{
# GN version: //chrome/browser/metrics/variations:chrome_ui_string_overrider_factory_gen_sources
'target_name': 'make_chrome_ui_string_overrider_factory',
'type': 'none',
'hard_dependency': 1,
'dependencies': [ 'chrome_strings', ],
'actions': [
{
'action_name': 'generate_ui_string_overrider',
'inputs': [
'../components/variations/service/generate_ui_string_overrider.py',
'<(grit_out_dir)/grit/generated_resources.h'
],
'outputs': [
'<(SHARED_INTERMEDIATE_DIR)/chrome/browser/metrics/variations/ui_string_overrider_factory.cc',
'<(SHARED_INTERMEDIATE_DIR)/chrome/browser/metrics/variations/ui_string_overrider_factory.h',
],
'action': [
'python',
'../components/variations/service/generate_ui_string_overrider.py',
'-N', 'chrome_variations',
'-o', '<(SHARED_INTERMEDIATE_DIR)',
'-S', 'chrome/browser/metrics/variations/ui_string_overrider_factory.cc',
'-H', 'chrome/browser/metrics/variations/ui_string_overrider_factory.h',
'<(grit_out_dir)/grit/generated_resources.h',
],
'message': 'Generating generated resources map.',
}
],
},
{
# GN version: //chrome/browser/metrics/variations:chrome_ui_string_overrider_factory
'target_name': 'chrome_ui_string_overrider_factory',
'type': 'static_library',
'dependencies': [
'../components/components.gyp:variations_service',
'make_chrome_ui_string_overrider_factory',
],
'sources': [
'<(SHARED_INTERMEDIATE_DIR)/chrome/browser/metrics/variations/ui_string_overrider_factory.cc',
'<(SHARED_INTERMEDIATE_DIR)/chrome/browser/metrics/variations/ui_string_overrider_factory.h',
],
},
{
# GN version: //chrome/app/resources:platform_locale_settings
'target_name': 'platform_locale_settings',
'type': 'none',
'variables': {
'conditions': [
['OS=="win"', {
'platform_locale_settings_grd':
'app/resources/locale_settings_win.grd',
},],
['OS=="linux"', {
'conditions': [
['chromeos==1', {
'platform_locale_settings_grd':
'app/resources/locale_settings_<(branding_path_component)os.grd',
}, { # chromeos==0
'platform_locale_settings_grd':
'app/resources/locale_settings_linux.grd',
}],
],
},],
['os_posix == 1 and OS != "mac" and OS != "ios" and OS != "linux"', {
'platform_locale_settings_grd':
'app/resources/locale_settings_linux.grd',
},],
['OS == "mac" or OS == "ios"', {
'platform_locale_settings_grd':
'app/resources/locale_settings_mac.grd',
}],
], # conditions
}, # variables
'actions': [
{
'action_name': 'generate_platform_locale_settings',
'variables': {
'grit_grd_file': '<(platform_locale_settings_grd)',
},
'includes': [ '../build/grit_action.gypi' ],
},
],
'includes': [ '../build/grit_target.gypi' ],
},
{
# GN version: //chrome/app/theme:theme_resources
'target_name': 'theme_resources',
'type': 'none',
'dependencies': [
'../ui/resources/ui_resources.gyp:ui_resources',
'chrome_unscaled_resources',
],
'actions': [
{
'action_name': 'generate_theme_resources',
'variables': {
'grit_grd_file': 'app/theme/theme_resources.grd',
},
'includes': [ '../build/grit_action.gypi' ],
},
],
'includes': [ '../build/grit_target.gypi' ],
},
{
# GN version: //chrome:packed_extra_resources
'target_name': 'packed_extra_resources',
'type': 'none',
'dependencies': [
'chrome_extra_resources',
'packed_resources',
],
'actions': [
{
'includes': ['chrome_repack_resources.gypi']
},
],
'conditions': [
['OS != "mac" and OS != "ios"', {
# We'll install the resource files to the product directory. The Mac
# copies the results over as bundle resources in its own special way.
'copies': [
{
'destination': '<(PRODUCT_DIR)',
'files': [
'<(SHARED_INTERMEDIATE_DIR)/repack/resources.pak'
],
},
],
}],
],
},
{
# GN version: //chrome:packed_resources
'target_name': 'packed_resources',
'type': 'none',
'dependencies': [ # Update duplicate logic in repack_locales.py
# MSVS needs the dependencies explictly named, Make is able to
# derive the dependencies from the output files.
'chrome_resources',
'chrome_strings',
'platform_locale_settings',
'theme_resources',
'<(DEPTH)/components/components_strings.gyp:components_strings',
'<(DEPTH)/net/net.gyp:net_resources',
'<(DEPTH)/ui/resources/ui_resources.gyp:ui_resources',
'<(DEPTH)/ui/strings/ui_strings.gyp:ui_strings',
],
'actions': [
{
# GN version: //chrome:repack_locales_pack
'action_name': 'repack_locales_pack',
'variables': {
'pak_locales': '<(locales)',
},
'includes': ['chrome_repack_locales.gypi']
},
{
# GN version: //chrome:repack_pseudo_locales_pack
'action_name': 'repack_pseudo_locales_pack',
'variables': {
'pak_locales': '<(pseudo_locales)',
},
'includes': ['chrome_repack_locales.gypi']
},
{
'includes': ['chrome_repack_chrome_100_percent.gypi']
},
{
'includes': ['chrome_repack_chrome_200_percent.gypi']
},
{
'includes': ['chrome_repack_chrome_material_100_percent.gypi']
},
{
'includes': ['chrome_repack_chrome_material_200_percent.gypi']
},
],
'conditions': [ # GN version: chrome_repack_locales.gni template("_repack_one_locale")
['OS != "ios"', {
'dependencies': [ # Update duplicate logic in repack_locales.py
'<(DEPTH)/content/app/resources/content_resources.gyp:content_resources',
'<(DEPTH)/content/app/strings/content_strings.gyp:content_strings',
'<(DEPTH)/device/bluetooth/bluetooth_strings.gyp:bluetooth_strings',
'<(DEPTH)/third_party/WebKit/public/blink_resources.gyp:blink_resources',
],
}, { # else
'dependencies': [ # Update duplicate logic in repack_locales.py
'<(DEPTH)/ios/chrome/ios_chrome_resources.gyp:ios_strings_gen',
],
'actions': [
{
'includes': ['chrome_repack_chrome_300_percent.gypi']
},
],
}],
['use_ash==1', {
'dependencies': [ # Update duplicate logic in repack_locales.py
'<(DEPTH)/ash/ash_resources.gyp:ash_resources',
'<(DEPTH)/ash/ash_strings.gyp:ash_strings',
],
}],
['toolkit_views==1', {
'dependencies': [
'<(DEPTH)/ui/views/resources/views_resources.gyp:views_resources',
],
}],
['chromeos==1', {
'dependencies': [ # Update duplicate logic in repack_locales.py
'<(DEPTH)/remoting/remoting.gyp:remoting_resources',
'<(DEPTH)/ui/chromeos/ui_chromeos.gyp:ui_chromeos_resources',
'<(DEPTH)/ui/chromeos/ui_chromeos.gyp:ui_chromeos_strings',
],
}],
['enable_autofill_dialog==1 and OS!="android"', {
'dependencies': [ # Update duplicate logic in repack_locales.py
'<(DEPTH)/third_party/libaddressinput/libaddressinput.gyp:libaddressinput_strings',
],
}],
['enable_extensions==1', {
'dependencies': [ # Update duplicate logic in repack_locales.py
'<(DEPTH)/extensions/extensions_strings.gyp:extensions_strings',
],
}],
['enable_app_list==1', {
'dependencies': [
'<(DEPTH)/ui/app_list/resources/app_list_resources.gyp:app_list_resources',
],
}],
['OS != "mac" and OS != "ios"', {
# Copy pak files to the product directory. These files will be picked
# up by the following installer scripts:
# - Windows: chrome/installer/mini_installer/chrome.release
# - Linux: chrome/installer/linux/internal/common/installer.include
# Ensure that the above scripts are updated when adding or removing
# pak files.
# Copying files to the product directory is not needed on the Mac
# since the framework build phase will copy them into the framework
# bundle directly.
'copies': [
{
'destination': '<(PRODUCT_DIR)',
'files': [
'<(SHARED_INTERMEDIATE_DIR)/repack/chrome_100_percent.pak'
],
},
{
'destination': '<(PRODUCT_DIR)/locales',
'files': [
'<!@pymod_do_main(repack_locales -o -p <(OS) -g <(grit_out_dir) -s <(SHARED_INTERMEDIATE_DIR) -x <(SHARED_INTERMEDIATE_DIR) <(locales))'
],
},
{
'destination': '<(PRODUCT_DIR)/pseudo_locales',
'files': [
'<!@pymod_do_main(repack_locales -o -p <(OS) -g <(grit_out_dir) -s <(SHARED_INTERMEDIATE_DIR) -x <(SHARED_INTERMEDIATE_DIR) <(pseudo_locales))'
],
},
],
'conditions': [
['branding=="Chrome"', {
'copies': [
{
# This location is for the Windows and Linux builds. For
# Windows, the chrome.release file ensures that these files
# are copied into the installer. Note that we have a separate
# section in chrome_dll.gyp to copy these files for Mac, as it
# needs to be dropped inside the framework.
'destination': '<(PRODUCT_DIR)/default_apps',
'files': ['<@(default_apps_list)']
},
],
}],
['enable_hidpi == 1', {
'copies': [
{
'destination': '<(PRODUCT_DIR)',
'files': [
'<(SHARED_INTERMEDIATE_DIR)/repack/chrome_200_percent.pak',
],
},
],
}],
['enable_topchrome_md == 1', {
'copies': [
{
'destination': '<(PRODUCT_DIR)',
'files': [
'<(SHARED_INTERMEDIATE_DIR)/repack/chrome_material_100_percent.pak',
],
},
],
}],
['enable_hidpi == 1 and enable_topchrome_md == 1', {
'copies': [
{
'destination': '<(PRODUCT_DIR)',
'files': [
'<(SHARED_INTERMEDIATE_DIR)/repack/chrome_material_200_percent.pak',
],
},
],
}],
], # conditions
}], # end OS != "mac" and OS != "ios"
], # conditions
},
{
# GN version: //chrome/app/theme:chrome_unscaled_resources
'target_name': 'chrome_unscaled_resources',
'type': 'none',
'actions': [
{
'action_name': 'generate_chrome_unscaled_resources',
'variables': {
'grit_grd_file': 'app/theme/chrome_unscaled_resources.grd',
},
'includes': [ '../build/grit_action.gypi' ],
},
],
'includes': [ '../build/grit_target.gypi' ],
},
{
# GN version: //chrome/browser/resources:options_test_resources
'target_name': 'options_test_resources',
'type': 'none',
'actions': [
{
'action_name': 'generate_options_test_resources',
'variables': {
'grit_grd_file': 'browser/resources/options_test_resources.grd',
},
'includes': [ '../build/grit_action.gypi' ],
},
],
'includes': [ '../build/grit_target.gypi' ],
},
{
# GN version: //chrome/test/data/resources:webui_test_resources
'target_name': 'webui_test_resources',
'type': 'none',
'actions': [
{
'action_name': 'generate_webui_test_resources',
'variables': {
'grit_grd_file': 'test/data/webui_test_resources.grd',
},
'includes': [ '../build/grit_action.gypi' ],
},
],
'includes': [ '../build/grit_target.gypi' ],
},
{
# GN version: //chrome:browser_tests_pak
'target_name': 'browser_tests_pak',
'type': 'none',
'dependencies': [
'options_test_resources',
'webui_test_resources',
],
'actions': [
{
'action_name': 'repack_browser_tests_pak',
'variables': {
'pak_inputs': [
'<(SHARED_INTERMEDIATE_DIR)/chrome/options_test_resources.pak',
'<(SHARED_INTERMEDIATE_DIR)/chrome/webui_test_resources.pak',
],
'pak_output': '<(PRODUCT_DIR)/browser_tests.pak',
},
'includes': [ '../build/repack_action.gypi' ],
},
],
},
], # targets
}
| [
"[email protected]"
] | |
b8f08c7a4562131149ca80b371a214d505d27b90 | c42a650e2e99d0b6e54c67f825a4d29ecb3f5e84 | /flask_pj/june/june/tasks/__init__.py | 1bd1b05543b8609588afebfad5b9fc9327442857 | [
"BSD-3-Clause"
] | permissive | suddle/my-project | 339424c0c44a68735cd35c472436c7335078e190 | 5f10e7e7c64fa2c6254eb81421a6b15cfc977dbd | refs/heads/master | 2021-05-09T14:07:01.648836 | 2018-08-12T08:05:35 | 2018-08-12T08:05:35 | 119,055,401 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 57 | py | # flake8: noqa
from .mail import signup_mail, find_mail
| [
"[email protected]"
] | |
3065b51a201544ac2ccffe33edc8cc9b0e1a8100 | fb399006217841437a59bfff89627a0ee6fb6a16 | /script/encoding.py | ff41a1431dd68d25befa2fbd8735dfa26841a52a | [] | no_license | Beddywang/test | 9a6a0bbec4d430629ae7aada66516fd6e8d85f56 | c0a2c2d9f281afeb6e24e78f04ff5f37816cea79 | refs/heads/master | 2020-03-19T03:05:36.980912 | 2018-06-01T09:10:17 | 2018-06-01T09:10:17 | 135,694,263 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,000 | py | # coding = utf-8
import array
import json
class Header:
def __init__(self, Type, Name, Method):
self.Type = Type
self.Client = Name
self.Method = Method
class Message:
def __init__(self, header, body):
self.Header = header
self.Body = body
class Packet :
def __init__(self, message):
hs = json.dumps(message.Header, default=lambda obj: obj.__dict__)
content = bytes(intToBytes(len(hs)))+bytes(hs)+bytes(message.Body)
self.length = len(content)
self.content = content
def intToBytes(n): ## LittleEndian
b = bytearray([0, 0, 0, 0])
b[0] = n & 0xFF
n >>= 8
b[1] = n & 0xFF
n >>= 8
b[2] = n & 0xFF
n >>= 8
b[3] = n & 0xFF
return b
def bytesToInt(bs):
return int(array.array("I", bs)[0])
def encode(name, method, param):
    h = Header(0, name, method)
m = Message(h, param)
p = Packet(m)
return intToBytes(p.length) + p.content
def decode():
print "" | [
"[email protected]"
] | |
1ad5015c45fdc53fab44ac8ca1ab992a0f35c120 | f58b2a05dee64fb72daa46b24b7abf52ad28c87d | /car_dealership/forms.py | a85252d2287cc44aa545dbe6ed298602ca60d898 | [] | no_license | wkgfowler/Flask-Car-Collection | ecb7b40bc369c504150c006f6e1d0a9da05d0931 | f83940ebf0133f79d9f1a4466060c392a2aa04ff | refs/heads/master | 2023-04-14T03:04:03.541494 | 2021-04-28T01:18:12 | 2021-04-28T01:18:12 | 362,298,977 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 651 | py | from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, SubmitField
from wtforms.validators import DataRequired, Email
class UserLoginForm(FlaskForm):
email = StringField('Email', validators = [DataRequired(), Email()])
password = PasswordField('Password', validators = [DataRequired()])
submit_button = SubmitField()
class UserSignupForm(FlaskForm):
email = StringField('Email', validators = [DataRequired(), Email()])
first_name = StringField('First Name')
last_name = StringField('Last Name')
password = PasswordField('Password', validators = [DataRequired()])
    submit_button = SubmitField()
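# Added usage sketch (not part of the original file): a minimal login route
# consuming UserLoginForm, shown as comments; the Flask app object, template,
# and authentication logic are assumptions, not part of this module.
#   @app.route('/login', methods=['GET', 'POST'])
#   def login():
#       form = UserLoginForm()
#       if form.validate_on_submit():
#           ...  # check form.email.data / form.password.data
#       return render_template('login.html', form=form)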
"[email protected]"
] | |
37267b9d176703bfa0ccfc0f5b44ef463f69ea53 | 9930638a8061f1e9f7c2313c34846d6c5295d747 | /Quiz41_Yusuf Syarif Iqbal_1201184320.py | 2038f1a58232b71efd2b910a885abbdcba9de425 | [
"Unlicense"
] | permissive | yusufsyarif/Quiz-4-Alpro | 6ee82c066b53694c9e05c43d6921f46dda1a7657 | dc59622409a500d73cc0ddbbed2fa8850c919ba7 | refs/heads/master | 2020-07-24T15:16:48.701620 | 2019-09-12T05:06:42 | 2019-09-12T05:06:42 | 207,966,670 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 177 | py | StudentofFRI = ["Anton", "Budi", "Doni", "Huda"]
print("List of Student = ")
print(StudentofFRI[0])
print(StudentofFRI[1])
print(StudentofFRI[2])
print(StudentofFRI[3])
| ["[email protected]"] | |
b96f87793a172b9a59b09d49308b082f87c7a4a9 | d51dcfa375a102b36af2cb996bf31fb177f53ee2 | /cwsp-2.0/bin/sea_station_profile_parser.py | 58dcce72e35c4c8cbed232545e8b79033b94c1c4 | [] | no_license | hsinkai/crowa | 7683425a163791337fc277fe2d6e607bfcb8017b | 839d7efefed03f644557e20dcce563f61345a657 | refs/heads/master | 2020-04-01T09:52:04.108964 | 2018-10-16T02:37:51 | 2018-10-16T02:37:51 | 153,092,540 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,800 | py | #!/home/crsadm/.conda/envs/crs-py27/bin/python
# -*- coding: utf-8 -*-
import argparse
import os
import sys
import datetime
import logging
from dateutil import parser
from xml.etree.ElementTree import parse
import petl
raise SystemExit()  # note: this exits immediately; the code below never runs while this line is present
basedir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, basedir)
from cwsp.conf import Context, CONFIG_ENVAR_PATH, HOME
def extract(profile, workdir):
dicts = []
outer = parse(profile)
station_ids = outer.findall('./stationID')
updatetime_str = outer.getroot().attrib.get('updatetime')
modify_time = parser.parse(updatetime_str, ignoretz=True) if updatetime_str else datetime.datetime.now()
for station_id_tree in station_ids:
status = station_id_tree.find('./status')
if status is not None and status.find('./station') is not None:
            if status.find('./station').text.strip().startswith(u'無觀測'):  # u'無觀測' means "no observation"
continue
profile = station_id_tree.find('./profile')
station_id = station_id_tree.attrib['id'].strip()
# filename = station_id + '.xml'
outer_data = {
'station_id': station_id,
'seas_chName': profile.find('./seas_chName').text.strip(),
'latitude': float(profile.find('./latitude').text.strip()),
'longitude': float(profile.find('./longitude').text.strip()),
'chName': getattr(profile.find('./chName'), 'text', None) and profile.find('./chName').text.strip(),
'chCity': getattr(profile.find('./chCity'), 'text', None) and profile.find('./chCity').text.strip(),
'kind_chName': getattr(profile.find('./kind_chName'), 'text', None) and profile.find('./kind_chName').text.strip(),
'chTown': getattr(profile.find('./chTown'), 'text', None) and profile.find('./chTown').text.strip(),
'chLocation': getattr(profile.find('./chLocation'), 'text', None) and profile.find('./chLocation').text.strip(),
'dataItem': getattr(profile.find('./dataItem'), 'text', None) and profile.find('./dataItem').text.strip(),
# 'file_path': (workdir + filename) if workdir.endswith('/') else ('%s/%s' % (workdir, filename)),
'modifytime': modify_time,
'updatetime': datetime.datetime.now(),
}
dicts.append(outer_data)
return petl.wrap([row for row in petl.fromdicts(dicts)])
def load(table):
from dbman import RWProxy
db_proxy = RWProxy(db_config=os.path.join(HOME, 'cfg', 'db.yaml'), db_label='61.60.103.175/CRSdb')
db_proxy.todb(table, table_name='meta_sea_station_profile', mode='update', duplicate_key=('station_id', ))
for sql in db_proxy.writer.make_sql():
Context.logger.info(sql)
db_proxy.close()
if __name__ == '__main__':
# parse commandline
default_workdir = '/CRSdata/dataPool/MAROBS'
# default_workdir = r'E:/CRS/cwsp/workspace/aaa/'
default_log_path = "sea_station_profile_parser." + datetime.datetime.now().strftime('%Y%m%d')
arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('xml', help=u"upstream real-time sea-state data package")
    arg_parser.add_argument('-f', default=None, help=u"config file path; defaults to the environment variable $%s." % CONFIG_ENVAR_PATH)
    arg_parser.add_argument('-datapool', default=default_workdir, help=u"DataPool directory, default: %s" % default_workdir)
    arg_parser.add_argument('-log', default=default_log_path, help=u'log file path, default: %s' % default_log_path)
args = arg_parser.parse_args()
Context.init_config(config_path=args.f)
Context.init_logger(name=__file__, filename=args.log)
Context.logger.addHandler(logging.StreamHandler())
Context.logger.info('Received: %s' % args.xml)
table = extract(args.xml, args.datapool)
load(table)
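    # Added usage sketch (not in the original): a typical invocation; the XML
    # path is a placeholder under the default DataPool directory.
    #   python sea_station_profile_parser.py /CRSdata/dataPool/MAROBS/marobs.xml -log parser.log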
| ["crsadm@cwspapsvr1.(none)"] | crsadm@cwspapsvr1.(none) |
07f26b73cf3768bf73248fad6305b8ff5a3fdf55 | 589ac0a71099f4ee6857a31986305f0df2c16ede | /Bio/ExPASy/Prodoc.py | ad12d8b8a02347862b5f62b62ed3e670a0ef61ba | [
"LicenseRef-scancode-biopython"
] | permissive | barendt/biopython | 802aad89005b302b6523a934071796edbd8ac464 | 391bcdbee7f821bff3e12b75c635a06bc1b2dcea | refs/heads/rna | 2021-11-09T19:11:56.345314 | 2010-05-01T02:44:42 | 2010-05-01T02:44:42 | 636,700 | 0 | 0 | NOASSERTION | 2021-11-05T13:10:14 | 2010-04-29T02:35:46 | Python | UTF-8 | Python | false | false | 5,232 | py | # Copyright 2000 by Jeffrey Chang. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""
This module provides code to work with the prosite.doc file from
Prosite.
http://www.expasy.ch/prosite/
Tested with:
Release 15.0, July 1998
Release 16.0, July 1999
Release 20.22, 13 November 2007
Release 20.43, 10 February 2009
Functions:
read Read a Prodoc file containing exactly one Prodoc entry.
parse Iterates over entries in a Prodoc file.
Classes:
Record Holds Prodoc data.
Reference Holds data from a Prodoc reference.
DEPRECATED functions:
index_file Index a Prodoc file for a Dictionary.
_extract_record Extract Prodoc data from a web page.
DEPRECATED classes:
Dictionary Accesses a Prodoc file using a dictionary interface.
RecordParser Parses a Prodoc record into a Record object.
_Scanner Scans Prodoc-formatted data.
_RecordConsumer Consumes Prodoc data to a Record object.
Iterator Iterates over entries in a Prodoc file.
"""
def read(handle):
record = __read(handle)
# We should have reached the end of the record by now
line = handle.readline()
if line:
raise ValueError("More than one Prodoc record found")
return record
def parse(handle):
while True:
record = __read(handle)
if not record:
return
yield record
class Record:
"""Holds information from a Prodoc record.
Members:
accession Accession number of the record.
prosite_refs List of tuples (prosite accession, prosite name).
text Free format text.
references List of reference objects.
"""
def __init__(self):
self.accession = ''
self.prosite_refs = []
self.text = ''
self.references = []
class Reference:
"""Holds information from a Prodoc citation.
Members:
number Number of the reference. (string)
authors Names of the authors.
citation Describes the citation.
"""
def __init__(self):
self.number = ''
self.authors = ''
self.citation = ''
# Below are private functions
def __read_prosite_reference_line(record, line):
line = line.rstrip()
if line[-1] != '}':
raise ValueError("I don't understand the Prosite reference on line\n%s" % line)
acc, name = line[1:-1].split('; ')
record.prosite_refs.append((acc, name))
def __read_text_line(record, line):
record.text += line
return True
def __read_reference_start(record, line):
# Read the references
reference = Reference()
reference.number = line[1:3].strip()
if line[1] == 'E':
# If it's an electronic reference, then the URL is on the
# line, instead of the author.
reference.citation = line[4:].strip()
else:
reference.authors = line[4:].strip()
record.references.append(reference)
def __read_reference_line(record, line):
if not line.strip():
return False
reference = record.references[-1]
if line.startswith(' '):
if reference.authors[-1]==',':
reference.authors += line[4:].rstrip()
else:
reference.citation += line[5:]
return True
raise Exception("I don't understand the reference line\n%s" % line)
def __read_copyright_line(record, line):
# Skip the copyright statement
if line.startswith('+----'):
return False
return True
def __read(handle):
# Skip blank lines between records
for line in handle:
line = line.rstrip()
if line and not line.startswith("//"):
break
else:
return None
record = Record()
# Read the accession number
if not line.startswith("{PDOC"):
raise ValueError("Line does not start with '{PDOC':\n%s" % line)
if line[-1] != '}':
raise ValueError("I don't understand accession line\n%s" % line)
record.accession = line[1:-1]
# Read the Prosite references
for line in handle:
if line.startswith('{PS'):
__read_prosite_reference_line(record, line)
else:
break
else:
raise ValueError("Unexpected end of stream.")
# Read the actual text
if not line.startswith('{BEGIN'):
raise ValueError("Line does not start with '{BEGIN':\n%s" % line)
read_line = __read_text_line
for line in handle:
if line.startswith('{END}'):
# Clean up the record and return
for reference in record.references:
reference.citation = reference.citation.rstrip()
reference.authors = reference.authors.rstrip()
return record
elif line[0] == '[' and line[3] == ']' and line[4] == ' ':
__read_reference_start(record, line)
read_line = __read_reference_line
elif line.startswith('+----'):
read_line = __read_copyright_line
elif read_line:
if not read_line(record, line):
read_line = None
raise ValueError("Unexpected end of stream.")
| [
"mdehoon"
] | mdehoon |
1cf8dbafbb2c140e16cc4c24f316af8cc7589ca6 | a2d902c5976adce374dce2877b059cfb64e1d5b6 | /testfile/testthread.py | dfc08c97b301cdb9073cd8daf4842b760d4e7420 | [] | no_license | buaanostop/Autotest | 53eebc387014b6fade9a93598eaf0f74814d2f3e | 53de72f1d203b4f50725583ab90988bd254cce07 | refs/heads/master | 2020-05-03T00:34:34.500048 | 2019-05-14T08:37:53 | 2019-05-14T08:37:53 | 178,313,227 | 0 | 4 | null | 2019-05-11T16:32:42 | 2019-03-29T01:57:03 | HTML | UTF-8 | Python | false | false | 11,424 | py | # -*- coding: utf-8 -*-
"""Test类
调用Test类中的各种方法来对模拟器或手机界面进行操作。
"""
import random
import sys
import time
import threading
from com.android.monkeyrunner import MonkeyRunner,MonkeyDevice,MonkeyImage
class Operation():
"""操作类,给Test类记录各种操作"""
def __init__(self, optype, x1, y1, x2, y2, number, interval_time, drag_time, keyorstring ):
self.optype = optype
self.x1 = x1
self.y1 = y1
self.x2 = x2
self.y2 = y2
self.number = number
self.interval_time = interval_time
self.drag_time = drag_time
self.keyorstring = keyorstring
class Test(threading.Thread):
def __init__(self):
"""初始化"""
threading.Thread.__init__(self)
self.__flag = threading.Event() # 暂停标志
self.__flag.set() # 设为True
self.__running = threading.Event() # 运行标志
self.__running.set() # 设为True
self.__resolution_x = 0 # 分辨率x
self.__resolution_y = 0 # 分辨率y
self.__device = None # 设备
self.__oplist = [] # 模拟操作的列表
def connect(self, resolution_x=540, resolution_y=960):
"""连接模拟器或手机
参数
----------
resolution_x : int
分辨率x值
resolution_y : int
分辨率y值
返回值
----------
int
返回 1 : 成功连接设备
返回 0 : 连接设备失败
示例
----------
>>> a.connect(540, 960)
"""
self.__resolution_x = resolution_x
self.__resolution_y = resolution_y
print(time.strftime("%Y-%m-%d %H:%M:%S ") + "Connect ...")
self.__device = MonkeyRunner.waitForConnection() # 连接设备或模拟器
if not self.__device:
print("Please connect a device to start.")
return 0
else:
print(time.strftime("%Y-%m-%d %H:%M:%S ") + "Connection succeeded.")
return 1
def open_app(self, package_name, activity_name):
"""打开设备上的应用
参数
----------
package_name : string
应用的Package Name 包名
activity_name: string
应用的Activity Name 活动名
示例
----------
>>> a.open_app('com.Jelly.JellyFish','com.unity3d.player.UnityPlayerActivity')
"""
print(time.strftime("%Y-%m-%d %H:%M:%S ") + "Oppen application ...")
self.__device.startActivity(component = package_name + "/" + activity_name)
MonkeyRunner.sleep(10)
print(time.strftime("%Y-%m-%d %H:%M:%S ") + "Open application succeeded.")
def pause(self):
print("pause")
self.__flag.clear()
def resume(self):
print("resume")
self.__flag.set()
def stop(self):
print("stop")
self.__flag.set()
self.__running.clear()
def touch(self,pos_x, pos_y, touch_number=1, interval_time=1):
"""点击屏幕测试
参数
-------------
pos_x : int
点击的位置x
pos_y : int
点击的位置y
touch_numbere : int
点击的次数,默认为1
interval_time : float
多次点击时间隔时间,默认为1秒
"""
#optype, x1, y1, x2, y2, number, interval_time, drag_time, keyorstring
op = Operation('touch',pos_x,pos_y,0,0,touch_number,interval_time,0,0)
self.__oplist.append(op)
def random_touch(self, touch_number, interval_time):
"""随机点击屏幕测试
参数
-----------
touch_number : int
点击的次数
interval_time : float
每两次点击间隔的时间,秒为单位
示例
-----------
>>> a.random_touch(200, 1)
"""
op = Operation('random_touch',0,0,0,0,touch_number,interval_time,0,0)
self.__oplist.append(op)
def press(self, key_name):
"""按键测试
参数
-----------
key_name : string
按键的名字
"""
op = Operation('press',0,0,0,0,0,0,0,key_name)
self.__oplist.append(op)
def type(self, typestring):
"""键盘输入测试
参数
-------
typestring : string
要输入的字符串
"""
op = Operation('type',0,0,0,0,0,0,0,typestring)
self.__oplist.append(op)
def drag(self,start_x, start_y, end_x, end_y, drag_time=1, drag_number=1, interval_time=1):
"""滑动屏幕测试
参数
---------------
start_x : int
滑动起始位置x
start_y : int
滑动起始位置y
end_x : int
滑动结束位置x
end_y : int
滑动结束位置y
drag_time : float
滑动持续时间,默认为1秒
drag_number : int
滑动次数,默认为1次
interval_time : float
滑动间隔时间,默认为1秒
"""
#optype, x1, y1, x2, y2, number, interval_time, drag_time, keyorstring
op = Operation('drag',start_x,start_y,end_x,end_y,drag_number,interval_time,drag_time,0)
self.__oplist.append(op)
def random_drag(self, drag_number, interval_time):
"""随机滑动屏幕测试
参数
-----------
drag_number : int
滑动的次数
interval_time : float
每两次滑动间隔的时间,秒为单位
示例
------------
>>> a.random_drag(200, 3)
"""
op = Operation('random_drag',0,0,0,0,drag_number,interval_time,1,0)
self.__oplist.append(op)
def run(self):
opnum = len(self.__oplist)
if(opnum <= 0):
return
for op in self.__oplist:
# touch
if op.optype == 'touch':
touch_number = op.number
pos_x = op.x1
pos_y = op.y1
interval_time = op.interval_time
num = 1
while(num <= touch_number):
if self.__running.isSet():
self.__flag.wait()
print("%stouch %d (%d,%d)."%(time.strftime("%Y-%m-%d %H:%M:%S "), num, pos_x, pos_y))
self.__device.touch(pos_x, pos_y, 'DOWN_AND_UP')
num += 1
MonkeyRunner.sleep(interval_time)
else:
self.__oplist[:] = []
return
# random_touch
elif op.optype == 'random_touch':
touch_number = op.number
interval_time = op.interval_time
print(time.strftime("%Y-%m-%d %H:%M:%S ") + "Random touch test start.")
num = 1
while(num <= touch_number):
if self.__running.isSet():
self.__flag.wait()
                    x = random.randint(0, self.__resolution_x) # randomly generated x position
                    y = random.randint(0, self.__resolution_y) # randomly generated y position
print("%srandom_touch %d (%d,%d)."%(time.strftime("%Y-%m-%d %H:%M:%S "),num,x,y))
                    self.__device.touch(x, y, 'DOWN_AND_UP') # tap at (x, y)
MonkeyRunner.sleep(interval_time)
num += 1
else:
self.__oplist[:] = []
return
print(time.strftime("%Y-%m-%d %H:%M:%S ") + "Random touch test finished.")
# drag
elif op.optype == 'drag':
start_x = op.x1
start_y = op.y1
end_x = op.x2
end_y = op.y2
drag_time = op.drag_time
drag_number = op.number
interval_time = op.interval_time
num = 1
while(num <= drag_number):
if self.__running.isSet():
self.__flag.wait()
print("%sdrag %d (%d,%d) to (%d,%d)."%(time.strftime("%Y-%m-%d %H:%M:%S "),num,start_x,start_y,end_x,end_y))
self.__device.drag((start_x, start_y), (end_x, end_y), drag_time, 10)
MonkeyRunner.sleep(interval_time)
num += 1
else:
self.__oplist[:] = []
return
#random_drag
elif op.optype == 'random_drag':
drag_number = op.number
interval_time = op.interval_time
print(time.strftime("%Y-%m-%d %H:%M:%S ") + "Random drag test start.")
num = 1
while(num <= drag_number):
if self.__running.isSet():
self.__flag.wait()
x_start = random.randint(0, self.__resolution_x)
y_start = random.randint(0, self.__resolution_y)
x_end = random.randint(0,self.__resolution_x)
y_end = random.randint(0,self.__resolution_y)
print("%srandom_drag %d (%d,%d) to (%d,%d)."%(time.strftime("%Y-%m-%d %H:%M:%S "),num,x_start,y_start,x_end,y_end))
self.__device.drag((x_start, y_start), (x_end, y_end), 1, 10)
MonkeyRunner.sleep(interval_time)
num += 1
else:
self.__oplist[:] = []
return
print(time.strftime("%Y-%m-%d %H:%M:%S ") + "Random drag test finished.")
#press
elif op.optype == 'press':
key_name = op.keyorstring
if self.__running.isSet():
self.__flag.wait()
print("%spress %s."%(time.strftime("%Y-%m-%d %H:%M:%S "),key_name))
self.__device.press(key_name, 'DOWN_AND_UP')
else:
self.__oplist[:] = []
return
#type
elif op.optype == 'type':
typestring = op.keyorstring
if self.__running.isSet():
print("%stype %s."%(time.strftime("%Y-%m-%d %H:%M:%S "),typestring))
self.__device.type(typestring)
else:
self.__oplist[:] = []
return
else:
print("optype error")
## Example
##t1 = Test()
##t1.connect()
##t1.random_touch(5,5)
##t1.start()
##time.sleep(6)
##t1.pause()
##time.sleep(6)
##t1.resume()
##time.sleep(6)
##t1.stop()
##
##t1.join()
| [
"[email protected]"
] | |
a7f8d8f49b6809525e29121763627e7f50f9f9f7 | ab8a34e5b821dde7b09abe37c838de046846484e | /twilio/sample-code-master/notify/v1/user/read-default/read-default.6.x.py | 21a1ceb49f9637120f11fe5bf78cba619a151b3e | [] | no_license | sekharfly/twilio | 492b599fff62618437c87e05a6c201d6de94527a | a2847e4c79f9fbf5c53f25c8224deb11048fe94b | refs/heads/master | 2020-03-29T08:39:00.079997 | 2018-09-21T07:20:24 | 2018-09-21T07:20:24 | 149,721,431 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 467 | py | # Download the helper library from https://www.twilio.com/docs/python/install
from twilio.rest import Client
# Your Account Sid and Auth Token from twilio.com/console
account_sid = 'ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX'
auth_token = 'your_auth_token'
client = Client(account_sid, auth_token)
users = client.notify.services('ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX') \
.users \
.list()
for record in users:
print(record.sid)
| [
"[email protected]"
] | |
9514ae876595ad93efb2095d912376d900a9a105 | e73e4b346a5266ccc256beda9310b40b589baebe | /core/common/gap/page_model/dashboard/dashboard_page.py | 4594d4a1f7f1f5e908668096add8d8495b216125 | [] | no_license | Madansamudralla/python_project | 81a0adca48b86168e9c5cbe20e160eb8a4b7bad3 | e6e44b8ccfa871e3220f60eb27bde63192bfcaad | refs/heads/master | 2020-07-30T21:39:05.405275 | 2019-08-07T07:52:15 | 2019-08-07T07:52:15 | 210,367,089 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,529 | py | from selenium.webdriver.common.keys import Keys
from core.common.gap.locators.dashboard.dashboard_page import DashboardPageLocators
import core
class DashboardPage:
def __init__(self, host):
self.host = host
self.driver = core.get(core.res['chrome'], feature="browser")._res.driver.webdriver
def go_to_home_dashboard(self):
"""Method to go on HOME dashboard link
returns:
Home page
"""
self.driver.find_element(*DashboardPageLocators.HOME_TAB_DASHBOARD).click()
def go_to_merchants_dashboard(self):
"""Method to go on Merchants dashboard link
returns:
Merchants page
"""
self.driver.find_element(*DashboardPageLocators.MERCHANTS_TAB_DASHBOARD).click()
def go_to_affiliates_dashboard(self):
"""Method to go on Affiliates dashboard link
returns:
Affiliates page
"""
self.driver.find_element(*DashboardPageLocators.AFFILIATES_TAB_DASHBOARD).click()
def go_to_online_orders_dashboard(self):
"""Method to go on Online Orders dashboard link
returns:
Online Orders
"""
self.driver.find_element(*DashboardPageLocators.ONLINE_ORDERS_TAB_DASHBOARD).click()
def go_to_transactions_dashboard(self):
"""Method to go on Transactions dashboard link
returns:
Transactions page
"""
self.driver.find_element(*DashboardPageLocators.TRANSACTIONS_TAB_DASHBOARD).click()
def go_to_accounting_dashboard(self):
"""Method to go on Accounting dashboard link
returns:
Accounting page
"""
self.driver.find_element(*DashboardPageLocators.ACCOUNTING_TAB_DASHBOARD).click()
def go_to_reports_dashboard(self):
"""Method to go on Reports dashboard link
returns:
Reports page
"""
self.driver.find_element(*DashboardPageLocators.REPORTS_TAB_DASHBOARD).click()
def go_to_tools_dashboard(self):
"""Method to go on Tools dashboard link
returns:
Tools page
"""
self.driver.find_element(*DashboardPageLocators.TOOLS_TAB_DASHBOARD).click()
def go_to_settings_dashboard(self):
"""Method to go on Settings dashboard link
returns:
Settings page
"""
self.driver.find_element(*DashboardPageLocators.SETTINGS_TAB_DASHBOARD).click()
def go_to_search_dashboard(self):
"""Method to go on Search dashboard link
returns:
Search page
"""
self.driver.find_element(*DashboardPageLocators.SEARCH_TAB_DASHBOARD).click()
def go_to_errors_dashboard(self):
"""Method to go on Errors dashboard link
returns:
Errors page
"""
self.driver.find_element(*DashboardPageLocators.ERRORS_TAB_DASHBOARD).click()
def go_to_vendor_alerts_dashboard(self):
"""Method to go on Vendor Alerts dashboard link
returns:
Vendor Alerts page
"""
self.driver.find_element(*DashboardPageLocators.VENDOR_TAB_DASHBOARD_ALERTS).click()
def fill_gap_search_for_something(self, gapsearch):
"""Method to go on SEARCH GAP dashboard link
returns:
SEARCH GAP page
"""
self.driver.find_element(*DashboardPageLocators.GAP_SEARCH_INPUT).send_keys(gapsearch)
self.driver.find_element(*DashboardPageLocators.GAP_SEARCH_INPUT).send_keys(Keys.ENTER)
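    # Example usage (a sketch; assumes the 'chrome' core resource is already
    # registered and a GAP session is open; the host URL is hypothetical):
    #     page = DashboardPage("https://gap.example.com")
    #     page.go_to_reports_dashboard()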
| [
"[email protected]"
] | |
b1a69c7be4002d81abd91e4ed4511adc0d225ce8 | efe2dc42aabdb57f22156c18acebb2d6771f9de1 | /Backtracking/LeetCode 131 - PalindromePartitioning/PalindromePartitioning_Python/main.py | 815ab379dc2174343caedaafff26860f3000ab52 | [] | no_license | XingXing2019/LeetCode | 5090b41045ab8aa30e9cf7a722816695297d29e2 | ff20e93a3cbbebf64383980b6f8b8f5d3931ba72 | refs/heads/master | 2023-08-21T04:03:18.456537 | 2023-08-20T11:35:28 | 2023-08-20T11:35:28 | 268,739,176 | 16 | 7 | null | 2023-07-23T08:01:00 | 2020-06-02T08:05:13 | C# | UTF-8 | Python | false | false | 659 | py | class Solution:
def partition(self, s: str) -> List[List[str]]:
res = []
self.dfs(s, [], res)
return res
def dfs(self, s, cur, res):
if s == '':
res.append(list(cur))
for i in range(1, len(s) + 1, 1):
word = s[0:i]
if not self.isPalindrome(word):
continue
cur.append(word)
self.dfs(s[i:], cur, res)
cur.pop()
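    # Example (hypothetical driver, not part of the LeetCode harness):
    #     Solution().partition("aab")  # -> [["a", "a", "b"], ["aa", "b"]]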
def isPalindrome(self, s) -> bool:
li, hi = 0, len(s) - 1
while li < hi:
if s[li] != s[hi]:
return False
li += 1
hi -= 1
return True | [
"[email protected]"
] | |
e5c5bd57f1d6e3e0a6267ce58a760d28f52b6928 | 1118aec39a839da2ebc508f1d2a6b377aa70274d | /src/unittest/call_with_timeout.py | a75f81ed305247092122f5b613cde3b90d77c8aa | [] | no_license | serg0987/python | b3a9a2b22b4ef5a39e612a0a170ba9629933c802 | 074449ad6b3a90352939c55a9db37bd248cab428 | refs/heads/master | 2020-05-15T09:30:17.500158 | 2015-10-18T21:28:08 | 2015-10-18T21:28:08 | 2,454,952 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,635 | py | # -*- coding: utf-8 -*-
"""
Created on Jan 5, 2014
filedesc:
@author: serg
"""
import unittest
import time
from functools import wraps
import errno
import os
import signal
import xmlrunner
import sys
import pytest
import gevent
from gevent_utils import BlockingDetector
def call_with_timeout2(fn):
def wrapped(*args, **kwargs):
detector_greenlet = gevent.spawn(BlockingDetector(timeout=3))
gevent.sleep()
try:
print 'call_with_timeout2'
return fn(*args, **kwargs)
finally:
detector_greenlet.kill()
return wrapped
def error_handler(signum, frame):
print 'timeout::'
raise GeneratorExit('Timeout Expired')
TIME = 3
def call_with_timeout(fn):
def wrapped(*args, **kwargs):
old = signal.signal(signal.SIGALRM, error_handler)
signal.alarm(TIME)
try:
return fn(*args, **kwargs)
finally:
signal.signal(signal.SIGALRM, old)
return wrapped
class Test(unittest.TestCase):
def test_main(self):
print 'test_main'
time.sleep(10)
# for i in range(5):
# print 'i', i
# time.sleep(1)
print 'finish'
if __name__ == '__main__':
suite = unittest.TestSuite()
suite.addTest(
Test('test_main'),
)
report = './'
runner = xmlrunner.XMLTestRunner(output=report,
stream=sys.stderr,
verbosity=2)
for test in suite._tests:
suite._tests[suite._tests.index(test)] = call_with_timeout2(test)
runner.run(suite)
| [
"[email protected]"
] | |
5f1e99df7e52ee9bc349a2865e5727ffdc987d47 | 7eed761c7f84d32d4485f472bda47589068f220e | /app/forms.py | 3053410b553aa0a8acc140f41cc79348e7d9c898 | [] | no_license | cdvx/microblog | d5b452f87e0a8c66d1a7e5ccd8cd186c6871dde5 | 9df4a7ea07222a5e1d66d3a4b2ab2293683ff0e3 | refs/heads/master | 2022-12-11T16:31:26.838313 | 2018-08-07T15:19:00 | 2018-08-07T15:19:00 | 143,005,359 | 0 | 0 | null | 2022-12-08T02:19:47 | 2018-07-31T11:30:11 | Roff | UTF-8 | Python | false | false | 2,230 | py | from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, BooleanField,SubmitField,TextAreaField
from wtforms.validators import DataRequired, ValidationError, Email, EqualTo, Length
from flask_login import current_user
from app.models import User
class LoginForm(FlaskForm):
username = StringField('Username', validators=[DataRequired()])
password = PasswordField('Password',validators=[DataRequired()])
remember_me = BooleanField('Remember Me')
submit = SubmitField('Sign In')
class RegistrationForm(FlaskForm):
username = StringField('Username', validators=[DataRequired()])
email = StringField('Email', validators=[DataRequired(),Email()])
password = PasswordField('Password',validators=[DataRequired()])
password2 = PasswordField(
'Repeat Password', validators=[DataRequired(),EqualTo('password')])
submit = SubmitField('Register')
def validate_username(self, username):
user = User.query.filter_by(username=self.username.data).first()
if user is not None:
raise ValidationError('Please use a different username')
def validate_email(self, email):
user = User.query.filter_by(email=self.email.data).first()
if user is not None:
raise ValidationError('Please use a different email')
class EditProfileForm(FlaskForm):
username = StringField('Username', validators=[DataRequired()])
about_me = TextAreaField('About me', validators=[Length(min=0, max=140)])
submit = SubmitField('submit')
# def __init__(self, original_username, *args, **kwargs):
# super(EditProfileForm, self).__init__(*args, **kwargs)
# self.original_username = original_username
    def validate_username(self, username):
if self.username.data != current_user.username:
user = User.query.filter_by(username=self.username.data).first()
if user is not None:
raise ValidationError('Please use a different username')
# if self.username.data != self.original_username:
# user = User.query.filter_by(username=self.username.data)
# if user is not None:
# raise ValidationError('Please use a different username')
class PostForm(FlaskForm):
post = TextAreaField('Say something',
validators=[DataRequired(), Length(min=1, max=140)])
submit = SubmitField('submit')
| [
"[email protected]"
] | |
2346d4680457ac250e61832b9f51ac002a4c3431 | 9b9e875c71855e46af23351ddc06c223e25ba01e | /silx/hdf5tree/exercises/ex1_display.py | f29b97b2975a35fdfceff2e4d2db61858ae15420 | [
"CC-BY-4.0"
] | permissive | PiRK/silx-training | 6d490950c2c97e1fdf82773857c9377a87d464f2 | 57bccbea3ed67f8de51d3580069a556bdb3cb678 | refs/heads/master | 2020-05-21T02:10:18.623156 | 2017-03-10T10:59:43 | 2017-03-10T10:59:43 | 84,557,221 | 0 | 0 | null | 2017-03-10T12:21:57 | 2017-03-10T12:21:57 | null | UTF-8 | Python | false | false | 450 | py | #!/usr/bin/env python
import sys
from silx.gui import qt
from silx.gui import hdf5
def main(filenames):
app = qt.QApplication([])
    tree = hdf5.Hdf5TreeView()  # top-level view; there is no parent window here
tree.setVisible(True)
model = tree.findHdf5TreeModel()
for filename in filenames:
#
# TODO: Load each filename into the model tree
#
print("Load %s" % filename)
app.exec_()
if __name__ == "__main__":
main(sys.argv[1:])
| [
"[email protected]"
] | |
4c90e5344fae07f5fe170a89ad0aa6b9c7ac388f | 010237bddcd5bd9b3d6b8923b40e78a8d55e06d8 | /forwarding/tracking/tracking_deepsort/m_distance.py | fa766fddc625192fd351db18198c8130cced3760 | [
"MIT"
] | permissive | wy-moonind/trackrcnn_with_deepsort | 6bed6fd5cd0aad93bf648fb5f1c759f59b19f5c9 | bd92bce23baee21747ef463a48399eea63e83e0a | refs/heads/master | 2023-04-03T03:04:22.137763 | 2021-04-09T19:31:41 | 2021-04-09T19:31:41 | 353,462,121 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 837 | py | import numpy as np
from .kalman_filter import KalmanFilter
"""
Calculate mahalanobis distance cost matrix
"""
def cal_mdistance(dj, yi, si):
    mdistance = np.linalg.multi_dot([(dj - yi).T, si, (dj - yi)])
return mdistance
def mdistance_cost(tracks, detections):
"""
Parameter :
---------
tracks : list of list[mean, covariance]
detections : ndarray of [mean], N*4
-----------
return : costmatrix with shape (len(tracks), detections.shape[0])
where entry(i,j) is the m distance between i-th track
and j-th detection.
"""
cost_matrix = np.zeros((len(tracks), detections.shape[0]))
for i in range(len(tracks)):
temp = KalmanFilter()
cost_matrix[i, : ] = temp.gating_distance(tracks[i][0], tracks[i][1], detections, False)
return cost_matrix | [
"[email protected]"
] | |
9f0aae51e878e9c826cdd3cb138f93134839f502 | 63ebb8e1ee41dafcff1f343f749a651925ab6e63 | /sliding_window/53. Maximum Subarray.py | 04b06cfa02b154a83206783cba6522c8cedb53a9 | [] | no_license | vradja/leetcode | 8f41486732cb8217ff4be47ecb93f71c467eb92b | c496e6d7d4053dee986b455133b293225bfcf58c | refs/heads/master | 2023-07-11T21:10:33.845244 | 2021-07-28T02:13:18 | 2021-07-28T02:13:18 | 269,203,590 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 729 | py | import itertools
# Approach 1
def max_sub_array_of_size_k_1(K, arr):
window_sum = sum(arr[:K])
max_sum = window_sum
for window_start, value in enumerate(arr[K:]):
window_sum += value - arr[window_start]
max_sum = max(max_sum, window_sum)
return max_sum
# Approach 2:
def max_sub_array_of_size_k(K, arr):
cumulative_sums = [0] + list(itertools.accumulate(arr))
return max(map(lambda begin, end: end - begin, cumulative_sums[:-K], cumulative_sums[K:]))
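# Approach 2 relies on the prefix-sum identity
#     sum(arr[i:i+K]) == cumulative_sums[i+K] - cumulative_sums[i]
# so each window sum is one subtraction over the paired slices above.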
def main():
print("Maximum sum of a subarray of size K: " + str(max_sub_array_of_size_k(3, [2, 1, 5, 1, 3, 2])))
print("Maximum sum of a subarray of size K: " + str(max_sub_array_of_size_k(2, [2, 3, 4, 1, 5])))
main()
| [
"[email protected]"
] | |
cb28e85295b024bb0498aa6b4989914be951cfa0 | 7963f09b4002249e73496c6cbf271fd6921b3d22 | /tests/test_cpy.py | 7b453154c26e92a9cf985753289721778c504e43 | [] | no_license | thales-angelino/py6502emulator | 6df908fc02f29b41fad550c8b773723a7b63c414 | 1cea28489d51d77d2dec731ab98a6fe8a515a2a8 | refs/heads/master | 2023-03-19T14:46:17.393466 | 2021-03-08T04:10:45 | 2021-03-08T04:10:45 | 345,754,473 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,773 | py | import unittest
from emulator_6502 import emulator_6502 as emulator
from emulator_6502.instructions import cpy
class TestCPY(unittest.TestCase):
def setUp(self):
self.memory = emulator.Memory()
self.cpu = emulator.CPU(self.memory)
self.cpu.reset()
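    # CPY compares the Y register against the operand (computes Y - M) and sets:
    #   carry    when Y >= M
    #   zero     when Y == M
    #   negative when bit 7 of (Y - M) is set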
def test_cpy_scenario_1(self):
operand = 0x10
expected_zero = 0
expected_negative = 0
expected_carry = 1
self.cpu.y = 0x50
self.cpu.cpy(operand)
self.assertEqual(self.cpu.processor_status['carry'], expected_carry, "CPU Carry flag should be %d" % expected_carry)
self.assertEqual(self.cpu.processor_status['zero'], expected_zero, "CPU zero flag should be %d" % expected_zero)
self.assertEqual(self.cpu.processor_status['negative'], expected_negative, "CPU negative flag should be %d" % expected_negative)
def test_cpy_scenario_2(self):
operand = 0x50
expected_zero = 1
expected_negative = 0
expected_carry = 1
self.cpu.y = 0x50
self.cpu.cpy(operand)
self.assertEqual(self.cpu.processor_status['carry'], expected_carry, "CPU Carry flag should be %d" % expected_carry)
self.assertEqual(self.cpu.processor_status['zero'], expected_zero, "CPU zero flag should be %d" % expected_zero)
self.assertEqual(self.cpu.processor_status['negative'], expected_negative, "CPU negative flag should be %d" % expected_negative)
def test_cpy_scenario_3(self):
operand = 0x60
expected_zero = 0
expected_negative = 1
expected_carry = 0
self.cpu.y = 0x50
self.cpu.cpy(operand)
self.assertEqual(self.cpu.processor_status['carry'], expected_carry, "CPU Carry flag should be %d" % expected_carry)
self.assertEqual(self.cpu.processor_status['zero'], expected_zero, "CPU zero flag should be %d" % expected_zero)
self.assertEqual(self.cpu.processor_status['negative'], expected_negative, "CPU negative flag should be %d" % expected_negative)
def test_cpy_immediate(self):
expected_cycles = 2
value = 0x10
self.cpu.y = 0x50
expected_zero = 0
expected_negative = 0
expected_carry = 1
self.memory.memory[emulator.START_ADDRESS] = cpy.CPY_IMMEDIATE_OPCODE
self.memory.memory[emulator.START_ADDRESS + 1] = value
self.cpu.execute(1)
self.assertEqual(self.cpu.cycles, expected_cycles, "CPU cycles should be %d" % expected_cycles)
self.assertEqual(self.cpu.processor_status['carry'], expected_carry, "CPU Carry flag should be %d" % expected_carry)
self.assertEqual(self.cpu.processor_status['zero'], expected_zero, "CPU zero flag should be %d" % expected_zero)
self.assertEqual(self.cpu.processor_status['negative'], expected_negative, "CPU negative flag should be %d" % expected_negative)
def test_cpy_absolute(self):
expected_cycles = 4
value = 0x10
self.cpu.y = 0x50
expected_zero = 0
expected_negative = 0
expected_carry = 1
self.memory.memory[emulator.START_ADDRESS] = cpy.CPY_ABSOLUTE_OPCODE
self.memory.memory[emulator.START_ADDRESS + 1] = 0xff # LSB FIRST!!!
self.memory.memory[emulator.START_ADDRESS + 2] = 0x02
self.memory.memory[0x02ff] = value
self.cpu.execute(1)
self.assertEqual(self.cpu.cycles, expected_cycles, "CPU cycles should be %d" % expected_cycles)
self.assertEqual(self.cpu.processor_status['carry'], expected_carry, "CPU Carry flag should be %d" % expected_carry)
self.assertEqual(self.cpu.processor_status['zero'], expected_zero, "CPU zero flag should be %d" % expected_zero)
self.assertEqual(self.cpu.processor_status['negative'], expected_negative, "CPU negative flag should be %d" % expected_negative)
def test_cpy_zeropage(self):
expected_cycles = 3
value = 0x10
self.cpu.y = 0x50
expected_zero = 0
expected_negative = 0
expected_carry = 1
self.memory.memory[emulator.START_ADDRESS] = cpy.CPY_ZEROPAGE_OPCODE
self.memory.memory[emulator.START_ADDRESS + 1] = 0xff
self.memory.memory[0x00ff] = value
self.cpu.execute(1)
self.assertEqual(self.cpu.cycles, expected_cycles, "CPU cycles should be %d" % expected_cycles)
self.assertEqual(self.cpu.processor_status['carry'], expected_carry, "CPU Carry flag should be %d" % expected_carry)
self.assertEqual(self.cpu.processor_status['zero'], expected_zero, "CPU zero flag should be %d" % expected_zero)
self.assertEqual(self.cpu.processor_status['negative'], expected_negative, "CPU negative flag should be %d" % expected_negative)
if __name__ == '__main__':
unittest.main() | [
"[email protected]"
] | |
06cab5181ac5b30bf81ed5cfb40402affe9bcbcc | 1af2fb8c49791d8d51c7bf17ad565be56952979b | /DjangoCBVpr/wsgi.py | 52239c56bc10cc028b55eb5dcffbd0bf8ab02eaf | [] | no_license | MohamadAhmadi100/Todo-Django-CBV | 580db48317064fe8bec060f445fb818169c6b5dc | a59893d690b41c31800fb4dd46d5f90eb034ee87 | refs/heads/main | 2023-07-25T15:25:19.896480 | 2023-07-12T12:49:04 | 2023-07-12T12:49:04 | 360,139,750 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 399 | py | """
WSGI config for DjangoCBVpr project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'DjangoCBVpr.settings')
application = get_wsgi_application()
| [
"[email protected]"
] | |
ad7fb65c29e1e089acf7dcd03ebff16492f03f4e | 2a7fc4abb3428c285ebe6d530c8383b50f28b37d | /leetcode763_划分字母区间/leetcode763_划分字母区间.py | b2ddeb1385ff7a16ab1f409797bd6d94ccab4c11 | [] | no_license | X-thon/LeetCodeRecord | 9bc508d42120c462888c3860c0207e37707f3c82 | 1616bddb4986df7a3785bc9691022607147c8752 | refs/heads/master | 2020-05-18T19:10:19.467631 | 2019-08-30T15:56:43 | 2019-08-30T15:56:43 | 184,603,282 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 381 | py | from typing import List
class Solution:
def partitionLabels(self, S: str) -> List[int]:
d = dict()
for i in S:
d[i] = S.rfind(i)
index, res = d[S[0]], []
for i, c in enumerate(S):
if d[c] > index:
index = d[c]
if i == index:
res.append(index + 1 - sum(res))
return res | [
"[email protected]"
] | |
a7da85542253cd53cd17078b3ea11dd532c26fac | 2f6e97d92b940913dbe42090d2066d44768f0127 | /system1/editor-gui/gui/MainWindow.py | ec6cb5f63f528d9dc101e95ebcc1916dab92a30e | [] | no_license | BioroboticsLab/bb_analysis | 848dd2af54a68a77121cb5139cfd9baa1c070f62 | 5b347ec5d963ebc74b109ee741e8ec949036bd86 | refs/heads/master | 2021-01-24T09:45:58.676281 | 2020-01-15T10:03:04 | 2020-01-15T10:03:04 | 56,809,679 | 0 | 0 | null | 2016-11-14T10:58:33 | 2016-04-21T22:35:09 | Python | UTF-8 | Python | false | false | 968 | py | from PyQt4 import QtGui
import data_structures as ds
from LoaderTab import LoaderTab
from EditorTab import EditorTab
class MainWindow( QtGui.QMainWindow ):
def __init__( self, app, parent = None ):
super( MainWindow, self ).__init__( parent )
self.resize( 1000, 600 )
self.setWindowTitle( 'BeesBook Filtering Editor' )
self.central_widget = QtGui.QStackedWidget( self )
self.setCentralWidget( self.central_widget )
self.dset_store = ds.DetectionSetStore()
self.path_manager = ds.PathManager()
self.loader_tab = LoaderTab( self, app )
self.editor_tab = EditorTab( self, app )
self.central_widget.addWidget( self.loader_tab )
self.central_widget.addWidget( self.editor_tab )
self.central_widget.setCurrentWidget( self.loader_tab )
def goto_loader( self ):
self.central_widget.setCurrentWidget( self.loader_tab )
def goto_editor( self ):
self.editor_tab.activate()
self.central_widget.setCurrentWidget( self.editor_tab )
| [
"[email protected]"
] | |
47508a3b9f2141ed5940c7582db50110eb72e9aa | eef1a0e31f723757c5ca8665b9433a9df86d17aa | /func/python/bench_json_loads.py | 311fdb7808b7871b2a891b4608fb5b8789176806 | [
"Apache-2.0"
] | permissive | robinvanemden/Faasm | 09a69fce30300a12d5ba7df55c40a39d81ee5d8f | e005cca20fb4be4ee9ae30f25a5873964b2efd7f | refs/heads/master | 2020-12-01T14:10:51.471549 | 2019-12-20T10:05:17 | 2019-12-20T10:05:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 354 | py | import json
from performance.benchmarks.bm_json_loads import DICT, TUPLE, DICT_GROUP, bench_json_loads
if __name__ == "__main__":
json_dict = json.dumps(DICT)
json_tuple = json.dumps(TUPLE)
json_dict_group = json.dumps(DICT_GROUP)
objs = (json_dict, json_tuple, json_dict_group)
for x in range(100):
bench_json_loads(objs)
| [
"[email protected]"
] | |
eecf4582690cc3a4b3fffa8685b1de18dbb4daef | 0492adab1ca40fc59457cca2dc1d3e2a6b35e9d2 | /04-TKinter基础/TkinterExample05.py | 9be6ba793fff617046b77638383190c32099bf3e | [] | no_license | starryKey/LearnPython | 0333a7bc015838ff30f54581a740c53701f729d6 | 3d028ca9a5ffe2c2bffd467b04db94356a798d9e | refs/heads/master | 2021-07-19T07:20:49.458121 | 2020-05-17T15:06:38 | 2020-05-17T15:06:38 | 157,088,376 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 619 | py | from tkinter import *
baseFrame = Tk()
baseFrame.title = "菜单"
baseFrame.geometry("400x300+300+100")
menubar = Menu(baseFrame)
emenu = Menu(baseFrame)
for item in ['Copy', 'Past', 'Cut']:
emenu.add_command(label=item)
subemenu1 = Menu(baseFrame)
for item in ["New", "Save as"]:
subemenu1.add_command(label=item)
subemenu2 = Menu(baseFrame)
for item in ["Delete"]:
subemenu2.add_command(label=item)
menubar.add_cascade(label='File', menu=subemenu1)
menubar.add_cascade(label='Edit', menu=emenu)
menubar.add_cascade(label='About', menu=subemenu2)
baseFrame['menu'] = menubar
baseFrame.mainloop() | [
"[email protected]"
] | |
8b8929d554b4f886eb1a3863f7d2c0f2bb3e06eb | 659189407e57c7677ba7ec630c4559cbc0d9a14f | /blog/migrations/0001_initial.py | 5698bb1c1c5993cee58dc64037c3f41fcca04e4d | [] | no_license | josevarela24/django_blog | 048704c261a890cf5b5abb49b5bd6f7bb801855d | 495c5b8463803c64253ae519b83b3ce940833287 | refs/heads/master | 2022-11-30T00:22:51.533210 | 2020-01-03T20:59:39 | 2020-01-03T20:59:39 | 229,484,284 | 0 | 0 | null | 2022-11-22T04:55:45 | 2019-12-21T21:17:57 | Python | UTF-8 | Python | false | false | 907 | py | # Generated by Django 3.0.1 on 2019-12-22 16:32
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100)),
('content', models.TextField()),
('date_posted', models.DateTimeField(default=django.utils.timezone.now)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| [
"[email protected]"
] | |
27173302787013bf861b618232c256786ec0685b | 0af5d743d5786a16a2a3d7f67723151cfa487f9a | /scripts/ipic3d_parse_udist_single.py | dd2bbf4a3a19bd9afa18a986da57999c6ede7134 | [] | no_license | allscale/allscale_ipic3d | a673f6e56a5f0b34054d6d17de6fc44ad6d98735 | dcf76b86c05d1153a33c6a0567b039dd89c8f43b | refs/heads/master | 2021-10-16T04:20:06.212504 | 2019-02-07T16:59:53 | 2019-02-07T16:59:53 | 151,078,614 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 673 | py | #!/usr/bin/env python
import math
import sys
import subprocess
command = "grep 'Throughput:' " + sys.argv[1] + " | awk -F':' '{print $2}' | awk -F' ' '{print $1}'"
(res,err) = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE).communicate()
res = res.split()
count = 0
res1 = ""
res2 = ""
res4 = ""
res8 = ""
res16 = ""
res32 = ""
for i in 0, 1, 2, 3:
for j in 0, 1, 2, 3, 4, 5:
res1 += res[i*36 + 0 + j] + " "
res2 += res[i*36 + 6 + j] + " "
res4 += res[i*36 + 12 + j] + " "
res8 += res[i*36 + 18 + j] + " "
res16 += res[i*36 + 24 + j] + " "
res32 += res[i*36 + 30 + j] + " "
print res1
print res2
print res4
print res8
print res16
print res32
| [
"[email protected]"
] | |
a98effc7b04fb871da96b054beab8f8f10ffdae0 | 9df958f8208ba7a861aaceec3e077ec923b205a7 | /TP1/TP1.py | 8d7cd997b3c1da1408ef49b20b6ac72393957005 | [] | no_license | jsuarezbaron/RedesNeu2019 | 3b0173c6a7934c8a55bf72e2ac54a7b4042e7218 | ef6f388debb18db05073e79cc797ab9b044409c7 | refs/heads/master | 2020-05-07T20:41:14.145784 | 2019-07-11T18:41:21 | 2019-07-11T18:41:21 | 180,872,484 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,910 | py | import math
import numpy as np
import matplotlib.pyplot as plt
from perceptron_multicapa import PerceptronMulticapa
X = np.genfromtxt(fname='tp1_ej1_training.csv', delimiter=',',dtype=float, usecols=(1,2,3,4,5,6,7,8,9,10))
y = np.genfromtxt(fname='tp1_ej1_training.csv', delimiter=',',dtype=str, usecols=0)
y = np.where(y == 'M', 0.95, 0.05) ### We decided to map 'M' to 0.95 (as a 1) and 'B' to 0.05 (as a 0)
                                   ### because the sigmoid never reaches the values 1 and 0.
for i in range(10): ### Standardise the data,
    mean = np.mean(X[:,i]) ### since without scaling we got overflow when evaluating the sigmoid
std = np.std(X[:,i])
X[:,i] -= mean
X[:,i] /= std
XTrain = X[0:328,:] ###80% training, 10% validation and 10% testing
yTrain = y[0:328]
XValid = X[328:369,:]
yValid = y[328:369]
XTest = X[369:,:]
yTest = y[369:]
np.random.seed(2)
ppn1 = PerceptronMulticapa(10,10,1) ### The first argument is the number of inputs,
                                    ### the second is the number of neurons in the hidden layer
                                    ### and the third is the number of outputs.
ppn1.train(XTrain, yTrain, XValid, yValid, epochs=1500, eta=0.1)
plt.plot(range(1, len(ppn1.errorsTrain_)+1), ppn1.errorsTrain_,color='b',label='Errores training')
plt.plot(range(1, len(ppn1.errorsValid_)+1), ppn1.errorsValid_,color='r',label='Errores validation')
plt.legend()
plt.grid()
plt.xlabel('Epocas')
plt.ylabel('Error')
plt.show()
testing = np.array([])
for i in range(len(yTest)):
testing = np.append(testing,ppn1.forward(XTest[i,:], redEntrenada=True, w1=ppn1.w1Val, w2=ppn1.w2Val))
testing = np.where(testing > 0.5, 0.95, 0.05)
matrizConfusion = np.zeros([2,2])
for i in range(len(yTest)):
if yTest[i] == 0.95:
if yTest[i] == testing[i]:
            matrizConfusion[0,0] += 1 ### True positive
        else:
            matrizConfusion[0,1] += 1 ### False negative
    else:
        if yTest[i] == testing[i]:
            matrizConfusion[1,1] += 1 ### True negative
        else:
            matrizConfusion[1,0] += 1 ### False positive
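# Layout of the matrix built above (rows = actual class, columns = prediction):
#     [[true positives,  false negatives],
#      [false positives, true negatives ]]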
print(matrizConfusion)
# X = np.genfromtxt(fname='tp1_ej2_training.csv', delimiter=',',dtype=float, usecols=(0,1,2,3,4,5,6,7))
# y = np.genfromtxt(fname='tp1_ej2_training.csv', delimiter=',',dtype=float, usecols=(8,9))
# for i in range(8): ### Standardise the data
#     mean = np.mean(X[:,i]) ### for the same reason as the previous exercise
# std = np.std(X[:,i])
# X[:,i] -= mean
# X[:,i] /= std
# for i in range(2): ### We decided to use this (min-max) rescaling
#     m = np.min(y[:,i]) ### since using standardisation could
#     M = np.max(y[:,i]) ### allow values above 1 and below 0,
#     y[:,i] -= m ### values the sigmoid never reaches.
#     y[:,i] /= (M - m) ### Originally this rescaling leaves the data in [0,1],
#     y[:,i] *= 0.90 ### but we multiply by 0.90 and add 0.05 so that
#     y[:,i] += 0.05 ### the data ends up in [0.05,0.95], values the sigmoid
#                    ### does reach.
# XTrain = X[0:400,:] ###80% training, 10% validation and 10% testing
# yTrain = y[0:400,:]
# XValid = X[400:450,:]
# yValid = y[400:450,:]
# XTest = X[450:,:]
# yTest = y[450:,:]
# np.random.seed(2)
# ppn2 = PerceptronMulticapa(8,8,2)
# ppn2.train(XTrain, yTrain, XValid, yValid, epochs=1500, eta=1)
# plt.plot(range(1, len(ppn2.errorsTrain_)+1), ppn2.errorsTrain_,color='b',label='Errores training')
# plt.plot(range(1, len(ppn2.errorsValid_)+1), ppn2.errorsValid_,color='r',label='Errores validation')
# plt.legend()
# plt.grid()
# plt.xlabel('Epocas')
# plt.ylabel('Error')
# plt.show()
# testing = np.empty((0,2), float)
# for i in range(np.shape(yTest)[0]):
# testing = np.append(testing,[ppn2.forward(XTest[i,:], redEntrenada=True, w1=ppn2.w1Val, w2=ppn2.w2Val)],axis=0)
# errorTesting = np.mean(sum((yTest - testing)**2))
# print(errorTesting) | [
"[email protected]"
] | |
0f21ff24a0fbeb54950e2810122ef82601826992 | e3a44084de8c7e0d557595ad1e54f07f5a6adde7 | /2020_spring/2020_04_03/1431_GU.py | 2aec93fc325e4aaf1ab2b41ab86a0bb9e07e967f | [] | no_license | hoon4233/Algo-study | 01fcc06c4dce4bfb4bf81c0e9ff208a0d2521512 | 817ec715bd6b2df920ed96921558387995dcfa76 | refs/heads/main | 2023-05-01T08:40:50.438250 | 2021-05-24T05:57:03 | 2021-05-24T05:57:03 | 335,468,042 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,559 | py | import sys
import copy
#import re
N = int(sys.stdin.readline())
serial = []
for i in range(N):
ser = sys.stdin.readline().rstrip()
nums = [int(s) for s in ser if s.isdigit()]
serial.append([ser,sum(nums)])
nums.clear()
serial.sort(key = lambda x: (len(x[0]),x[1],x[0]))
for i in range(len(serial)):
print(serial[i][0])
"""
serial.sort(key = lambda x: len(x[0])
change_index =[0,] #when len is changed
for i in range(N-1):
if len(serial[i][0]) != len(serial[i+1][0]):
change_index.append(i+1)
if change_index[len(change_index)-1] != len(serial)-1:
change_index.append(len(serial)-1)
#print(change_index)
for i in range(len(change_index)-1):
start = change_index[i]
end = change_index[i+1]
if end-start != 1:
nl = serial[start:end+1]
nl.sort(key = lambda x: x[1])
for j in range(start,end+1):
serial[j] = nl[j-start]
same_index =[] #when len is changed
for i in range(N-1):
if len(serial[i][0]) == len(serial[i+1][0]) and serial[i][1]==serial[i+1][1]:
same_index.append(i)
same_index.append(i+1)
same_index = list(set(same_index))
packet = []
tmp = []
if(same_index):
v = same_index.pop(0)
tmp.append(v)
while(len(same_index)>0 and same_index):
vv = same_index.pop(0)
if v+1 == vv:
tmp.append(vv)
v = vv
else:
packet.append(tmp)
tmp = []
tmp.append(vv)
v = vv
packet.append(tmp) # packet: a list holding lists of consecutive numbers taken from the list
for i in range(len(packet)):
tmp = packet[i]
if len(tmp) == 0:
break
if len(tmp) == 1:
continue
start = tmp[0]
end = tmp[len(tmp)-1]
nl = serial[start:end+1]
nl.sort()
for j in range(start,end+1):
serial[j] = nl[j-start]
"""
#for i in range(len(serial)):
# print(serial[i][0])
""" 처음 시도한 것
check_list =[] #start, end, start, end which ones have same len
for i in range(N-1):
if len(serial[i]) == len(serial[i+1]):
check_list.append(i)
for i in range(N-1):
if len(serial[i]) != len(serial[i+1]):
check_list.append(i)
break
elif i==N-2 and len(serial[i]) == len(serial[i+1]):
check_list.append(N-1)
for i in range(len(check_list)//2):
start = check_list[i]
end = check_list[i+1]
newSorted = ['0']*(start-end+1)
sums =[0]*(start-end+1)
for j in range(start,end+1):
nums = [int(s) for s in serial[j].split() if s.isdigit()]
sums[j-start] = sum(nums)
""" | [
"[email protected]"
] | |
738b4c2e8ea71aa1374de72bcbdaff282bbe4f37 | 8ace8be98c5fb7baac267ca7f83c8085e5cad35c | /26_two_sum_unique_pairs.py | def053f435def022e8e58082e3376b6e647929d4 | [] | no_license | cyberbono3/amazon-oa-python | c063eb275a4d311e58f148c0300c7e19b0f03bea | 7ce502bbe3a30b1d6052a46e7a28b724a327b5ae | refs/heads/master | 2023-01-20T16:23:00.241012 | 2020-11-22T03:49:25 | 2020-11-22T03:49:25 | 293,693,115 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 416 | py | """
Input: nums = [1, 1, 2, 45, 46, 46], target = 47
Expected output: 2  (unique pairs (1, 46) and (2, 45))
"""
class Solution:
def unique_pairs(self, nums, target):
s = set()
dic = {}
for i,x in enumerate(nums):
if target - x in s:
dic[target-x] = x
else:
s.add(x)
print(dic)
return len(dic)
sol = Solution()
print(sol.unique_pairs([1, 1, 2, 45, 46, 46], 47)) | [
"[email protected]"
] | |
fe4155275d3a9240634ebe2b2de50705201231ac | a140a7ca1bc5f0af773cb3d22081b4bb75138cfa | /234_palindromLinkedList.py | b1b3a195574aefe83cc26bf49500c32c48a8a3b2 | [] | no_license | YeahHuang/Leetcode | d02bc99d2e890ed0e829515b6f85c4ca6394a1a1 | 78d36486ad4ec2bfb88fd35a5fd7fd4f0003ee97 | refs/heads/master | 2021-07-14T01:53:06.701325 | 2020-06-22T03:01:46 | 2020-06-22T03:01:46 | 166,235,118 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 585 | py | class Solution:
def isPalindrome(self, head: ListNode) -> bool:
rev = None
slow = fast = head
while fast and fast.next:
fast = fast.next.next
rev, rev.next, slow = slow, rev, slow.next
if fast:
# fast is at the end, move slow one step further for comparison(cross middle one)
slow = slow.next
while rev and rev.val == slow.val:
slow = slow.next
rev = rev.next
# if equivalent then rev become None, return True; otherwise return False
return not rev | [
"[email protected]"
] | |
63897bcb7d1d451d51497a89ed42b40c7c919bcd | 8c7853822047c1908b7bb5f39531d721dacbed3f | /Python Practice/Assignment.py | af81f0a5477dd1bcad731c9ef95518de49085947 | [] | no_license | AjayKrish24/Assessment | 63cbd8386f4f6fe649abcc3603485ed8647cf6c3 | 6233e268b9812c7f5f859ec03a83691fd3419472 | refs/heads/master | 2022-04-08T06:35:11.142183 | 2020-02-28T11:37:22 | 2020-02-28T11:37:22 | 235,511,361 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,325 | py |
1)
string = input("Enter a string : ")
str_list = []
count = ""
for x in string:
if x not in str_list:
str_list.append(x)
for x in str_list:
count = count + x + str(string.count(x))
print(count)
#=======================o/p======================================
Enter a string : aaabbcc
a3b2c2
#***************************************************************************************************
2)
string = [(),("a", "b"),(" ", " ")]
for i in string:
if len(i) == 0:
print("There is an empty tuple in the list")
#=======================o/p======================================
There is an empty tuple in the list
#***************************************************************************************************
4)
word = input()
print(word.title())
#=======================o/p======================================
welcome to python
Welcome To Python
#***************************************************************************************************
5)
import re
ip = input("Enter IP : ")
print(re.match(r"\b(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.)(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.)(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.)(25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9][0-9]|[0-9])\b", ip))
#=======================o/p======================================
Enter IP : 123.45.6.88
<_sre.SRE_Match object; span=(0, 11), match='123.45.6.88'>
#***************************************************************************************************
6)
string_list = ["Welcome", "to", "Python"]
print(" ".join(string_list))
#=======================o/p======================================
string_list = ["Welcome", "to", "Python"]
print(" ".join(string_list))
#***************************************************************************************************
| [
"[email protected]"
] | |
2a59f51322dc3315c47936236dd5bf64773e8bd4 | bb64ae47e8041f0f111a009c46484cc2fd5f9a3b | /tests/10-deploy | ec60110e078c7e67b19cef1a3de522a9c793347c | [] | no_license | mbruzek/layer-dockerbeat | 9a0343b552e00f6e6f0fd3bfdb112eb8d59e3823 | e8285fa9be3dd3466b91dfe43d0f353b6a9d53a7 | refs/heads/master | 2020-12-29T00:12:39.972946 | 2016-07-07T15:13:42 | 2016-07-07T15:13:42 | 63,078,714 | 0 | 0 | null | 2016-07-11T15:16:27 | 2016-07-11T15:16:27 | null | UTF-8 | Python | false | false | 332 | #!/usr/bin/python3
import amulet
import unittest
class TestCharm(unittest.TestCase):
def setUp(self):
self.d = amulet.Deployment()
self.d.add('dockerbeat')
self.d.expose('dockerbeat')
self.d.setup(timeout=900)
self.d.sentry.wait()
self.unit = self.d.sentry['dockerbeat'][0]
| [
"[email protected]"
] | ||
a9ee76fd1846fc752464d450bd036b4608171d17 | 7886b494cb12cfc3497d4d6dfe0297dd17f6be92 | /final_project/poi_id_notebook.py | 5e2e1e76d86c90c62eb2273170b744e4c3161bc1 | [] | no_license | Garek31/U3-A-ML-to-Identify-Fraud-in-the-Enron-Corpus | 459308f312a51c74126b1c991eeb394f867a145d | 2900f9695cb1b4feafee6c3d6ff91bc30c7f86f5 | refs/heads/main | 2023-01-30T22:32:50.173928 | 2020-12-15T22:27:07 | 2020-12-15T22:27:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 35,601 | py | #!/usr/bin/env python
# coding: utf-8
# # Machine Learning to Identify Fraud in the Enron Corpus
# In[1]:
import warnings
warnings.filterwarnings("ignore")
import sys
import pickle
sys.path.append("../tools/")
from feature_format import featureFormat, targetFeatureSplit
from tester import dump_classifier_and_data
import pandas as pd
import sys
import pickle
import csv
import matplotlib.pyplot as plt
sys.path.append("../tools/")
from feature_format import featureFormat, targetFeatureSplit
#from poi_data import *
from sklearn.feature_selection import SelectKBest
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import StratifiedShuffleSplit
from numpy import mean
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_validate
from sklearn.metrics import accuracy_score, precision_score, recall_score
# ## Task 1: Select what features you'll use
# features_list is a list of strings, each of which is a feature name.
# The first feature must be "poi".
# features_list = ['poi','salary']
# **You will need to use more features**
# In[2]:
target_label = 'poi'
email_features_list = [
'from_messages',
'from_poi_to_this_person',
'from_this_person_to_poi',
'shared_receipt_with_poi',
'to_messages',
]
financial_features_list = [
'bonus',
'deferral_payments',
'deferred_income',
'director_fees',
'exercised_stock_options',
'expenses',
'loan_advances',
'long_term_incentive',
'other',
'restricted_stock',
'restricted_stock_deferred',
'salary',
'total_payments',
'total_stock_value',
]
features_list = [target_label] + financial_features_list + email_features_list
# In[3]:
### Load the dictionary containing the dataset
with open('final_project_dataset.pkl', 'rb') as data_file:
data_dict = pickle.load(data_file)
# In[4]:
df = pd.DataFrame(data_dict)
df.T
# ### 1.1.0 Explore csv file
# In[5]:
def make_csv(data_dict):
""" generates a csv file from a data set"""
    fieldnames = ['name'] + list(next(iter(data_dict.values())).keys())
with open('data.csv', 'w') as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
for record in data_dict:
person = data_dict[record]
person['name'] = record
assert set(person.keys()) == set(fieldnames)
writer.writerow(person)
# ### 1.1.1 Dataset Exploration
# In[6]:
print('# Exploratory Data Analysis #')
data_dict.keys()
print('Total number of data points: %d' % len(data_dict.keys()))
num_poi = 0
for name in data_dict.keys():
if data_dict[name]['poi'] == True:
num_poi += 1
print('Number of Persons of Interest: %d' % num_poi)
print('Number of people without Person of Interest label: %d' % (len(data_dict.keys()) - num_poi))
# ### 1.1.2 Feature Exploration
# In[7]:
all_features = data_dict['ALLEN PHILLIP K'].keys()
print('Each person has %d features available' % len(all_features))
### Evaluate dataset for completeness
missing_values = {}
for feature in all_features:
missing_values[feature] = 0
for person in data_dict.keys():
records = 0
for feature in all_features:
if data_dict[person][feature] == 'NaN':
missing_values[feature] += 1
else:
records += 1
# ### Print results of completeness analysis
# In[8]:
print('Number of Missing Values for Each Feature:')
#sorted(missing_values.values())
#for feature in all_features:
# print("%s: %d" % (feature, sorted(missing_values.values())[feature])
for id in sorted(missing_values, key = missing_values.get, reverse = True):
print(id, missing_values[id])
# => classification,we have here unblanced target.
# Maybe Smot methodology ?
# ## Task 2: Remove outliers
# In[9]:
def PlotOutlier(data_dict, feature_x, feature_y):
""" Plot with flag = True in Red """
data = featureFormat(data_dict, [feature_x, feature_y, 'poi'])
for point in data:
x = point[0]
y = point[1]
poi = point[2]
if poi:
color = 'red'
else:
color = 'blue'
plt.scatter(x, y, color=color)
plt.xlabel(feature_x)
plt.ylabel(feature_y)
plt.show()
# ### 2.1 Visualise outliers
# In[10]:
print(PlotOutlier(data_dict, 'total_payments', 'total_stock_value'))
print(PlotOutlier(data_dict, 'from_poi_to_this_person', 'from_this_person_to_poi'))
print(PlotOutlier(data_dict, 'salary', 'bonus'))
#Remove outlier TOTAL line in pickle file.
data_dict.pop( 'TOTAL', 0 )
# ### 2.2 Function to remove outliers
# In[11]:
def remove_outlier(dict_object, keys):
""" removes list of outliers keys from dict object """
for key in keys:
dict_object.pop(key, 0)
outliers = ['TOTAL', 'THE TRAVEL AGENCY IN THE PARK', 'LOCKHART EUGENE E']
remove_outlier(data_dict, outliers)
# ### Task 3: Create new feature(s)
# ### 3.1 create new copies of dataset for grading
# In[12]:
my_dataset = data_dict
# ### 3.2 add new features to dataset
# In[13]:
def compute_fraction(x, y):
""" return fraction of messages from/to that person to/from POI"""
if x == 'NaN' or y == 'NaN':
return 0.
if x == 0 or y == 0:
return 0
fraction = x / y
return fraction
def compute_ratio(poi_from, poi_to,messages_from, messages_to):
""" return fraction of messages from/to that person to/from POI"""
if poi_from == 'NaN' or poi_to == 'NaN' or messages_from =='NaN' or messages_to=='NaN':
return 0.
fraction = (poi_from + poi_to)/(messages_from + messages_to)
return fraction
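# e.g. compute_fraction(25, 100) -> 0.25, while compute_fraction('NaN', 100) -> 0.0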
# By doing further research on the data and its source, we have learned that NaN values in the financial and stock columns do not mean a lack of information but a zero value. So, for each of these columns, we will replace the NaN values with zeros.
# In[14]:
for name in my_dataset:
data_point = my_dataset[name]
NaN_value = 0
if data_point['deferral_payments'] == 'NaN' :
data_point['deferral_payments'] = NaN_value
if data_point['total_payments'] == 'NaN' :
data_point['total_payments'] = NaN_value
if data_point['loan_advances'] == 'NaN':
data_point['loan_advances'] = NaN_value
if data_point['bonus'] == 'NaN' :
data_point['bonus'] = NaN_value
if data_point['restricted_stock_deferred'] == 'NaN':
data_point['restricted_stock_deferred'] = NaN_value
if data_point['total_stock_value'] == 'NaN' :
data_point['total_stock_value'] = NaN_value
if data_point['expenses'] == 'NaN' :
data_point['expenses'] = NaN_value
if data_point['exercised_stock_options'] == 'NaN' :
data_point['exercised_stock_options'] = NaN_value
if data_point['long_term_incentive'] == 'NaN' :
data_point['long_term_incentive'] = NaN_value
if data_point['director_fees'] == 'NaN' :
data_point['director_fees'] = NaN_value
# Thanks to our research, we were able to identify FREVERT MARK A, LAVORATO JOHN J, WHALLEY LAWRENCE G and BAXTER JOHN C as members of the board of directors; nevertheless, these four people are not POIs. We can therefore anticipate that their very high financial figures would distort our results later on, so it is preferable to remove them from the dataset.
# In[15]:
my_dataset.pop('FREVERT MARK A')
my_dataset.pop('LAVORATO JOHN J')
my_dataset.pop('WHALLEY LAWRENCE G')
my_dataset.pop('BAXTER JOHN C')
# In addition, we decided to replace the NaN values in the message columns with averages computed separately for POI and non-POI employees. This lets us feed more information to our models.
# In[16]:
cnt_from_poi_to_this_person =0
cnt_from_this_person_to_poi=0
cnt_to_messages =0
cnt_from_messages =0
cnt_shared_receipt_with_poi = 0
cnt_poi_from_poi_to_this_person =0
cnt_poi_from_this_person_to_poi=0
cnt_poi_to_messages =0
cnt_poi_from_messages =0
cnt_poi_shared_receipt_with_poi = 0
sum_poi_from_poi_to_this_person =0
sum_poi_from_this_person_to_poi=0
sum_poi_to_messages =0
sum_poi_from_messages =0
sum_shared_receipt_with_poi = 0
sum_from_poi_to_this_person =0
sum_from_this_person_to_poi=0
sum_to_messages =0
sum_from_messages =0
sum_poi_shared_receipt_with_poi = 0
for name in my_dataset:
data_point = my_dataset[name]
from_poi_to_this_person = data_point["from_poi_to_this_person"]
from_messages = data_point['from_messages']
to_messages = data_point['to_messages']
from_this_person_to_poi = data_point["from_this_person_to_poi"]
poi = data_point["poi"]
shared_receipt_with_poi = data_point["shared_receipt_with_poi"]
if from_messages != 'NaN' and poi ==False:
cnt_from_messages += 1
sum_from_messages += from_messages
elif from_messages != 'NaN' and poi ==True:
cnt_poi_from_messages +=1
sum_poi_from_messages += from_messages
if to_messages != 'NaN' and poi ==False:
cnt_to_messages += 1
sum_to_messages += to_messages
elif to_messages != 'NaN' and poi ==True:
cnt_poi_to_messages +=1
sum_poi_to_messages += to_messages
if from_poi_to_this_person != 'NaN' and poi ==False:
cnt_from_poi_to_this_person += 1
sum_from_poi_to_this_person += from_poi_to_this_person
    elif from_poi_to_this_person != 'NaN' and poi ==True:
cnt_poi_from_poi_to_this_person +=1
sum_poi_from_poi_to_this_person+= from_poi_to_this_person
if from_this_person_to_poi != 'NaN' and poi ==False:
cnt_from_this_person_to_poi += 1
sum_from_this_person_to_poi += from_this_person_to_poi
    elif from_this_person_to_poi != 'NaN' and poi ==True:
cnt_poi_from_this_person_to_poi +=1
sum_poi_from_this_person_to_poi += from_this_person_to_poi
if shared_receipt_with_poi != 'NaN' and poi ==False:
cnt_shared_receipt_with_poi += 1
sum_shared_receipt_with_poi += shared_receipt_with_poi
elif shared_receipt_with_poi != 'NaN' and poi ==True:
cnt_poi_shared_receipt_with_poi +=1
sum_poi_shared_receipt_with_poi += shared_receipt_with_poi
mean_from_poi_to_this_person = compute_fraction(sum_from_poi_to_this_person,cnt_from_poi_to_this_person)
mean_from_this_person_to_poi= compute_fraction(sum_from_this_person_to_poi, cnt_from_this_person_to_poi)
mean_to_messages =compute_fraction(sum_to_messages,cnt_to_messages)
mean_from_messages =compute_fraction(sum_from_messages,cnt_from_messages)
mean_shared_receipt_with_poi = compute_fraction(sum_shared_receipt_with_poi,cnt_shared_receipt_with_poi)
mean_poi_from_poi_to_this_person = compute_fraction(sum_poi_from_poi_to_this_person,cnt_poi_from_poi_to_this_person)
mean_poi_from_this_person_to_poi= compute_fraction(sum_poi_from_this_person_to_poi, cnt_poi_from_this_person_to_poi)
mean_poi_to_messages =compute_fraction(sum_poi_to_messages,cnt_poi_to_messages)
mean_poi_from_messages =compute_fraction(sum_poi_from_messages,cnt_poi_from_messages)
mean_poi_shared_receipt_with_poi = compute_fraction(sum_poi_shared_receipt_with_poi,cnt_poi_shared_receipt_with_poi)
for name in my_dataset:
data_point = my_dataset[name]
from_poi_to_this_person = data_point["from_poi_to_this_person"]
from_messages = data_point['from_messages']
to_messages = data_point['to_messages']
from_this_person_to_poi = data_point["from_this_person_to_poi"]
shared_receipt_with_poi = data_point["shared_receipt_with_poi"]
poi = data_point["poi"]
if from_messages == 'NaN' and poi ==False:
data_point["from_messages"] = mean_from_messages
elif from_messages == 'NaN' and poi ==True:
data_point["from_messages"] = mean_poi_from_messages
if to_messages == 'NaN' and poi ==False:
data_point["to_messages"]== mean_to_messages
elif to_messages == 'NaN' and poi ==True:
data_point["to_messages"] = mean_poi_to_messages
if from_poi_to_this_person == 'NaN' and poi ==False:
data_point["from_poi_to_this_person"] =mean_from_poi_to_this_person
    elif from_poi_to_this_person == 'NaN' and poi ==True:
data_point["from_poi_to_this_person"] = mean_poi_from_poi_to_this_person
if from_this_person_to_poi == 'NaN' and poi ==False:
data_point["from_this_person_to_poi"] =mean_from_this_person_to_poi
    elif from_this_person_to_poi == 'NaN' and poi ==True:
data_point["from_this_person_to_poi"]= mean_poi_from_this_person_to_poi
if shared_receipt_with_poi == 'NaN' and poi ==False:
data_point["shared_receipt_with_poi"] = mean_shared_receipt_with_poi
    elif shared_receipt_with_poi == 'NaN' and poi ==True:
data_point["shared_receipt_with_poi"]= mean_poi_shared_receipt_with_poi
# In[17]:
print(mean_from_poi_to_this_person , mean_from_this_person_to_poi, mean_to_messages , mean_from_messages)
print(mean_poi_from_poi_to_this_person , mean_poi_from_this_person_to_poi , mean_poi_to_messages,mean_poi_from_messages )
# We add new ratio features (names match the code below):
#
# 1. fraction_from_poi / fraction_to_poi
# 2. shared_receipt_poi_ratio
# 3. bonus_to_salary
# 4. bonus_to_total
# 5. exercised_stock_options_ratio
# 6. ratio_mess
# In[18]:
for name in my_dataset:
data_point = my_dataset[name]
from_poi_to_this_person = data_point["from_poi_to_this_person"]
to_messages = data_point["to_messages"]
fraction_from_poi = compute_fraction(from_poi_to_this_person, to_messages)
data_point["fraction_from_poi"] = fraction_from_poi
from_this_person_to_poi = data_point["from_this_person_to_poi"]
from_messages = data_point["from_messages"]
fraction_to_poi = compute_fraction(from_this_person_to_poi, from_messages)
data_point["fraction_to_poi"] = fraction_to_poi
shared_receipt_with_poi = data_point["shared_receipt_with_poi"]
shared_receipt_poi_ratio = compute_fraction(shared_receipt_with_poi, to_messages)
data_point["shared_receipt_poi_ratio"] = shared_receipt_poi_ratio
bonus= data_point["bonus"]
salary = data_point["salary"]
bonus_to_salary = compute_fraction(bonus, salary)
data_point["bonus_to_salary"] = bonus_to_salary
total_payments = data_point["total_payments"]
bonus_to_total = compute_fraction(bonus, total_payments)
data_point["bonus_to_total"] = bonus_to_total
exercised_stock_options= data_point["exercised_stock_options"]
total_stock_value= data_point["total_stock_value"]
exercised_stock_options_ratio = compute_fraction(exercised_stock_options, total_stock_value)
data_point["exercised_stock_options_ratio"] = exercised_stock_options_ratio
ratio_mess= compute_ratio(from_poi_to_this_person, from_this_person_to_poi,from_messages, to_messages)
data_point["ratio_mess"] = ratio_mess
# Finally, while investigating we found the members of the board, and we wanted to add a feature indicating whether a person is part of the board.
# In[19]:
for name in my_dataset:
data_point = my_dataset[name]
direction = 0
data_point["direction"] = direction
# In[20]:
list_direction2 = ["LAY KENNETH L","SKILLING JEFFREY K"]
list_direction1 = ["BUY RICHARD B","CAUSEY RICHARD A","DERRICK JR. JAMES V","KEAN STEVEN J","KOENIG MARK E","METTS MARK","FASTOW ANDREW S","BAXTER JOHN C","HORTON STANLEY C","FREVERT MARK A","WHALLEY LAWRENCE G","PAI LOU L","WHITE JR THOMAS E","HIRKO JOSEPH","RICE KENNETH D"]
for name in my_dataset :
for item in list_direction1 :
if name == item :
direction = 1
my_dataset[name]['direction'] = direction
for item2 in list_direction2 :
if name == item2 :
direction = 2
my_dataset[name]['direction'] = direction
# ### 3.3 create new copies of feature list for grading
# In[21]:
my_feature_list = features_list +[ 'fraction_to_poi','shared_receipt_poi_ratio','bonus_to_salary','bonus_to_total','direction','ratio_mess','exercised_stock_options_ratio']
# In[22]:
features_list
# ### 3.4 get K-best features
# In[23]:
num_features = 10
# ### 3.5 function using SelectKBest
# In[24]:
def get_k_best(data_dict, features_list, k):
""" runs scikit-learn's SelectKBest feature selection
returns dict where keys=features, values=scores
"""
data = featureFormat(data_dict, features_list)
labels, features = targetFeatureSplit(data)
k_best = SelectKBest(k=k)
k_best.fit(features, labels)
scores = k_best.scores_
print(scores)
unsorted_pairs = zip(features_list[1:], scores)
sorted_pairs = list(reversed(sorted(unsorted_pairs, key=lambda x: x[1])))
k_best_features = dict(sorted_pairs[:k])
print ("{0} best features: {1}\n".format(k, k_best_features.keys(), scores))
return k_best_features
# => SelectKBest defaults to f_classif (ANOVA F-test), an appropriate statistical test for classification.
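# For explicitness, the selection above could pass the scoring function by
# hand; a sketch with the defaults spelled out (not a change to get_k_best):
#
# from sklearn.feature_selection import SelectKBest, f_classif
# k_best = SelectKBest(score_func=f_classif, k=10)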
# In[25]:
best_features = get_k_best(my_dataset, my_feature_list, num_features)
my_feature_list = [target_label] + list(set(best_features.keys()))
# ### 3.6 print features
# In[26]:
print ("{0} selected features: {1}\n".format(len(my_feature_list) - 1, my_feature_list[1:]))
# ### 3.7 extract the features specified in features_list
#
# In[27]:
data = featureFormat(my_dataset, my_feature_list,sort_keys = True)
# split into labels and features
# In[28]:
labels, features = targetFeatureSplit(data)
# ### 3.8 scale features via min-max
# In[29]:
from sklearn import preprocessing
scaler = preprocessing.MinMaxScaler()
features = scaler.fit_transform(features)
# ## Task 4: Using algorithm
# Please name your classifier clf for easy export below.
# Note that if you want to do PCA or other multi-stage operations,
# you'll need to use Pipelines. For more info:
# http://scikit-learn.org/stable/modules/pipeline.html
#
# Provided to give you a starting point. Try a variety of classifiers.
# ### 4.1 Gaussian Naive Bayes Classifier
# In[30]:
from sklearn.naive_bayes import GaussianNB
g_clf = GaussianNB()
# ### 4.2 Logistic Regression Classifier
#
# In[31]:
from sklearn.linear_model import LogisticRegression
# In[32]:
l_clf = Pipeline(steps= [
('scaler', StandardScaler()),
('classifier', LogisticRegression(C=1e-08, class_weight=None, dual=False, fit_intercept=True, intercept_scaling=1,
max_iter=100, multi_class='ovr', penalty='l2', random_state=42, solver='liblinear', tol=0.001, verbose=0))])
# ### 4.3 K-means Clustering
# In[33]:
from sklearn.cluster import KMeans
k_clf = KMeans(n_clusters=2, tol=0.001)
# ### 4.4 Support Vector Machine Classifier
# In[34]:
from sklearn.svm import SVC
s_clf = SVC(kernel='rbf', C=1000,gamma = 0.0001,random_state = 42, class_weight = 'balanced')
# ### 4.5 Random Forest
#
# In[35]:
from sklearn.ensemble import RandomForestClassifier
rf_clf = RandomForestClassifier(max_depth = 5,max_features = 'sqrt',n_estimators = 10, random_state = 42)
# ### 4.6 Gradient Boosting Classifier
# In[36]:
from sklearn.ensemble import GradientBoostingClassifier
gb_clf = GradientBoostingClassifier(loss='deviance', learning_rate=0.1, n_estimators=10,random_state = 42)
# ### 4.7 Decision Tree Classifier
# In[37]:
from sklearn.tree import DecisionTreeClassifier
# In[38]:
tre_clf=DecisionTreeClassifier(random_state=42)
# ### 4.8 KNeighborsClassifier
# In[39]:
from sklearn.neighbors import KNeighborsClassifier
# In[40]:
knn_clf = KNeighborsClassifier(n_neighbors=3)
# ### 4.9 Perceptron
# In[41]:
from sklearn.linear_model import Perceptron
pe_clf= Perceptron(max_iter=5)
# ### 4.10 MLP Perceptron
# In[42]:
from sklearn.neural_network import MLPClassifier
# In[43]:
mlp_clf = MLPClassifier(random_state=1)
# ### 4.11 Evaluate function
#
# In[44]:
from imblearn.over_sampling import SMOTE
from imblearn.combine import SMOTETomek
from sklearn.model_selection import StratifiedShuffleSplit
sss = StratifiedShuffleSplit(n_splits=1, test_size=0.1, random_state=0)
# In[45]:
def evaluate_clf(clf, features, labels, num_iters=1000, test_size=0.3):
print (clf)
accuracy = []
precision = []
recall = []
first = True
for trial in range(num_iters):
features_train, features_test, labels_train, labels_test = train_test_split(features, labels, test_size=test_size)
clf.fit(features_train,labels_train)
predictions = clf.predict(features_test)
accuracy.append(accuracy_score(labels_test, predictions))
precision.append(precision_score(labels_test, predictions))
recall.append(recall_score(labels_test, predictions))
if trial % 10 == 0:
if first:
sys.stdout.write('\nProcessing')
sys.stdout.write('.')
sys.stdout.flush()
first = False
print ("done.\n")
print ("precision: {}".format(mean(precision)))
print ("recall: {}".format(mean(recall)))
print ("accuracy: {}".format(mean(accuracy)))
    return mean(precision), mean(recall)
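# Note: the StratifiedShuffleSplit `sss` defined above is not used inside
# evaluate_clf, which relies on a plain train_test_split. A hedged alternative
# (a sketch, not the evaluation behind the reported numbers) could be:
#
# from sklearn.model_selection import cross_val_score
# def evaluate_clf_stratified(clf, features, labels):
#     cv = StratifiedShuffleSplit(n_splits=100, test_size=0.3, random_state=0)
#     return cross_val_score(clf, features, labels, scoring='f1', cv=cv).mean()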
# ### 4.12 Evaluate all classifiers
# In[46]:
evaluate_clf(g_clf, features, labels)
evaluate_clf(l_clf, features, labels)
evaluate_clf(k_clf, features, labels)
evaluate_clf(s_clf, features, labels)
evaluate_clf(rf_clf, features, labels)
evaluate_clf(gb_clf, features, labels)
evaluate_clf(tre_clf, features, labels)
evaluate_clf(knn_clf, features, labels)
evaluate_clf(pe_clf, features, labels)
evaluate_clf(mlp_clf, features, labels)
# ### 5. Hyperparameter tuning
# In[47]:
from sklearn.model_selection import GridSearchCV
import numpy as np
# #### 5.1 Decision tree
# We select one of the best models from the evaluate function in order to tune its hyperparameters.
# As a reminder, here are the results obtained without tuning:
# precision: 0.4703893550893551
# recall: 0.45136847041847045
# accuracy: 0.858238095238095
# First, we create a pipeline and run a GridSearch over SelectKBest to find the best number of features.
# In[48]:
n_features = np.arange(1, 20)
my_feature_list = features_list +['fraction_to_poi','shared_receipt_poi_ratio','bonus_to_salary','bonus_to_total','direction','ratio_mess','exercised_stock_options_ratio']
data = featureFormat(my_dataset, my_feature_list,sort_keys = True)
labels, features = targetFeatureSplit(data)
# Create a pipeline with feature selection and classification
pipe_k1 = Pipeline([
('select_features', SelectKBest()),
('classifier',DecisionTreeClassifier())])
param_grid = [
{
'select_features__k': n_features
}
]
# Use GridSearchCV to automate the process of finding the optimal number of features
cv = StratifiedShuffleSplit(n_splits=10, test_size=0.3, random_state=67)
k_clf= GridSearchCV(pipe_k1, param_grid=param_grid, scoring='f1', cv = cv)
k_clf.fit(features, labels)
# In[49]:
k_clf.best_score_
# In[50]:
k_clf.best_estimator_
# In[51]:
num_features=19
# In[52]:
best_features = get_k_best(my_dataset, my_feature_list, num_features)
my_feature_list = [target_label] + list(set(best_features.keys()))
# In[53]:
data = featureFormat(my_dataset, my_feature_list,sort_keys = True)
labels, features = targetFeatureSplit(data)
# The best number of features is 19.
# Now we are interested in the internal parameters of the decision tree.
# Then we launch a new grid search where we test different parameters:
# - criterion
# - max_depth
# - min_samples_split
# - min_samples_leaf
# The grid search is scored on f1 in order to optimize both recall and precision.
# In[54]:
clf_parameters = { 'criterion': ['gini', 'entropy'],
'max_depth': [None, 1, 2, 4, 5, 10, 15, 20],
'min_samples_split': [2, 4, 6, 8, 10, 20, 30, 40],
'min_samples_leaf': [1, 2, 3, 4, 5, 6, 7, 8, 10, 20, 30] }
cv = StratifiedShuffleSplit(n_splits=5, test_size=0.3, random_state=67)
clf = GridSearchCV(DecisionTreeClassifier(), param_grid = clf_parameters, cv = cv, scoring = 'f1')
clf.fit(features,labels)
clf.best_estimator_
# In[55]:
clf.best_params_
# The best parameters are: {'criterion': 'gini',
#                           'max_depth': None,
#                           'min_samples_leaf': 1,
#                           'min_samples_split': 20}
# We therefore evaluate the tuned model.
# In[56]:
clf_best_tree=DecisionTreeClassifier(criterion= 'gini',
max_depth = None,
min_samples_leaf = 1,
min_samples_split = 20)
# In[57]:
evaluate_clf(clf_best_tree,features,labels)
# With this evaluation, we obtain:
# precision: 0.4970781787656788
# recall: 0.6162624458874459
# accuracy: 0.8779761904761906
# Now we can test with the tester made by our excellent teacher.
# In[58]:
import tester
tester.dump_classifier_and_data(clf_best_tree , my_dataset, my_feature_list)
tester.main()
# With the tester, we gain precision but at the same time lose some recall.
# Our result:
# Accuracy: 0.88864  Precision: 0.67971  Recall: 0.41700  F1: 0.51689  F2: 0.45193
# #### 5.2 Log Regression
# We have identified that logistic regression is one of the most efficient models on our dataset. We want to tune its hyperparameters in order to improve the results. First of all, we analyze the best number of features to feed our model, so we create a pipeline with a SelectKBest parameterized by the number of features.
# In[59]:
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import RobustScaler
import numpy as np
from sklearn.metrics import f1_score
from sklearn.metrics import make_scorer
from sklearn.preprocessing import PolynomialFeatures
from sklearn.model_selection import GridSearchCV
# In[60]:
n_features = np.arange(1, 20)
my_feature_list = features_list +['fraction_to_poi','shared_receipt_poi_ratio','bonus_to_salary','bonus_to_total','direction','ratio_mess','exercised_stock_options_ratio']
data = featureFormat(my_dataset, my_feature_list,sort_keys = True)
labels, features = targetFeatureSplit(data)
# Create a pipeline with feature selection and classification
pipe_k = Pipeline([
('scaler', StandardScaler()),
('select_features', SelectKBest()),
('classifier', LogisticRegression())])
param_grid = [
{
'select_features__k': n_features
}
]
# Use GridSearchCV to automate the process of finding the optimal number of features
cv = StratifiedShuffleSplit(n_splits=10, test_size=0.3, random_state=67)
k_lcf= GridSearchCV(pipe_k, param_grid=param_grid, scoring='f1', cv = cv)
k_lcf.fit(features, labels)
# In[61]:
k_lcf.best_score_
k_lcf.best_estimator_
# As we can see, the best results appear with 7 features.
# Now we are interested in the internal parameters of the logistic regression.
# In[62]:
num_features=7
# In[63]:
best_features = get_k_best(my_dataset, my_feature_list, num_features)
my_feature_list = [target_label] + list(set(best_features.keys()))
# In[64]:
data = featureFormat(my_dataset, my_feature_list,sort_keys = True)
labels, features = targetFeatureSplit(data)
# In[65]:
pipe_log = Pipeline([
('scaler', StandardScaler()),
('classifier', LogisticRegression())])
# Then we launch a new grid search where we test different parameters:
# - solver
# - penalty
# - C
# - class_weight
# - multi_class
# The grid search is scored on f1 in order to optimize both recall and precision.
# In[66]:
# define models and parameters
solvers = ['newton-cg', 'lbfgs', 'liblinear']
penalty = ["l1","l2","elasticnet","none"]
c_values = np.logspace(-4, 4, 50)
class_weight=['balanced',None]
multi_class=["ovr"]
# define grid search
grid = dict(classifier__solver=solvers,classifier__penalty=penalty,classifier__C=c_values,classifier__class_weight=class_weight,classifier__multi_class=multi_class)
cv = StratifiedShuffleSplit(n_splits=5, test_size=0.3, train_size=0.7,random_state=1)
grid_search = GridSearchCV(estimator=pipe_log, param_grid=grid, n_jobs=-1, cv=cv,scoring = 'f1')
grid_result = grid_search.fit(features, labels)
# In[67]:
grid_result.best_estimator_
# In[68]:
grid_result.best_params_
# The best parameters are: {'C': 0.009102981779915217,
#                           'class_weight': None,
#                           'multi_class': 'ovr',
#                           'penalty': 'l2',
#                           'solver': 'liblinear'}
# We therefore evaluate the tuned model.
# In[69]:
clf_best_log_f1=Pipeline(steps=[('std_slc', StandardScaler()),
('logistic_Reg',
LogisticRegression(C=0.009102981779915217,
class_weight=None, multi_class='ovr',penalty= 'l2',
solver='liblinear', tol=0.001))])
# Then we run the evaluation with our tuned parameters
# In[ ]:
evaluate_clf(clf_best_log_f1,features,labels)
# With this evaluation, we obtain:
# precision: 0.5337182012432011
# recall: 0.6074877344877344
# accuracy: 0.8709285714285716
# Now we can test with the tester made by our excellent teacher.
# In[ ]:
import tester
tester.dump_classifier_and_data(clf_best_log_f1 , my_dataset, my_feature_list)
tester.main()
# With the tester, we gain precision while recall stays comparable.
# Our result :
# Accuracy: 0.88736 Precision: 0.60302 Recall: 0.61900 F1: 0.61091 F2: 0.6157
# #### 5.3 Perceptron
# In[ ]:
n_features = np.arange(1, 20)
my_feature_list = features_list +['fraction_to_poi','shared_receipt_poi_ratio','bonus_to_salary','bonus_to_total','direction','ratio_mess','exercised_stock_options_ratio']
data = featureFormat(my_dataset, my_feature_list,sort_keys = True)
labels, features = targetFeatureSplit(data)
# Create a pipeline with feature selection and classification
pipe_p = Pipeline([
('scaler', preprocessing.MinMaxScaler()),
('select_features', SelectKBest()),
('classifier', Perceptron())])
param_grid = [
{
'select_features__k': n_features
}]
# Use GridSearchCV to automate the process of finding the optimal number of features
cv = StratifiedShuffleSplit(n_splits=10, test_size=0.3, random_state=67)
k_lcf= GridSearchCV(pipe_p, param_grid=param_grid, scoring='f1', cv = cv)
k_lcf.fit(features, labels)
# In[ ]:
k_lcf.best_score_
k_lcf.best_params_
# As we can see, the best results appear with 11 features.
# Now we are interested in the internal parameters of the perceptron.
# In[ ]:
num_features=11
# In[ ]:
best_features = get_k_best(my_dataset, my_feature_list, num_features)
my_feature_list = [target_label] + list(set(best_features.keys()))
# In[ ]:
data = featureFormat(my_dataset, my_feature_list,sort_keys = True)
labels, features = targetFeatureSplit(data)
# In[ ]:
pipe_per = Pipeline([
('scaler', preprocessing.MinMaxScaler()),
('classifier', Perceptron())])
# In[ ]:
# define models and parameters
penalty = ["l1","l2","elasticnet","none"]
alpha = np.logspace(-4, 4, 50)
fit_intercept = [True, False]
shuffle = [True, False]
class_weight=['balanced',None]
# define grid search
grid = dict(classifier__penalty=penalty,classifier__alpha=alpha,classifier__class_weight=class_weight,classifier__shuffle=shuffle,classifier__fit_intercept=fit_intercept)
cv = StratifiedShuffleSplit(n_splits=5, test_size=0.3, train_size=0.7,random_state=1)
grid_search = GridSearchCV(estimator=pipe_per, param_grid=grid, n_jobs=-1, cv=cv,scoring = 'f1')
grid_result = grid_search.fit(features, labels)
# In[ ]:
grid_result.best_estimator_
# In[ ]:
grid_result.best_params_
# In[ ]:
clf_best_per_f1=Pipeline(steps=[('scaler', MinMaxScaler()),
('classifier',
Perceptron(alpha=0.0020235896477251557, penalty='l1',
shuffle=False))])
# In[ ]:
evaluate_clf(clf_best_per_f1,features,labels)
# In[ ]:
import tester
tester.dump_classifier_and_data(clf_best_per_f1, my_dataset, my_feature_list)
tester.main()
# Best model after parameter tuning:
# Logistic regression with 11 features.
# #### 5.4 Try stacking the 3 models
# In[ ]:
#pip install mlxtend
# In[ ]:
from mlxtend.classifier import StackingClassifier
# In[ ]:
num_features=7
# In[ ]:
best_features = get_k_best(my_dataset, my_feature_list, num_features)
my_feature_list = [target_label] + list(set(best_features.keys()))
# In[ ]:
m_clf = StackingClassifier(classifiers=[clf_best_log_f1,clf_best_tree,clf_best_per_f1],use_probas=False,meta_classifier=clf_best_log_f1)
# In[ ]:
evaluate_clf(m_clf,features,labels)
# In[ ]:
import tester
tester.dump_classifier_and_data(m_clf, my_dataset, my_feature_list)
tester.main()
# #### Our model selection : logistic regression
# Select Logistic Regression as final algorithm
# When we compare the results from the evaluate function and the tester, it seems that our logistic regression model has the best score after parameter tuning. This result confirms the relevance of our pre-processing steps. We don't think we can further increase recall and precision without more information about the dataset.
# In[ ]:
clf = clf_best_log_f1
# dump your classifier, dataset and features_list so
# anyone can run/check your results
# In[ ]:
pickle.dump(clf, open("../final_project/my_classifier.pkl", "wb"))
pickle.dump(my_dataset, open("../final_project/my_dataset.pkl", "wb"))
pickle.dump(my_feature_list, open("../final_project/my_feature_list.pkl", "wb"))
# ### Task 6: Dump your classifier, dataset, and features_list
# Task 6: Dump your classifier, dataset, and features_list so anyone can
# check your results. You do not need to change anything below, but make sure
# that the version of poi_id.py that you submit can be run on its own and
# generates the necessary .pkl files for validating your results.
# In[ ]:
dump_classifier_and_data(clf, my_dataset, my_feature_list)
# In[ ]:
| [
"[email protected]"
] | |
b7573dbd36081ad7c08df471d5ffe02f5daaaec4 | a596008c186f893d673b2f7bd33b2136db762b7e | /olympic_medals_zoich.py | 7465ace97521a0ce8fb0be052e83c806687b5876 | [] | no_license | iandriyanov/OlympicStats_zoich | f7495ba1ddb6df15ffc3145b7ad544667ea1ad03 | c9e448253e9cf8e41cdfac93fafb402e2a9ade3f | refs/heads/master | 2021-01-25T05:35:09.374474 | 2014-02-14T11:29:00 | 2014-02-14T11:29:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,186 | py | #-*- coding: utf-8 -*-
############################################
#
# File Name : olympic_medals_zoich.py
#
# Purpose :
#
# Creation Date : 11-02-2014
#
# Last Modified : Wed 12 Feb 2014 09:12:53 AM MSK
#
# Created By : plushka
#
############################################
# http://olympics.clearlytech.com/api/v1/medals
import requests
req = requests.get("http://olympics.clearlytech.com/api/v1/medals")
header = """
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
</head>
<body>
<table border="1" width="35%">
<tr>
<td>COUNTRY</td>
<td>GOLD</td>
<td>SILVER</td>
<td>BRONZE</td>
<td>MEDALS</td>
<td>TABLE PLACES</td>
</tr>
"""
footer = """
</table>
</body>
</html>
"""
with open("res.html", "w") as f:
f.write(header)
for answer in req.json():
if answer[u'rank'] > 0 and answer[u'rank'] <= 10:
f.write("""
<tr>
<td>%s</td>
<td>%s</td>
<td>%s</td>
<td>%s</td>
<td>%s</td>
<td>%s</td>
</tr> """ % (answer[u'country_name'], answer[u'gold_count'], answer[u'silver_count'], answer[u'bronze_count'], answer[u'medal_count'], answer[u'rank']))
f.write(footer)
| [
"[email protected]"
] | |
34f4d0de0e08cbb15c1231e8370fe0c896e3b5a8 | b52230823ef5c1c8791ecb6a9b098c964cf1c413 | /crawling/etf_data_crawl.py | 562fee114fdc07ffc03447cb752032215ee7a763 | [] | no_license | kaya-kim/python | a767dae24687e13443f0d05045bcad170c4d3853 | 83e57fe7a7da854195843dec2aa377614de57606 | refs/heads/main | 2023-08-26T19:52:23.071260 | 2021-11-11T01:56:33 | 2021-11-11T01:56:33 | 426,487,247 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,013 | py | import requests
from bs4 import BeautifulSoup
import urllib3
# warnings error 메세지르 안보여주는 것
urllib3.disable_warnings()
#
ticker = 'VOO'  # renamed from `input` to avoid shadowing the built-in
url = 'https://www.etf.com/'
result_url = url + ticker
# url = 'http://www.kyobobook.co.kr/bestSellerNew/bestseller.laf'
headers = {
'Content-Type': "application/x-www-form-urlencoded; charset=UTF-8",
'User-Agent': "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.81 Safari/537.36"
}
# 503 error -> need to add a User-Agent to the headers
# 500 error -> may need to add both headers and parameters?
response = requests.get(result_url, headers=headers, verify=False)
soup = BeautifulSoup(response.text, "html.parser")
# Search by class name and get the text inside the <a> tag
tags1 = soup.find('div', attrs={'class':'field-content fundReportSegment'}).find("a").get_text()
# For elements sharing the same class but distinguished by id, get the text inside the <a> tag
tags2 = soup.find('div', id='fundIndexData').find("a").get_text()
print("ETF.com segment:",tags1)
print("Index Tracked:",tags2)
| [
"[email protected]"
] | |
834df5a4ec7f57f05c1aabd99b8e4400e81c5ca4 | 1c5a2d92f9ff953629b494cbbf77efc4281d80da | /root/app/local.py | be2b355e741db1ced3615acbd32193bc881ce840 | [] | no_license | fanningert/docker-taiga-backend | d695aadf30928c95a6be7f5b0c828df90b4f7d52 | a3c882982d4e56e57bc1d011ab3dbb285763b557 | refs/heads/master | 2020-03-07T10:22:33.824301 | 2018-03-30T15:38:44 | 2018-03-30T15:38:44 | 127,430,155 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,038 | py | from .common import *
MEDIA_URL = "http://example.com/media/"
STATIC_URL = "http://example.com/static/"
SITES["front"]["scheme"] = "http"
SITES["front"]["domain"] = "example.com"
SECRET_KEY = "theveryultratopsecretkey"
DEBUG = False
PUBLIC_REGISTER_ENABLED = True
DEFAULT_FROM_EMAIL = "[email protected]"
SERVER_EMAIL = DEFAULT_FROM_EMAIL
#CELERY_ENABLED = True
EVENTS_PUSH_BACKEND = "taiga.events.backends.rabbitmq.EventsPushBackend"
EVENTS_PUSH_BACKEND_OPTIONS = {"url": "amqp://taiga:PASSWORD_FOR_EVENTS@localhost:5672/taiga"}
# Uncomment and populate with proper connection parameters
# for enable email sending. EMAIL_HOST_USER should end by @domain.tld
#EMAIL_BACKEND = "django.core.mail.backends.smtp.EmailBackend"
#EMAIL_USE_TLS = False
#EMAIL_HOST = "localhost"
#EMAIL_HOST_USER = ""
#EMAIL_HOST_PASSWORD = ""
#EMAIL_PORT = 25
# Uncomment and populate with proper connection parameters
# for enable github login/singin.
#GITHUB_API_CLIENT_ID = "yourgithubclientid"
#GITHUB_API_CLIENT_SECRET = "yourgithubclientsecret"
| [
"[email protected]"
] | |
f50a62262f8a5fd229e3a174e46c8c9fedf3c950 | cef09d1e6d5e7cd335387d0829211ffb0da18f48 | /tests2/tests/wedge100/test_psumuxmon.py | 73784296b42bf03dd786c25cca01bc61c37967ce | [] | no_license | theopolis/openbmc | a1ef2e3335efd19bf750117d79c1477d47948ff3 | 1784748ba29ee89bccacb2019a0bb86bd181c651 | refs/heads/master | 2020-12-14T07:20:40.273681 | 2019-04-20T05:25:17 | 2019-04-20T05:25:17 | 43,323,632 | 0 | 1 | null | 2015-09-28T19:56:24 | 2015-09-28T19:56:24 | null | UTF-8 | Python | false | false | 2,143 | py | #!/usr/bin/env python
#
# Copyright 2018-present Facebook. All Rights Reserved.
#
# This program file is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program in a file named COPYING; if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor,
# Boston, MA 02110-1301 USA
#
import unittest
import os
import re
from utils.shell_util import run_shell_cmd
from utils.cit_logger import Logger
class PsumuxmonTest(unittest.TestCase):
def setUp(self):
Logger.start(name=__name__)
def tearDown(self):
Logger.info("Finished logging for {}".format(self._testMethodName))
pass
def test_psumuxmon_runit_sv_status(self):
cmd = ["/usr/bin/sv status psumuxmon"]
data = run_shell_cmd(cmd)
self.assertIn("run", data, "psumuxmon process not running")
def get_ltc_hwmon_path(self, path):
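        """Expand a '/sys/.../hwmon/hwmon*/<file>' style path to the concrete
        hwmonN directory present on the system; returns the expanded path
        (best effort), or None when the parent directory does not exist."""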
pcard_vin = None
result = re.split("hwmon", path)
if os.path.isdir(result[0]):
construct_hwmon_path = result[0] + "hwmon"
x = None
for x in os.listdir(construct_hwmon_path):
if x.startswith('hwmon'):
construct_hwmon_path = construct_hwmon_path + "/" + x + "/" + result[2].split("/")[1]
return construct_hwmon_path
return None
def test_psumuxmon_ltc_sensor_path_exists(self):
# Based on lab device deployment, sensor data might not be accessible.
# Verify that path exists
cmd = "/sys/bus/i2c/devices/7-006f/hwmon/hwmon*/in1_input"
self.assertTrue(os.path.exists(self.get_ltc_hwmon_path(cmd)),
"psumuxmon LTC sensor path accessible")
| [
"[email protected]"
] | |
3dc697da8fe16d89cbc36d7223f0cb3c6dd04be9 | 682cffe10c368b059bf24af4ebe2a475430f4ab6 | /vspkgenerator/vanilla/python/__overrides/nuvsdsession.override.py | b608b74bc90dad5af0adaf76cddf83b5135a2d41 | [
"BSD-3-Clause"
] | permissive | mkarnam/vspkgenerator | adedda8d5d6dd38d046680842a0891a314c7fbbb | c35586826a0e5261c4ce88c5827f22941249bc3f | refs/heads/master | 2022-12-07T22:29:17.482983 | 2022-11-23T10:35:12 | 2022-11-23T10:35:12 | 62,943,040 | 0 | 0 | BSD-3-Clause | 2022-11-23T10:35:13 | 2016-07-09T09:48:27 | HTML | UTF-8 | Python | false | false | 53 | py | @property
def user(self):
    return self.root_object
| [
"[email protected]"
] | |
803c12056e1bb1f8bb8a7ab3310523f027750019 | 338a11833d8e83dd0e4580ab3dc21b95fe17183b | /logica.py | 145a353284a5785f04491bdf85f74a8b95240a4a | [] | no_license | MaBlestastic/UML-TiendaElectronica | 6f3294a68dca2ca9fc796669307886d108e0a32f | 73a119e3224accdb9ffc90e4cb832f76590a8995 | refs/heads/main | 2023-09-06T00:47:24.907642 | 2021-11-13T00:04:01 | 2021-11-13T00:04:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 576 | py | import beconnect
def gestionarProv (nombreprod):
beconnect.Mostrar("SELECT nombreprod FROM producto WHERE nombreprod = "+ nombreprod )
pass
def controlarProd():
pass
def comprarProd():
pass
def controlarStockProd():
pass
def venderCliente():
pass
def reservarProd():
pass
def registrarProd():  # renamed: redefining gestionarProv() would shadow the version above
    Nombre = input("Product name: \t")
    Descripcion = input("Description: \t")
    sql = "INSERT INTO producto (nombreprod,descripprod) VALUES (%s,%s)"
    val = [(Nombre, Descripcion)]
    beconnect.EjecutarSQL_VAL(sql, val)
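# Hedged sketch (beconnect's real API is unknown): a parameterized,
# injection-safe version of the SELECT above would look like this with a
# mysql-connector-style cursor:
#
# cursor.execute("SELECT nombreprod FROM producto WHERE nombreprod = %s",
#                (nombreprod,))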
registrarProd()
| [
"[email protected]"
] | |
33f504c5e1c391f90e11226e1c15be67091ee79f | 0124528676ee3bbaec60df5d6950b408e6da37c8 | /Projects/QTPy/circuitpython-community-bundle-7.x-mpy-20220601/examples/animation/main.py | ee50a4f811bdd29fdf5d3d51de532f353ba0b5a1 | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | land-boards/lb-boards | 8127658dc537dcfde0bb59a5018ab75c3f0087f6 | eeb98cc2003dac1924845d949f6f5bd387376568 | refs/heads/master | 2023-06-07T15:44:46.110742 | 2023-06-02T22:53:24 | 2023-06-02T22:53:24 | 4,847,305 | 10 | 12 | null | null | null | null | UTF-8 | Python | false | false | 1,421 | py | import board
import dotstar_featherwing
wing = dotstar_featherwing.DotstarFeatherwing(board.D13, board.D11)
xmas_colors = {'w': ( 32, 32, 32),
'W': (255, 255, 255),
'G': ( 0, 32, 0),
'y': ( 32, 32, 0),
'Y': (255, 255, 0)}
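# Each animation frame below is a list of 12-character rows (one character per
# pixel column); characters are looked up in xmas_colors. Treating '.' as an
# unlit pixel is an assumption about dotstar_featherwing.display_animation,
# not a documented guarantee.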
xmas_animation = [["..y.w......w",
"..G.....w...",
"..G..w....w.",
".GGG...w....",
"GGGGG.......",
"wwwwwwwwwwww"],
["..y.........",
"..G.W......w",
"..G.....w...",
".GGG.w....W.",
"GGGGG..w....",
"wwwwwwwwwwww"],
["..Y....W....",
"..G.........",
"..G.w......w",
".GGG....w...",
"GGGGGw....W.",
"wwwwwwwwwwww"],
["..y..w....w.",
"..G....W....",
"..G.........",
".GGGW......w",
"GGGGG...w...",
"wwwwwwwwwwww"],
["..Y.....w...",
"..G..w....W.",
"..G....w....",
".GGG........",
"GGGGG......W",
"wwwwwwwwwwww"]]
wing.display_animation(xmas_animation, xmas_colors, 10000, 0.05)
| [
"[email protected]"
] | |
b7d5c92398dbcae7d70b09607ef8e5cd5221e0f7 | d2e3cd42cd150f09f4bdc82286248d692ac46195 | /networkx/algorithms/isomorphism/tests/vf2pp/test_Ti_computing.py | f548fca021c4f13b306a9e1263079ffe8fc30470 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | bangtree/networkx | 7414f13c20ec600822b7de41cb8188f9651cf256 | b37d5931d1d162e98c7c5f10b2f6c7030cc187cf | refs/heads/master | 2022-12-05T19:21:53.915903 | 2022-12-02T02:44:29 | 2022-12-02T02:44:29 | 29,867,402 | 0 | 0 | null | 2015-01-26T15:35:12 | 2015-01-26T15:35:11 | null | UTF-8 | Python | false | false | 9,716 | py | import networkx as nx
from networkx.algorithms.isomorphism.vf2pp import (
_GraphParameters,
_initialize_parameters,
_StateParameters,
)
from networkx.algorithms.isomorphism.vf2pp_helpers.state import (
_restore_Tinout,
_restore_Tinout_Di,
_update_Tinout,
)
class TestGraphTinoutUpdating:
edges = [
(1, 3),
(2, 3),
(3, 4),
(4, 9),
(4, 5),
(3, 9),
(5, 8),
(5, 7),
(8, 7),
(6, 7),
]
mapped = {
0: "x",
1: "a",
2: "b",
3: "c",
4: "d",
5: "e",
6: "f",
7: "g",
8: "h",
9: "i",
}
G1 = nx.Graph()
G1.add_edges_from(edges)
G1.add_node(0)
G2 = nx.relabel_nodes(G1, mapping=mapped)
def test_updating(self):
G2_degree = dict(self.G2.degree)
gparams, sparams = _initialize_parameters(self.G1, self.G2, G2_degree)
m, m_rev, T1, _, T1_tilde, _, T2, _, T2_tilde, _ = sparams
# Add node to the mapping
m[4] = self.mapped[4]
m_rev[self.mapped[4]] = 4
_update_Tinout(4, self.mapped[4], gparams, sparams)
assert T1 == {3, 5, 9}
assert T2 == {"c", "i", "e"}
assert T1_tilde == {0, 1, 2, 6, 7, 8}
assert T2_tilde == {"x", "a", "b", "f", "g", "h"}
# Add node to the mapping
m[5] = self.mapped[5]
m_rev.update({self.mapped[5]: 5})
_update_Tinout(5, self.mapped[5], gparams, sparams)
assert T1 == {3, 9, 8, 7}
assert T2 == {"c", "i", "h", "g"}
assert T1_tilde == {0, 1, 2, 6}
assert T2_tilde == {"x", "a", "b", "f"}
# Add node to the mapping
m[6] = self.mapped[6]
m_rev.update({self.mapped[6]: 6})
_update_Tinout(6, self.mapped[6], gparams, sparams)
assert T1 == {3, 9, 8, 7}
assert T2 == {"c", "i", "h", "g"}
assert T1_tilde == {0, 1, 2}
assert T2_tilde == {"x", "a", "b"}
# Add node to the mapping
m[3] = self.mapped[3]
m_rev.update({self.mapped[3]: 3})
_update_Tinout(3, self.mapped[3], gparams, sparams)
assert T1 == {1, 2, 9, 8, 7}
assert T2 == {"a", "b", "i", "h", "g"}
assert T1_tilde == {0}
assert T2_tilde == {"x"}
# Add node to the mapping
m[0] = self.mapped[0]
m_rev.update({self.mapped[0]: 0})
_update_Tinout(0, self.mapped[0], gparams, sparams)
assert T1 == {1, 2, 9, 8, 7}
assert T2 == {"a", "b", "i", "h", "g"}
assert T1_tilde == set()
assert T2_tilde == set()
def test_restoring(self):
m = {0: "x", 3: "c", 4: "d", 5: "e", 6: "f"}
m_rev = {"x": 0, "c": 3, "d": 4, "e": 5, "f": 6}
T1 = {1, 2, 7, 9, 8}
T2 = {"a", "b", "g", "i", "h"}
T1_tilde = set()
T2_tilde = set()
gparams = _GraphParameters(self.G1, self.G2, {}, {}, {}, {}, {})
sparams = _StateParameters(
m, m_rev, T1, None, T1_tilde, None, T2, None, T2_tilde, None
)
# Remove a node from the mapping
m.pop(0)
m_rev.pop("x")
_restore_Tinout(0, self.mapped[0], gparams, sparams)
assert T1 == {1, 2, 7, 9, 8}
assert T2 == {"a", "b", "g", "i", "h"}
assert T1_tilde == {0}
assert T2_tilde == {"x"}
# Remove a node from the mapping
m.pop(6)
m_rev.pop("f")
_restore_Tinout(6, self.mapped[6], gparams, sparams)
assert T1 == {1, 2, 7, 9, 8}
assert T2 == {"a", "b", "g", "i", "h"}
assert T1_tilde == {0, 6}
assert T2_tilde == {"x", "f"}
# Remove a node from the mapping
m.pop(3)
m_rev.pop("c")
_restore_Tinout(3, self.mapped[3], gparams, sparams)
assert T1 == {7, 9, 8, 3}
assert T2 == {"g", "i", "h", "c"}
assert T1_tilde == {0, 6, 1, 2}
assert T2_tilde == {"x", "f", "a", "b"}
# Remove a node from the mapping
m.pop(5)
m_rev.pop("e")
_restore_Tinout(5, self.mapped[5], gparams, sparams)
assert T1 == {9, 3, 5}
assert T2 == {"i", "c", "e"}
assert T1_tilde == {0, 6, 1, 2, 7, 8}
assert T2_tilde == {"x", "f", "a", "b", "g", "h"}
# Remove a node from the mapping
m.pop(4)
m_rev.pop("d")
_restore_Tinout(4, self.mapped[4], gparams, sparams)
assert T1 == set()
assert T2 == set()
assert T1_tilde == set(self.G1.nodes())
assert T2_tilde == set(self.G2.nodes())
class TestDiGraphTinoutUpdating:
edges = [
(1, 3),
(3, 2),
(3, 4),
(4, 9),
(4, 5),
(3, 9),
(5, 8),
(5, 7),
(8, 7),
(7, 6),
]
mapped = {
0: "x",
1: "a",
2: "b",
3: "c",
4: "d",
5: "e",
6: "f",
7: "g",
8: "h",
9: "i",
}
G1 = nx.DiGraph(edges)
G1.add_node(0)
G2 = nx.relabel_nodes(G1, mapping=mapped)
def test_updating(self):
G2_degree = {
n: (in_degree, out_degree)
for (n, in_degree), (_, out_degree) in zip(
self.G2.in_degree, self.G2.out_degree
)
}
gparams, sparams = _initialize_parameters(self.G1, self.G2, G2_degree)
m, m_rev, T1_out, T1_in, T1_tilde, _, T2_out, T2_in, T2_tilde, _ = sparams
# Add node to the mapping
m[4] = self.mapped[4]
m_rev[self.mapped[4]] = 4
_update_Tinout(4, self.mapped[4], gparams, sparams)
assert T1_out == {5, 9}
assert T1_in == {3}
assert T2_out == {"i", "e"}
assert T2_in == {"c"}
assert T1_tilde == {0, 1, 2, 6, 7, 8}
assert T2_tilde == {"x", "a", "b", "f", "g", "h"}
# Add node to the mapping
m[5] = self.mapped[5]
m_rev[self.mapped[5]] = 5
_update_Tinout(5, self.mapped[5], gparams, sparams)
assert T1_out == {9, 8, 7}
assert T1_in == {3}
assert T2_out == {"i", "g", "h"}
assert T2_in == {"c"}
assert T1_tilde == {0, 1, 2, 6}
assert T2_tilde == {"x", "a", "b", "f"}
# Add node to the mapping
m[6] = self.mapped[6]
m_rev[self.mapped[6]] = 6
_update_Tinout(6, self.mapped[6], gparams, sparams)
assert T1_out == {9, 8, 7}
assert T1_in == {3, 7}
assert T2_out == {"i", "g", "h"}
assert T2_in == {"c", "g"}
assert T1_tilde == {0, 1, 2}
assert T2_tilde == {"x", "a", "b"}
# Add node to the mapping
m[3] = self.mapped[3]
m_rev[self.mapped[3]] = 3
_update_Tinout(3, self.mapped[3], gparams, sparams)
assert T1_out == {9, 8, 7, 2}
assert T1_in == {7, 1}
assert T2_out == {"i", "g", "h", "b"}
assert T2_in == {"g", "a"}
assert T1_tilde == {0}
assert T2_tilde == {"x"}
# Add node to the mapping
m[0] = self.mapped[0]
m_rev[self.mapped[0]] = 0
_update_Tinout(0, self.mapped[0], gparams, sparams)
assert T1_out == {9, 8, 7, 2}
assert T1_in == {7, 1}
assert T2_out == {"i", "g", "h", "b"}
assert T2_in == {"g", "a"}
assert T1_tilde == set()
assert T2_tilde == set()
def test_restoring(self):
m = {0: "x", 3: "c", 4: "d", 5: "e", 6: "f"}
m_rev = {"x": 0, "c": 3, "d": 4, "e": 5, "f": 6}
T1_out = {2, 7, 9, 8}
T1_in = {1, 7}
T2_out = {"b", "g", "i", "h"}
T2_in = {"a", "g"}
T1_tilde = set()
T2_tilde = set()
gparams = _GraphParameters(self.G1, self.G2, {}, {}, {}, {}, {})
sparams = _StateParameters(
m, m_rev, T1_out, T1_in, T1_tilde, None, T2_out, T2_in, T2_tilde, None
)
# Remove a node from the mapping
m.pop(0)
m_rev.pop("x")
_restore_Tinout_Di(0, self.mapped[0], gparams, sparams)
assert T1_out == {2, 7, 9, 8}
assert T1_in == {1, 7}
assert T2_out == {"b", "g", "i", "h"}
assert T2_in == {"a", "g"}
assert T1_tilde == {0}
assert T2_tilde == {"x"}
# Remove a node from the mapping
m.pop(6)
m_rev.pop("f")
_restore_Tinout_Di(6, self.mapped[6], gparams, sparams)
assert T1_out == {2, 9, 8, 7}
assert T1_in == {1}
assert T2_out == {"b", "i", "h", "g"}
assert T2_in == {"a"}
assert T1_tilde == {0, 6}
assert T2_tilde == {"x", "f"}
# Remove a node from the mapping
m.pop(3)
m_rev.pop("c")
_restore_Tinout_Di(3, self.mapped[3], gparams, sparams)
assert T1_out == {9, 8, 7}
assert T1_in == {3}
assert T2_out == {"i", "h", "g"}
assert T2_in == {"c"}
assert T1_tilde == {0, 6, 1, 2}
assert T2_tilde == {"x", "f", "a", "b"}
# Remove a node from the mapping
m.pop(5)
m_rev.pop("e")
_restore_Tinout_Di(5, self.mapped[5], gparams, sparams)
assert T1_out == {9, 5}
assert T1_in == {3}
assert T2_out == {"i", "e"}
assert T2_in == {"c"}
assert T1_tilde == {0, 6, 1, 2, 8, 7}
assert T2_tilde == {"x", "f", "a", "b", "h", "g"}
# Remove a node from the mapping
m.pop(4)
m_rev.pop("d")
_restore_Tinout_Di(4, self.mapped[4], gparams, sparams)
assert T1_out == set()
assert T1_in == set()
assert T2_out == set()
assert T2_in == set()
assert T1_tilde == set(self.G1.nodes())
assert T2_tilde == set(self.G2.nodes())
| [
"[email protected]"
] | |
6695fe7d91cb42829879fd44facc222b44f31ffd | 04add342c1ad999a510f4b7f39870d408d038b13 | /0x1C-island_perimeter/0-island_perimeter.py | 4947c615bbe30df52a48440979b021b9ca328262 | [] | no_license | hamdi458/holbertonschool-interview | eca60f762e3089f064c83049a3e0097c59f955b2 | 1a5eea7c5f1556e4bdf23db10e4611ea9ca3c22a | refs/heads/main | 2023-08-07T19:39:39.746020 | 2021-09-24T12:38:10 | 2021-09-24T12:38:10 | 320,259,571 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,101 | py | #!/usr/bin/python3
"""the perimeter of the island described in grid"""
def island_perimeter(grid):
"""returns the perimeter of the island described in grid:
grid is a list of list of integers:
0 represents water
1 represents land
Each cell is square, with a side length of 1
Cells are connected horizontally/vertically (not diagonally).
grid is rectangular, with its width and height not exceeding 100
The grid is completely surrounded by water
There is only one island (or nothing).
The island doesn’t have “lakes” (water inside that isn’t
connected to the water surrounding the island)."""
    perimeter = 0  # renamed from `sum` to avoid shadowing the built-in
    for i in range(len(grid)):
        for j in range(len(grid[i])):
            if grid[i][j] == 1:
                if i == 0 or grid[i - 1][j] == 0:
                    perimeter += 1
                if j == 0 or grid[i][j - 1] == 0:
                    perimeter += 1
                if i == len(grid) - 1 or grid[i + 1][j] == 0:
                    perimeter += 1
                if j == len(grid[i]) - 1 or grid[i][j + 1] == 0:
                    perimeter += 1
    return perimeter
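

if __name__ == "__main__":
    # Hedged usage example (grid chosen for illustration): a plus-shaped
    # island has 5 cells and 4 internal shared edges, so the perimeter is
    # 5 * 4 - 4 * 2 = 12.
    grid = [[0, 1, 0],
            [1, 1, 1],
            [0, 1, 0]]
    print(island_perimeter(grid))  # -> 12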
| [
"[email protected]"
] | |
2b19f94d126f21b48d19683f2785c9ea50a508a4 | 24653fc7753145833651a39c5ccfd2dce9776ef9 | /tests/test_kms/test_model.py | 5d0ffc0978aeb6a962d1f2ed7df60755752a3331 | [
"Apache-2.0"
] | permissive | cm-iwata/moto | fd47802b7bdec567eef575a14109a5fb0c92eea4 | 9640ec20d125248ac91243591c7db50daabfd135 | refs/heads/master | 2022-07-13T23:21:56.898602 | 2022-06-13T10:14:22 | 2022-06-13T10:14:22 | 143,237,437 | 0 | 0 | Apache-2.0 | 2018-08-02T03:27:08 | 2018-08-02T03:27:08 | null | UTF-8 | Python | false | false | 1,147 | py | import pytest
from moto.kms.models import KmsBackend
PLAINTEXT = b"text"
REGION = "us-east-1"
@pytest.fixture
def backend():
return KmsBackend(REGION)
@pytest.fixture
def key(backend):
return backend.create_key(
None, "ENCRYPT_DECRYPT", "SYMMETRIC_DEFAULT", "Test key", None, REGION
)
def test_encrypt_key_id(backend, key):
ciphertext, arn = backend.encrypt(key.id, PLAINTEXT, {})
assert ciphertext is not None
assert arn == key.arn
def test_encrypt_key_arn(backend, key):
ciphertext, arn = backend.encrypt(key.arn, PLAINTEXT, {})
assert ciphertext is not None
assert arn == key.arn
def test_encrypt_alias_name(backend, key):
backend.add_alias(key.id, "alias/test/test")
ciphertext, arn = backend.encrypt("alias/test/test", PLAINTEXT, {})
assert ciphertext is not None
assert arn == key.arn
def test_encrypt_alias_arn(backend, key):
backend.add_alias(key.id, "alias/test/test")
ciphertext, arn = backend.encrypt(
f"arn:aws:kms:{REGION}:{key.account_id}:alias/test/test", PLAINTEXT, {}
)
assert ciphertext is not None
assert arn == key.arn
| [
"[email protected]"
] | |
cb07a323abf8740806bebc941c841ab0e659081b | e6ad1014aacaa92643f42952c278469177defc15 | /napalm_ansible/napalm_diff_yang.py | d134e9bb1a69665bbfabcb13f326bcf956c8cb1d | [
"Apache-2.0"
] | permissive | cspeidel/napalm-ansible | d290ee7cc1abd9dd7d11044d5ddc542bd6658906 | 8ad4badb38d79ec5efd96faa666c71f7438dfa28 | refs/heads/develop | 2022-02-09T05:40:10.302690 | 2017-11-06T20:51:58 | 2017-11-06T20:51:58 | 110,727,639 | 0 | 0 | Apache-2.0 | 2022-01-31T16:25:25 | 2017-11-14T18:18:35 | Python | UTF-8 | Python | false | false | 3,409 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
(c) 2017 David Barroso <[email protected]>
This file is part of Ansible
Ansible is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Ansible is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Ansible. If not, see <http://www.gnu.org/licenses/>.
"""
from ansible.module_utils.basic import AnsibleModule
try:
import napalm_yang
except ImportError:
napalm_yang = None
DOCUMENTATION = '''
---
module: napalm_diff_yang
author: "David Barroso (@dbarrosop)"
version_added: "0.0"
short_description: "Return diff of two YANG objects"
description:
- "Create two YANG objects from dictionaries and runs mehtod"
- "napalm_yang.utils.diff on them."
requirements:
- napalm-yang
options:
models:
description:
- List of models to parse
required: True
first:
description:
- Dictionary with the data to load into the first YANG object
required: True
second:
description:
- Dictionary with the data to load into the second YANG object
required: True
'''
EXAMPLES = '''
napalm_diff_yang:
first: "{{ candidate.yang_model }}"
second: "{{ running_config.yang_model }}"
models:
- models.openconfig_interfaces
register: diff
'''
RETURN = '''
diff:
description: "Same output as the method napalm_yang.utils.diff"
returned: always
type: dict
sample: {
"interfaces": {
"interface": {
"both": {
"Port-Channel1": {
"config": {
"description": {
"first": "blah",
"second": "Asadasd"
}
}
}
}
}
}
'''
def get_root_object(models):
"""
Read list of models and returns a Root object with the proper models added.
"""
root = napalm_yang.base.Root()
for model in models:
current = napalm_yang
for p in model.split("."):
current = getattr(current, p)
root.add_model(current)
return root
def main():
module = AnsibleModule(
argument_spec=dict(
models=dict(type="list", required=True),
first=dict(type='dict', required=True),
second=dict(type='dict', required=True),
),
supports_check_mode=True
)
if not napalm_yang:
module.fail_json(msg="the python module napalm-yang is required")
first = get_root_object(module.params["models"])
first.load_dict(module.params["first"])
second = get_root_object(module.params["models"])
second.load_dict(module.params["second"])
diff = napalm_yang.utils.diff(first, second)
module.exit_json(yang_diff=diff)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
34bb012d42ec90f93b307b447f5c5cd8c6a26646 | c7a1c1ae40e9d95dfb92251dcfbf3c5010e6ba81 | /sensehat/pi_surveillance_py.py | 260dc24e20057985e9e1a46675745b948e2da882 | [] | no_license | pranavlathigara/Raspberry-Pi-DIY-Projects | efd18e2e5b9b8369bb1a5f5418782480cf9bc729 | 0c14c316898d4d06015912ac4a8cb7b71a3980c0 | refs/heads/master | 2021-04-06T09:14:28.088223 | 2018-02-19T00:15:22 | 2018-02-19T00:15:22 | 124,649,553 | 1 | 2 | null | 2018-03-10T11:30:59 | 2018-03-10T11:30:59 | null | UTF-8 | Python | false | false | 3,605 | py | from pyimagesearch.tempimage import TempImage
import dropbox as dbx
from picamera.array import PiRGBArray
from picamera import PiCamera
import warnings
import datetime
import imutils
import json
import time
import cv2
# filter warnings, load the configuration and initialize the Dropbox
# client
warnings.filterwarnings("ignore")
# Put your Dropbox API token here; the v2 SDK client is used for the uploads below.
client = dbx.Dropbox("YOUR_TOKEN_HERE")
# initialize the camera and grab a reference to the raw camera capture
camera = PiCamera()
camera.resolution = (640,480)
camera.framerate = 16
rawCapture = PiRGBArray(camera, size=(640,480))
# allow the camera to warmup, then initialize the average frame, last
# uploaded timestamp, and frame motion counter
print "[INFO] warming up..."
time.sleep(2.5)
avg = None
lastUploaded = datetime.datetime.now()
motionCounter = 0
# capture frames from the camera
for f in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
# grab the raw NumPy array representing the image and initialize
# the timestamp and occupied/unoccupied text
	frame = f.array
	timestamp = datetime.datetime.now()
	text = ""  # set to "!" below when motion is detected in this frame
# resize the frame, convert it to grayscale, and blur it
frame = imutils.resize(frame, width=500)
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (21, 21), 0)
# if the average frame is None, initialize it
if avg is None:
print "[INFO] starting background model..."
avg = gray.copy().astype("float")
rawCapture.truncate(0)
continue
# accumulate the weighted average between the current frame and
# previous frames, then compute the difference between the current
# frame and running average
cv2.accumulateWeighted(gray, avg, 0.5)
frameDelta = cv2.absdiff(gray, cv2.convertScaleAbs(avg))
# threshold the delta image, dilate the thresholded image to fill
# in holes, then find contours on thresholded image
thresh = cv2.threshold(frameDelta, 5, 255,
cv2.THRESH_BINARY)[1]
thresh = cv2.dilate(thresh, None, iterations=2)
	cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
		cv2.CHAIN_APPROX_SIMPLE)
	cnts = imutils.grab_contours(cnts)  # tolerate OpenCV 2/3/4 return signatures
# loop over the contours
for c in cnts:
# if the contour is too small, ignore it
if cv2.contourArea(c) < 5000:
continue
# compute the bounding box for the contour, draw it on the frame,
# and update the text
(x, y, w, h) = cv2.boundingRect(c)
cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
text = "!"
# draw the text and timestamp on the frame
ts = timestamp.strftime("%A %d %B %Y %I:%M:%S%p")
cv2.putText(frame, "{}".format(ts), (10, 20),
cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
# check to see if the room is occupied
if text == "!":
# check to see if enough time has passed between uploads
if (timestamp - lastUploaded).seconds >= 3.0:
# increment the motion counter
motionCounter += 1
# check to see if the number of frames with consistent motion is
# high enough
if motionCounter >= 8:
# write the image to temporary file
t = TempImage()
cv2.imwrite(t.path, frame)
print "[UPLOAD] {}".format(ts)
path = "{base_path}/{timestamp}.jpg".format(base_path="/", timestamp=ts)
client.put_file(open(t.path, "rb").read(), path)
t.cleanup()
# update the last uploaded timestamp and reset the motion
# counter
lastUploaded = timestamp
motionCounter = 0
# otherwise, the room is not occupied
else:
motionCounter = 0
# clear the stream in preparation for the next frame
	rawCapture.truncate(0)
| [
"[email protected]"
] | |
eecaffdbe17ebf356d4729447b601c155f4a4f9d | 209c876b1e248fd67bd156a137d961a6610f93c7 | /python/paddle/metric/metrics.py | aeec4022e218424eb20183b6917aa2f39a17d588 | [
"Apache-2.0"
] | permissive | Qengineering/Paddle | 36e0dba37d29146ebef4fba869490ecedbf4294e | 591456c69b76ee96d04b7d15dca6bb8080301f21 | refs/heads/develop | 2023-01-24T12:40:04.551345 | 2022-10-06T10:30:56 | 2022-10-06T10:30:56 | 544,837,444 | 0 | 0 | Apache-2.0 | 2022-10-03T10:12:54 | 2022-10-03T10:12:54 | null | UTF-8 | Python | false | false | 28,411 | py | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import six
import abc
import numpy as np
from ..fluid.data_feeder import check_variable_and_dtype
from ..fluid.layer_helper import LayerHelper
from ..fluid.framework import core, _varbase_creator, _non_static_mode, _in_legacy_dygraph
import paddle
from paddle import _C_ops, _legacy_C_ops
__all__ = []
def _is_numpy_(var):
return isinstance(var, (np.ndarray, np.generic))
@six.add_metaclass(abc.ABCMeta)
class Metric(object):
r"""
Base class for metric, encapsulates metric logic and APIs
Usage:
.. code-block:: text
m = SomeMetric()
for prediction, label in ...:
m.update(prediction, label)
m.accumulate()
Advanced usage for :code:`compute`:
    Metric calculation can be accelerated by calculating metric states
    from model outputs and labels with built-in operators (rather than
    Python/NumPy) in :code:`compute`; metric states are then fetched as
    NumPy arrays and passed to :code:`update` in NumPy format.
Metric calculated as follows (operations in Model and Metric are
indicated with curly brackets, while data nodes not):
.. code-block:: text
inputs & labels || ------------------
| ||
{model} ||
| ||
outputs & labels ||
| || tensor data
{Metric.compute} ||
| ||
metric states(tensor) ||
| ||
{fetch as numpy} || ------------------
| ||
metric states(numpy) || numpy data
| ||
{Metric.update} \/ ------------------
Examples:
For :code:`Accuracy` metric, which takes :code:`pred` and :code:`label`
as inputs, we can calculate the correct prediction matrix between
:code:`pred` and :code:`label` in :code:`compute`.
For examples, prediction results contains 10 classes, while :code:`pred`
shape is [N, 10], :code:`label` shape is [N, 1], N is mini-batch size,
    and we only need to calculate the accuracy of top-1 and top-5, we can
calculate the correct prediction matrix of the top-5 scores of the
prediction of each sample like follows, while the correct prediction
matrix shape is [N, 5].
.. code-block:: text
def compute(pred, label):
# sort prediction and slice the top-5 scores
pred = paddle.argsort(pred, descending=True)[:, :5]
# calculate whether the predictions are correct
correct = pred == label
return paddle.cast(correct, dtype='float32')
With the :code:`compute`, we split some calculations to OPs (which
may run on GPU devices, will be faster), and only fetch 1 tensor with
shape as [N, 5] instead of 2 tensors with shapes as [N, 10] and [N, 1].
:code:`update` can be define as follows:
.. code-block:: text
def update(self, correct):
accs = []
for i, k in enumerate(self.topk):
num_corrects = correct[:, :k].sum()
num_samples = len(correct)
accs.append(float(num_corrects) / num_samples)
self.total[i] += num_corrects
self.count[i] += num_samples
return accs
"""
def __init__(self):
pass
@abc.abstractmethod
def reset(self):
"""
Reset states and result
"""
raise NotImplementedError(
"function 'reset' not implemented in {}.".format(
self.__class__.__name__))
@abc.abstractmethod
def update(self, *args):
"""
Update states for metric
Inputs of :code:`update` is the outputs of :code:`Metric.compute`,
if :code:`compute` is not defined, the inputs of :code:`update`
will be flatten arguments of **output** of mode and **label** from data:
:code:`update(output1, output2, ..., label1, label2,...)`
see :code:`Metric.compute`
"""
raise NotImplementedError(
"function 'update' not implemented in {}.".format(
self.__class__.__name__))
@abc.abstractmethod
def accumulate(self):
"""
Accumulates statistics, computes and returns the metric value
"""
raise NotImplementedError(
"function 'accumulate' not implemented in {}.".format(
self.__class__.__name__))
@abc.abstractmethod
def name(self):
"""
Returns metric name
"""
raise NotImplementedError(
"function 'name' not implemented in {}.".format(
self.__class__.__name__))
def compute(self, *args):
"""
        This API is an advanced usage to accelerate metric calculation.
        Calculations from model outputs to the states which should be
        updated by the Metric can be defined here, and Paddle OPs are also
        supported. Outputs of this API will be the inputs of "Metric.update".
If :code:`compute` is defined, it will be called with **outputs**
of model and **labels** from data as arguments, all outputs and labels
will be concatenated and flatten and each filed as a separate argument
as follows:
:code:`compute(output1, output2, ..., label1, label2,...)`
If :code:`compute` is not defined, default behaviour is to pass
input to output, so output format will be:
:code:`return output1, output2, ..., label1, label2,...`
see :code:`Metric.update`
"""
return args
class Accuracy(Metric):
"""
Encapsulates accuracy metric logic.
Args:
topk (list[int]|tuple[int]): Number of top elements to look at
for computing accuracy. Default is (1,).
name (str, optional): String name of the metric instance. Default
is `acc`.
Example by standalone:
.. code-block:: python
import numpy as np
import paddle
x = paddle.to_tensor(np.array([
[0.1, 0.2, 0.3, 0.4],
[0.1, 0.4, 0.3, 0.2],
[0.1, 0.2, 0.4, 0.3],
[0.1, 0.2, 0.3, 0.4]]))
y = paddle.to_tensor(np.array([[0], [1], [2], [3]]))
m = paddle.metric.Accuracy()
correct = m.compute(x, y)
m.update(correct)
res = m.accumulate()
print(res) # 0.75
Example with Model API:
.. code-block:: python
import paddle
from paddle.static import InputSpec
import paddle.vision.transforms as T
from paddle.vision.datasets import MNIST
input = InputSpec([None, 1, 28, 28], 'float32', 'image')
label = InputSpec([None, 1], 'int64', 'label')
transform = T.Compose([T.Transpose(), T.Normalize([127.5], [127.5])])
train_dataset = MNIST(mode='train', transform=transform)
model = paddle.Model(paddle.vision.models.LeNet(), input, label)
optim = paddle.optimizer.Adam(
learning_rate=0.001, parameters=model.parameters())
model.prepare(
optim,
loss=paddle.nn.CrossEntropyLoss(),
metrics=paddle.metric.Accuracy())
model.fit(train_dataset, batch_size=64)
"""
def __init__(self, topk=(1, ), name=None, *args, **kwargs):
super(Accuracy, self).__init__(*args, **kwargs)
self.topk = topk
self.maxk = max(topk)
self._init_name(name)
self.reset()
def compute(self, pred, label, *args):
"""
Compute the top-k (maximum value in `topk`) indices.
Args:
pred (Tensor): The predicted value is a Tensor with dtype
float32 or float64. Shape is [batch_size, d0, ..., dN].
label (Tensor): The ground truth value is Tensor with dtype
int64. Shape is [batch_size, d0, ..., 1], or
[batch_size, d0, ..., num_classes] in one hot representation.
Return:
Tensor: Correct mask, a tensor with shape [batch_size, d0, ..., topk].
"""
pred = paddle.argsort(pred, descending=True)
pred = paddle.slice(pred,
axes=[len(pred.shape) - 1],
starts=[0],
ends=[self.maxk])
if (len(label.shape) == 1) or \
(len(label.shape) == 2 and label.shape[-1] == 1):
# In static mode, the real label data shape may be different
# from shape defined by paddle.static.InputSpec in model
# building, reshape to the right shape.
label = paddle.reshape(label, (-1, 1))
elif label.shape[-1] != 1:
# one-hot label
label = paddle.argmax(label, axis=-1, keepdim=True)
correct = pred == label
return paddle.cast(correct, dtype='float32')
def update(self, correct, *args):
"""
Update the metrics states (correct count and total count), in order to
calculate cumulative accuracy of all instances. This function also
returns the accuracy of current step.
Args:
correct: Correct mask, a tensor with shape [batch_size, d0, ..., topk].
Return:
Tensor: the accuracy of current step.
"""
if isinstance(correct, (paddle.Tensor, paddle.fluid.core.eager.Tensor)):
correct = correct.numpy()
num_samples = np.prod(np.array(correct.shape[:-1]))
accs = []
for i, k in enumerate(self.topk):
num_corrects = correct[..., :k].sum()
accs.append(float(num_corrects) / num_samples)
self.total[i] += num_corrects
self.count[i] += num_samples
accs = accs[0] if len(self.topk) == 1 else accs
return accs
def reset(self):
"""
Resets all of the metric state.
"""
self.total = [0.] * len(self.topk)
self.count = [0] * len(self.topk)
def accumulate(self):
"""
Computes and returns the accumulated metric.
"""
res = []
for t, c in zip(self.total, self.count):
r = float(t) / c if c > 0 else 0.
res.append(r)
res = res[0] if len(self.topk) == 1 else res
return res
def _init_name(self, name):
name = name or 'acc'
if self.maxk != 1:
self._name = ['{}_top{}'.format(name, k) for k in self.topk]
else:
self._name = [name]
def name(self):
"""
Return name of metric instance.
"""
return self._name
class Precision(Metric):
"""
Precision (also called positive predictive value) is the fraction of
relevant instances among the retrieved instances. Refer to
https://en.wikipedia.org/wiki/Evaluation_of_binary_classifiers
Noted that this class manages the precision score only for binary
classification task.
Args:
name (str, optional): String name of the metric instance.
Default is `precision`.
Example by standalone:
.. code-block:: python
import numpy as np
import paddle
x = np.array([0.1, 0.5, 0.6, 0.7])
y = np.array([0, 1, 1, 1])
m = paddle.metric.Precision()
m.update(x, y)
res = m.accumulate()
print(res) # 1.0
Example with Model API:
.. code-block:: python
import numpy as np
import paddle
import paddle.nn as nn
class Data(paddle.io.Dataset):
def __init__(self):
super(Data, self).__init__()
self.n = 1024
self.x = np.random.randn(self.n, 10).astype('float32')
self.y = np.random.randint(2, size=(self.n, 1)).astype('float32')
def __getitem__(self, idx):
return self.x[idx], self.y[idx]
def __len__(self):
return self.n
model = paddle.Model(nn.Sequential(
nn.Linear(10, 1),
nn.Sigmoid()
))
optim = paddle.optimizer.Adam(
learning_rate=0.001, parameters=model.parameters())
model.prepare(
optim,
loss=nn.BCELoss(),
metrics=paddle.metric.Precision())
data = Data()
model.fit(data, batch_size=16)
"""
def __init__(self, name='precision', *args, **kwargs):
super(Precision, self).__init__(*args, **kwargs)
self.tp = 0 # true positive
self.fp = 0 # false positive
self._name = name
def update(self, preds, labels):
"""
Update the states based on the current mini-batch prediction results.
Args:
preds (numpy.ndarray): The prediction result, usually the output
of two-class sigmoid function. It should be a vector (column
vector or row vector) with data type: 'float64' or 'float32'.
labels (numpy.ndarray): The ground truth (labels),
the shape should keep the same as preds.
The data type is 'int32' or 'int64'.
"""
if isinstance(preds, (paddle.Tensor, paddle.fluid.core.eager.Tensor)):
preds = preds.numpy()
elif not _is_numpy_(preds):
raise ValueError("The 'preds' must be a numpy ndarray or Tensor.")
if isinstance(labels, (paddle.Tensor, paddle.fluid.core.eager.Tensor)):
labels = labels.numpy()
elif not _is_numpy_(labels):
raise ValueError("The 'labels' must be a numpy ndarray or Tensor.")
sample_num = labels.shape[0]
preds = np.floor(preds + 0.5).astype("int32")
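        # note: floor(p + 0.5) rounds each sigmoid score to the nearest
        # class label (0 or 1) before counting true/false positives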
for i in range(sample_num):
pred = preds[i]
label = labels[i]
if pred == 1:
if pred == label:
self.tp += 1
else:
self.fp += 1
def reset(self):
"""
Resets all of the metric state.
"""
self.tp = 0
self.fp = 0
def accumulate(self):
"""
Calculate the final precision.
Returns:
            A scalar float: the result of the calculated precision.
"""
ap = self.tp + self.fp
return float(self.tp) / ap if ap != 0 else .0
def name(self):
"""
Returns metric name
"""
return self._name
class Recall(Metric):
"""
Recall (also known as sensitivity) is the fraction of
relevant instances that have been retrieved over the
total amount of relevant instances
Refer to:
https://en.wikipedia.org/wiki/Precision_and_recall
Noted that this class manages the recall score only for
binary classification task.
Args:
name (str, optional): String name of the metric instance.
Default is `recall`.
Example by standalone:
.. code-block:: python
import numpy as np
import paddle
x = np.array([0.1, 0.5, 0.6, 0.7])
y = np.array([1, 0, 1, 1])
m = paddle.metric.Recall()
m.update(x, y)
res = m.accumulate()
print(res) # 2.0 / 3.0
Example with Model API:
.. code-block:: python
import numpy as np
import paddle
import paddle.nn as nn
class Data(paddle.io.Dataset):
def __init__(self):
super(Data, self).__init__()
self.n = 1024
self.x = np.random.randn(self.n, 10).astype('float32')
self.y = np.random.randint(2, size=(self.n, 1)).astype('float32')
def __getitem__(self, idx):
return self.x[idx], self.y[idx]
def __len__(self):
return self.n
model = paddle.Model(nn.Sequential(
nn.Linear(10, 1),
nn.Sigmoid()
))
optim = paddle.optimizer.Adam(
learning_rate=0.001, parameters=model.parameters())
model.prepare(
optim,
loss=nn.BCELoss(),
metrics=[paddle.metric.Precision(), paddle.metric.Recall()])
data = Data()
model.fit(data, batch_size=16)
"""
def __init__(self, name='recall', *args, **kwargs):
super(Recall, self).__init__(*args, **kwargs)
self.tp = 0 # true positive
self.fn = 0 # false negative
self._name = name
def update(self, preds, labels):
"""
Update the states based on the current mini-batch prediction results.
Args:
preds(numpy.array): prediction results of current mini-batch,
the output of two-class sigmoid function.
Shape: [batch_size, 1]. Dtype: 'float64' or 'float32'.
labels(numpy.array): ground truth (labels) of current mini-batch,
the shape should keep the same as preds.
Shape: [batch_size, 1], Dtype: 'int32' or 'int64'.
"""
if isinstance(preds, (paddle.Tensor, paddle.fluid.core.eager.Tensor)):
preds = preds.numpy()
elif not _is_numpy_(preds):
raise ValueError("The 'preds' must be a numpy ndarray or Tensor.")
if isinstance(labels, (paddle.Tensor, paddle.fluid.core.eager.Tensor)):
labels = labels.numpy()
elif not _is_numpy_(labels):
raise ValueError("The 'labels' must be a numpy ndarray or Tensor.")
sample_num = labels.shape[0]
preds = np.rint(preds).astype("int32")
for i in range(sample_num):
pred = preds[i]
label = labels[i]
if label == 1:
if pred == label:
self.tp += 1
else:
self.fn += 1
def accumulate(self):
"""
Calculate the final recall.
Returns:
            A scalar float: the result of the calculated recall.
"""
recall = self.tp + self.fn
return float(self.tp) / recall if recall != 0 else .0
def reset(self):
"""
Resets all of the metric state.
"""
self.tp = 0
self.fn = 0
def name(self):
"""
Returns metric name
"""
return self._name
class Auc(Metric):
"""
The auc metric is for binary classification.
Refer to https://en.wikipedia.org/wiki/Receiver_operating_characteristic#Area_under_the_curve.
Please notice that the auc metric is implemented with python, which may be a little bit slow.
The `auc` function creates four local variables, `true_positives`,
`true_negatives`, `false_positives` and `false_negatives` that are used to
compute the AUC. To discretize the AUC curve, a linearly spaced set of
thresholds is used to compute pairs of recall and precision values. The area
under the ROC-curve is therefore computed using the height of the recall
    values by the false positive rate, while the area under the PR-curve is
    computed using the height of the precision values by the recall.
Args:
curve (str): Specifies the mode of the curve to be computed,
'ROC' or 'PR' for the Precision-Recall-curve. Default is 'ROC'.
num_thresholds (int): The number of thresholds to use when
discretizing the roc curve. Default is 4095.
name (str, optional): String name of the metric instance. Default
is `auc`.
"NOTE: only implement the ROC curve type via Python now."
Example by standalone:
.. code-block:: python
import numpy as np
import paddle
m = paddle.metric.Auc()
n = 8
class0_preds = np.random.random(size = (n, 1))
class1_preds = 1 - class0_preds
preds = np.concatenate((class0_preds, class1_preds), axis=1)
labels = np.random.randint(2, size = (n, 1))
m.update(preds=preds, labels=labels)
res = m.accumulate()
Example with Model API:
.. code-block:: python
import numpy as np
import paddle
import paddle.nn as nn
class Data(paddle.io.Dataset):
def __init__(self):
super(Data, self).__init__()
self.n = 1024
self.x = np.random.randn(self.n, 10).astype('float32')
self.y = np.random.randint(2, size=(self.n, 1)).astype('int64')
def __getitem__(self, idx):
return self.x[idx], self.y[idx]
def __len__(self):
return self.n
model = paddle.Model(nn.Sequential(
nn.Linear(10, 2), nn.Softmax())
)
optim = paddle.optimizer.Adam(
learning_rate=0.001, parameters=model.parameters())
def loss(x, y):
return nn.functional.nll_loss(paddle.log(x), y)
model.prepare(
optim,
loss=loss,
metrics=paddle.metric.Auc())
data = Data()
model.fit(data, batch_size=16)
"""
def __init__(self,
curve='ROC',
num_thresholds=4095,
name='auc',
*args,
**kwargs):
super(Auc, self).__init__(*args, **kwargs)
self._curve = curve
self._num_thresholds = num_thresholds
_num_pred_buckets = num_thresholds + 1
self._stat_pos = np.zeros(_num_pred_buckets)
self._stat_neg = np.zeros(_num_pred_buckets)
self._name = name
def update(self, preds, labels):
"""
Update the auc curve with the given predictions and labels.
Args:
            preds (numpy.array): A numpy array in the shape of
                (batch_size, 2), preds[i][j] denotes the probability of
                classifying the instance i into the class j.
            labels (numpy.array): A numpy array in the shape of
                (batch_size, 1), labels[i] is either 0 or 1,
representing the label of the instance i.
"""
if isinstance(labels, (paddle.Tensor, paddle.fluid.core.eager.Tensor)):
labels = labels.numpy()
elif not _is_numpy_(labels):
raise ValueError("The 'labels' must be a numpy ndarray or Tensor.")
if isinstance(preds, (paddle.Tensor, paddle.fluid.core.eager.Tensor)):
preds = preds.numpy()
elif not _is_numpy_(preds):
raise ValueError("The 'preds' must be a numpy ndarray or Tensor.")
for i, lbl in enumerate(labels):
value = preds[i, 1]
bin_idx = int(value * self._num_thresholds)
assert bin_idx <= self._num_thresholds
if lbl:
self._stat_pos[bin_idx] += 1.0
else:
self._stat_neg[bin_idx] += 1.0
@staticmethod
def trapezoid_area(x1, x2, y1, y2):
return abs(x1 - x2) * (y1 + y2) / 2.0
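    # illustrative check (added comment): trapezoid_area(0, 2, 1, 3)
    # == abs(0 - 2) * (1 + 3) / 2.0 == 4.0, the area of one strip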
def accumulate(self):
"""
Return the area (a float score) under auc curve
Return:
float: the area under auc curve
"""
tot_pos = 0.0
tot_neg = 0.0
auc = 0.0
idx = self._num_thresholds
while idx >= 0:
tot_pos_prev = tot_pos
tot_neg_prev = tot_neg
tot_pos += self._stat_pos[idx]
tot_neg += self._stat_neg[idx]
auc += self.trapezoid_area(tot_neg, tot_neg_prev, tot_pos,
tot_pos_prev)
idx -= 1
return auc / tot_pos / tot_neg if tot_pos > 0.0 and tot_neg > 0.0 else 0.0
def reset(self):
"""
Reset states and result
"""
_num_pred_buckets = self._num_thresholds + 1
self._stat_pos = np.zeros(_num_pred_buckets)
self._stat_neg = np.zeros(_num_pred_buckets)
def name(self):
"""
Returns metric name
"""
return self._name
def accuracy(input, label, k=1, correct=None, total=None, name=None):
"""
accuracy layer.
Refer to the https://en.wikipedia.org/wiki/Precision_and_recall
This function computes the accuracy using the input and label.
If the correct label occurs in top k predictions, then correct will increment by one.
    Note: the dtype of accuracy is determined by input. The input and label dtype can be different.
Args:
input(Tensor): The input of accuracy layer, which is the predictions of network. A Tensor with type float32,float64.
The shape is ``[sample_number, class_dim]`` .
label(Tensor): The label of dataset. Tensor with type int64 or int32. The shape is ``[sample_number, 1]`` .
k(int, optional): The top k predictions for each class will be checked. Data type is int64 or int32.
correct(Tensor, optional): The correct predictions count. A Tensor with type int64 or int32.
total(Tensor, optional): The total entries count. A tensor with type int64 or int32.
name(str, optional): The default value is None. Normally there is no need for
user to set this property. For more information, please refer to :ref:`api_guide_Name`
Returns:
Tensor, the correct rate. A Tensor with type float32.
Examples:
.. code-block:: python
import paddle
predictions = paddle.to_tensor([[0.2, 0.1, 0.4, 0.1, 0.1], [0.2, 0.3, 0.1, 0.15, 0.25]], dtype='float32')
label = paddle.to_tensor([[2], [0]], dtype="int64")
result = paddle.metric.accuracy(input=predictions, label=label, k=1)
# [0.5]
"""
if label.dtype == paddle.int32:
label = paddle.cast(label, paddle.int64)
if _non_static_mode():
if correct is None:
correct = _varbase_creator(dtype="int32")
if total is None:
total = _varbase_creator(dtype="int32")
topk_out, topk_indices = paddle.topk(input, k=k)
_acc, _, _ = _legacy_C_ops.accuracy(topk_out, topk_indices, label,
correct, total)
return _acc
helper = LayerHelper("accuracy", **locals())
check_variable_and_dtype(input, 'input', ['float16', 'float32', 'float64'],
'accuracy')
topk_out, topk_indices = paddle.topk(input, k=k)
acc_out = helper.create_variable_for_type_inference(dtype="float32")
if correct is None:
correct = helper.create_variable_for_type_inference(dtype="int32")
if total is None:
total = helper.create_variable_for_type_inference(dtype="int32")
helper.append_op(type="accuracy",
inputs={
"Out": [topk_out],
"Indices": [topk_indices],
"Label": [label]
},
outputs={
"Accuracy": [acc_out],
"Correct": [correct],
"Total": [total],
})
return acc_out
| [
"[email protected]"
] | |
af5b7418a1bc4303059f8d4db3cfc71b3f4ce50c | 575cf5976450e5e901d8c642c3795f2610ed0545 | /client.py | 3a30ff0c7a41c24d11bd17cd85a764a4bc4161ab | [] | no_license | mgcarbonell/quick-socketio | 99ef3ccb62999fda3a7a4f87656dad6ae079f3d9 | a41eafb583f1c19f5e95bdc9b4505f5876b8a576 | refs/heads/main | 2023-03-02T22:25:50.602284 | 2021-01-23T05:26:21 | 2021-01-23T05:26:21 | 332,128,369 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,353 | py | # similar to our serer, with some minor
# differences. Rather than socketio.Server
# we use socketio.Client.
# We still have code that is event driven,
# and this is important with socketio.
# Rather than using callbacks as in JS, we can use
# call() on socketio.Client().
# Note that these clients will also work
# in the CLI.
import asyncio
import sys
import socketio
sio = socketio.Client()
@sio.event
def connect():
print('connected')
result = sio.call('sum', {'numbers': [1, 2]})
print(result)
@sio.event
def connect_error(e):
print(e)
@sio.event
def disconnect():
print('disconnected')
@sio.event
def mult(data):
return data['numbers'][0] * data['numbers'][1]
@sio.event
def client_count(count):
print('There are', count, 'connected clients.')
@sio.event
def room_count(count):
print('There are', count, 'clients in my room.')
@sio.event
def user_joined(username):
    print('User', username, 'has joined.')
@sio.event
def user_left(username):
    print('User', username, 'has left.')
def main(username):
sio.connect('http://localhost:8000',
headers={'X-Username': username})
sio.wait()
# sio.wait will wait until the connect ends.
if __name__ == '__main__':
    main(sys.argv[1] if len(sys.argv) > 1 else None)
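# Example invocation (assumed local server from this project running on
# port 8000): `python client.py alice` connects with username "alice".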
# Take the 1st command line argument if available;
# otherwise pass None to test a rejected connection. | [
"[email protected]"
] | |
9b6a313c4143391d0e759e966d2a74b8e14b3fb2 | 6cee35876c6a1afdc1a2f9293fbcf41719f3852d | /chap_2/exercise2.py | c89851bc22530e3167a8bbdee2b1449bc3979f7b | [] | no_license | SiddhantAshtekar/python-algorithem-for-begginers | a7c31cd2cd96d70e13a2d0119da94fe7f38c5056 | 07803850aa78c07ce608d18173afebd398543121 | refs/heads/master | 2020-05-07T10:28:20.310114 | 2019-04-09T17:33:19 | 2019-04-09T17:33:19 | 180,417,449 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 99 | py | name=input("Enter your name ")
print(f"the revers of your name is {name[-1::-1]}")#revers of sting | [
"[email protected]"
] | |
52ff29d17d4c9860ec70d330a6cd99fcc5d6bcdd | ad6ea76d377a7e920ec0ad858195ca2bccc9519f | /day_10/re模块.py | ebbd39c57c82dd43dc2afa5910d19c8da5d3b80b | [] | no_license | wugelihai/month_01 | a4713ba62d1f9d966bde5002c3d08556af0a7c44 | bd33a4008467007b2ad80be8eeb17ed4f43bd6f7 | refs/heads/master | 2023-01-06T18:21:48.329832 | 2020-11-10T10:01:03 | 2020-11-10T10:01:03 | 303,683,145 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,550 | py | import re
"""
匹配单个字符
"""
# Use the match method; if there is no match, an error will be raised
# result = re.match("it", "itcast.cn")
# info = result.group()
# print(info)
# . matches any single character except \n
# ret = re.match(".", "M")
# print(ret.group())
# ret = re.match("w.o", "weo")
# print(ret.group())
# [] matches any one of the characters listed inside the brackets
# ret = re.match("[hH]ello", "hello world")
# print(ret.group())
# This denotes two ranges, 0-4 and 6-9; a 5 would not be matched
# ret = re.match("[0-46-9]", "7我的世界")
# print(ret.group())
# \d matches a digit
# ret = re.match("嫦娥\d号", "嫦娥1号发射成功")
# print(ret.group())
# \D matches a non-digit
# match_obj = re.match("\D","fwoda")
# if match_obj:
# print(match_obj.group())
# else:
# print("匹配失败")
# \s matches whitespace, i.e. space, tab and newline
# ret = re.match("hello\sworld","hello\nworld")
# print(ret.group())
# \S matches non-whitespace
# ret = re.match("hello\Sworld","hello$world")
# print(ret.group())
# \w matches non-special characters; the underscore _ counts as non-special
# ret = re.match("hello\wworld","hello_world")
# print(ret.group())
# \W matches special characters
# ret = re.match("hello\Wworld","hello^world")
# print(ret.group())
"""
匹配多个字符
"""
# * matches the preceding character 0 or more times, i.e. it is optional
# ret = re.match("[A-Z][a-z]*","MBBB")
# print(ret.group())
#
# ret = re.match("[A-Z][a-z]*","Mabcdefg")
# print(ret.group())
#
# ret = re.match("[A-Z][a-z]*","MabcdefG")
# print(ret.group())
# + matches the preceding character 1 or more times, i.e. at least once
# match_obj = re.match("t.+o", "two")
# print(match_obj.group())
# ? matches the preceding character 0 or 1 times: either once or not at all
# match_obj = re.match("https?", "http")
# print(match_obj.group())
# {m} matches the preceding character exactly m times
# ret = re.match("[a-zA-Z0-9]{6}","wodshijieshishenme")
# print(ret.group())
# {m,n} matches the preceding character from m to n times
# ret = re.match("[a-zA-Z0-9]{6,11}","wodeshijie")
# print(ret.group())
# Matching the start and end of a string
# To check what a string starts with, use the ^... form
# ret = re.match("^[hH]", "hello world")
# print(ret.group())
# Check whether the string starts with a digit, and print the whole string
# match_obj = re.match("^\d.*","1hello")
# print(match_obj.group())
# Check what a string ends with, using $
# match_obj = re.match(".*\d$","1hello234")
# print(match_obj.group())
# Match a string that starts and ends with a digit, whatever is in between
# match_obj = re.match("^\d.*\d$","1hello234")
# print(match_obj.group())
# [^chars] matches everything except the specified characters
# match_obj = re.match("[^aeiou].*","wodeshijie")
# print(match_obj.group())
# Match a 163 mailbox address
# match_obj = re.match("^.*@163.com$", "[email protected]")
# print(match_obj.group())
# Match an 11-digit mobile phone number
# match_obj = re.match("[0-9]{1,11}", "13955448220")
# print(match_obj.group())
# Match a Weibo hashtag topic
# match_obj = re.match("^#.*#$", "#特朗普#")
# print(match_obj.group())
# Matching with groups
# | matches either the expression on its left or the one on its right
# fruit_list = ["apple", "banana", "orange", "pear"]
# for i in fruit_list:
# match_obj = re.match("apple|pear", i)
# if match_obj:
# print("%s这个是我需要的" % match_obj.group())
#
# else:
# print("%s这个不是我需要的" % i)
# (ab) treats the characters inside the parentheses as one group
# Match 163, 126, qq and similar mailboxes
# match_obj = re.match("[a-zA-Z0-9_]{4,20}@(163|126|qq|sina|yahoo)\.com","[email protected]")
# if match_obj:
# print(match_obj.group())
# # Get group data; by default group 1 is the first group, and group numbers increase from left to right
# print(match_obj.group(1))
# else:
# print("匹配失败")
# Match data like qq:1233444 and extract "qq" and the qq number
# match_obj = re.match("(qq):([0-9]\d{4,11})","qq:2223224182")
# print(match_obj.group())
# Groups: group 1 is the first group by default, numbered left to right; () is the unit of grouping
# print(match_obj.group(1))
# print(match_obj.group(2))
# \num references the string matched by group number num
# match_obj = re.match("<[a-zA-Z1-6]+>.*</[a-zA-Z1-6]+>", "<html>hh</div>")
# print(match_obj.group())
# if match_obj:
# print(match_obj.group())
# else:
# print("匹配失败")
# i.e. match the n-th group again, used together with the () groups above
match_obj = re.match("<([a-zA-Z1-6]+)><([a-zA-Z1-6]+)>.*</\\2></\\1>", "<html><h1>hh</h1></html>")
print(match_obj.group())
# (?P<name>) gives a group a name
# (?P=name) references the string matched by the group named name
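# A short named-group sketch (added example) of the same tag-pair match:
match_obj = re.match(r"<(?P<outer>[a-zA-Z1-6]+)><(?P<inner>[a-zA-Z1-6]+)>.*</(?P=inner)></(?P=outer)>",
                     "<html><h1>hh</h1></html>")
print(match_obj.group())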
| [
"[email protected]"
] | |
5387260b0ece475f0630b5bce216b990dc590b25 | dbd848387ab3379627e14aaf5cfaa832449b3bda | /tests/test_core_socks_async_trio.py | 13d57e1d3d48e62ce6a282d6f4bf46d12f15ee89 | [] | no_license | Sweety1337/py-socks-updated | 9940b1256eee6db80a9b170574b90d7ccf617dd1 | ddda6575368022107143245787beed90e4a277fa | refs/heads/master | 2022-12-16T21:17:55.894217 | 2020-09-24T14:22:30 | 2020-09-24T14:22:30 | 298,301,367 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,899 | py | import socket
import ssl
import trio # noqa
import pytest # noqa
from yarl import URL # noqa
from python_socks import (
ProxyType,
ProxyError,
ProxyTimeoutError,
ProxyConnectionError
)
from python_socks._proxy_async import AsyncProxy # noqa
from python_socks.async_.trio import Proxy
from python_socks.async_ import ProxyChain
# noinspection PyUnresolvedReferences,PyProtectedMember
from python_socks._resolver_async_trio import Resolver
from tests.conftest import (
SOCKS5_IPV4_HOST, SOCKS5_IPV4_PORT, LOGIN, PASSWORD, SKIP_IPV6_TESTS,
SOCKS5_IPV4_URL, SOCKS5_IPV4_URL_WO_AUTH, SOCKS5_IPV6_URL, SOCKS4_URL,
HTTP_PROXY_URL
)
# TEST_URL = 'https://httpbin.org/ip'
TEST_URL = 'https://check-host.net/ip'
async def make_request(proxy: AsyncProxy,
url: str, resolve_host=False, timeout=None):
url = URL(url)
dest_host = url.host
if resolve_host:
resolver = Resolver()
_, dest_host = await resolver.resolve(url.host)
sock: socket.socket = await proxy.connect(
dest_host=dest_host,
dest_port=url.port,
timeout=timeout
)
ssl_context = None
if url.scheme == 'https':
ssl_context = ssl.create_default_context()
stream = trio.SocketStream(sock)
if ssl_context is not None:
stream = trio.SSLStream(
stream, ssl_context,
server_hostname=url.host
)
await stream.do_handshake()
request = (
'GET {rel_url} HTTP/1.1\r\n'
'Host: {host}\r\n'
'Connection: close\r\n\r\n'
)
request = request.format(rel_url=url.path_qs, host=url.host)
request = request.encode('ascii')
await stream.send_all(request)
response = await stream.receive_some(1024)
status_line = response.split(b'\r\n', 1)[0]
status_line = status_line.decode('utf-8', 'surrogateescape')
version, status_code, *reason = status_line.split()
return int(status_code)
@pytest.mark.parametrize('rdns', (True, False))
@pytest.mark.parametrize('resolve_host', (True, False))
@pytest.mark.trio
async def test_socks5_proxy_ipv4(rdns, resolve_host):
proxy = Proxy.from_url(SOCKS5_IPV4_URL, rdns=rdns)
status_code = await make_request(
proxy=proxy,
url=TEST_URL,
resolve_host=resolve_host
)
assert status_code == 200
@pytest.mark.parametrize('rdns', (None, True, False))
@pytest.mark.trio
async def test_socks5_proxy_ipv4_with_auth_none(rdns):
proxy = Proxy.from_url(SOCKS5_IPV4_URL_WO_AUTH, rdns=rdns)
status_code = await make_request(proxy=proxy, url=TEST_URL)
assert status_code == 200
@pytest.mark.trio
async def test_socks5_proxy_with_invalid_credentials():
proxy = Proxy.create(
proxy_type=ProxyType.SOCKS5,
host=SOCKS5_IPV4_HOST,
port=SOCKS5_IPV4_PORT,
username=LOGIN,
password=PASSWORD + 'aaa',
)
with pytest.raises(ProxyError):
await make_request(proxy=proxy, url=TEST_URL)
@pytest.mark.trio
async def test_socks5_proxy_with_connect_timeout():
proxy = Proxy.create(
proxy_type=ProxyType.SOCKS5,
host=SOCKS5_IPV4_HOST,
port=SOCKS5_IPV4_PORT,
username=LOGIN,
password=PASSWORD,
)
with pytest.raises(ProxyTimeoutError):
await make_request(proxy=proxy, url=TEST_URL, timeout=0.0001)
@pytest.mark.trio
async def test_socks5_proxy_with_invalid_proxy_port(unused_tcp_port):
proxy = Proxy.create(
proxy_type=ProxyType.SOCKS5,
host=SOCKS5_IPV4_HOST,
port=unused_tcp_port,
username=LOGIN,
password=PASSWORD,
)
with pytest.raises(ProxyConnectionError):
await make_request(proxy=proxy, url=TEST_URL)
@pytest.mark.skipif(SKIP_IPV6_TESTS, reason='TravisCI doesn`t support ipv6')
@pytest.mark.trio
async def test_socks5_proxy_ipv6():
proxy = Proxy.from_url(SOCKS5_IPV6_URL)
status_code = await make_request(proxy=proxy, url=TEST_URL)
assert status_code == 200
@pytest.mark.parametrize('rdns', (None, True, False))
@pytest.mark.parametrize('resolve_host', (True, False))
@pytest.mark.trio
async def test_socks4_proxy(rdns, resolve_host):
proxy = Proxy.from_url(SOCKS4_URL, rdns=rdns)
status_code = await make_request(
proxy=proxy,
url=TEST_URL,
resolve_host=resolve_host
)
assert status_code == 200
@pytest.mark.trio
async def test_http_proxy():
proxy = Proxy.from_url(HTTP_PROXY_URL)
status_code = await make_request(proxy=proxy, url=TEST_URL)
assert status_code == 200
@pytest.mark.trio
async def test_proxy_chain():
proxy = ProxyChain([
Proxy.from_url(SOCKS5_IPV4_URL),
Proxy.from_url(SOCKS4_URL),
Proxy.from_url(HTTP_PROXY_URL),
])
# noinspection PyTypeChecker
status_code = await make_request(proxy=proxy, url=TEST_URL)
assert status_code == 200
| [
"[email protected]"
] | |
bbe890a4b95caa07771ae822a39d98792ff5c420 | 6f763105d564ee80d2b129b4c1337f6e26027b77 | /utils/processing.py | 65d49eb493e1c934552b8410ae49bc4775ae4e5c | [
"MIT"
] | permissive | pidgpidg/Fanfiction-Finder | e2915d0ce8d036620442f5461d80fcd7021fd7ba | c1f12ac4ca30aa1787e3a1f24c9c246d1c8d4d1b | refs/heads/main | 2023-07-10T09:02:08.659169 | 2021-08-20T03:11:20 | 2021-08-20T03:11:20 | 398,138,707 | 0 | 0 | MIT | 2021-08-20T03:03:08 | 2021-08-20T03:03:08 | null | UTF-8 | Python | false | false | 2,251 | py | from dateutil.relativedelta import relativedelta
from datetime import datetime
from bs4 import BeautifulSoup
import re
def story_last_up_clean(story_last_up, _type):
curr_time = datetime.now()
if _type == 1: # ffn last updated
datetimeFormat = '%Y-%m-%d %H:%M:%S'
story_last_up = datetime.strptime(
str(story_last_up), datetimeFormat)
story_last_updated = story_last_up.strftime(r'%-d %b, %Y ')
elif _type == 2: # ao3 last updated
datetimeFormat = '%Y-%m-%d'
story_last_up = datetime.strptime(
str(story_last_up), datetimeFormat)
story_last_updated = story_last_up.strftime(r'%-d %b, %Y ')
diff_in_time = relativedelta(curr_time, story_last_up)
# only amend hours & minutes diff
if diff_in_time.years:
pass
elif diff_in_time.months:
pass
elif diff_in_time.days:
pass
elif diff_in_time.hours:
if diff_in_time.hours == 1:
story_last_updated += "☘︎ " + str(diff_in_time.hours)+" hour ago"
else:
story_last_updated += "☘︎ " + str(diff_in_time.hours)+" hours ago"
elif diff_in_time.minutes:
if diff_in_time.minutes == 1:
story_last_updated += "☘︎ " + \
str(diff_in_time.minutes)+" minute ago"
else:
story_last_updated += "☘︎ " + \
str(diff_in_time.minutes)+" minutes ago"
return str(story_last_updated)
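# Illustrative usage (assumed inputs): _type 1 expects an ffn-style
# "YYYY-MM-DD HH:MM:SS" timestamp, _type 2 an AO3-style "YYYY-MM-DD",
# e.g. story_last_up_clean("2021-01-01 10:00:00", 1) -> "1 Jan, 2021 ...";
# note the "%-d" strftime flag used above is Unix-specific.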
def get_ao3_series_works_index(ao3_soup):
ao3_series_works_html = []
ao3_series_works_index = []
ao3_series_works_html_h4 = ao3_soup.findAll(
'h4', attrs={'class': 'heading'})
for i in ao3_series_works_html_h4:
ao3_series_works_html.append(i)
ao3_series_works_html = ""
for i in ao3_series_works_html_h4:
ao3_series_works_html += str(i)
soup_work = BeautifulSoup(ao3_series_works_html, 'html.parser')
for tag in soup_work.findAll('a', {'href': re.compile('/works/')}):
ao3_series_works_index.append(
"["+tag.text+"](https://archiveofourown.org"+tag['href']+")") # inline html tag for embed
ao3_series_works_index = '\n'.join(ao3_series_works_index)
return ao3_series_works_index
| [
"[email protected]"
] | |
0b122012c36b3cd5dad4e207579418712c3535ca | 642e8d6d8cd8d08a73bdcf82ae9689a09284025c | /celery/worker/__init__.py | 96b994779744ff87efee9a3dcbecee7745c8b868 | [
"BSD-3-Clause"
] | permissive | abecciu/celery | 941f29c033b54b766166f17aa8c5e4be05df08b9 | f0c399e34d56c7a2a14cb42bfb2b6455c68ef0c0 | refs/heads/master | 2021-01-14T12:57:11.230199 | 2009-09-10T13:44:51 | 2009-09-10T13:44:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,215 | py | """
The Multiprocessing Worker Server
Documentation for this module is in ``docs/reference/celery.worker.rst``.
"""
from carrot.connection import DjangoBrokerConnection, AMQPConnectionException
from celery.worker.controllers import Mediator, PeriodicWorkController
from celery.worker.job import TaskWrapper
from celery.exceptions import NotRegistered
from celery.messaging import get_consumer_set
from celery.conf import DAEMON_CONCURRENCY, DAEMON_LOG_FILE
from celery.conf import AMQP_CONNECTION_RETRY, AMQP_CONNECTION_MAX_RETRIES
from celery.log import setup_logger
from celery.pool import TaskPool
from celery.utils import retry_over_time
from celery.datastructures import SharedCounter
from Queue import Queue
import traceback
import logging
import socket
class AMQPListener(object):
"""Listen for messages received from the AMQP broker and
    move them to the bucket queue for task processing.
:param bucket_queue: See :attr:`bucket_queue`.
:param hold_queue: See :attr:`hold_queue`.
.. attribute:: bucket_queue
The queue that holds tasks ready for processing immediately.
.. attribute:: hold_queue
The queue that holds paused tasks. Reasons for being paused include
a countdown/eta or that it's waiting for retry.
.. attribute:: logger
The logger used.
"""
def __init__(self, bucket_queue, hold_queue, logger,
initial_prefetch_count=2):
self.amqp_connection = None
self.task_consumer = None
self.bucket_queue = bucket_queue
self.hold_queue = hold_queue
self.logger = logger
self.prefetch_count = SharedCounter(initial_prefetch_count)
def start(self):
"""Start the consumer.
If the connection is lost, it tries to re-establish the connection
over time and restart consuming messages.
"""
while True:
self.reset_connection()
try:
self.consume_messages()
except (socket.error, AMQPConnectionException):
self.logger.error("AMQPListener: Connection to broker lost. "
+ "Trying to re-establish connection...")
def consume_messages(self):
"""Consume messages forever (or until an exception is raised)."""
task_consumer = self.task_consumer
self.logger.debug("AMQPListener: Starting message consumer...")
it = task_consumer.iterconsume(limit=None)
self.logger.debug("AMQPListener: Ready to accept tasks!")
while True:
self.task_consumer.qos(prefetch_count=int(self.prefetch_count))
it.next()
def stop(self):
"""Stop processing AMQP messages and close the connection
to the broker."""
self.close_connection()
def receive_message(self, message_data, message):
"""The callback called when a new message is received.
If the message has an ``eta`` we move it to the hold queue,
        otherwise we move it to the bucket queue for immediate processing.
"""
try:
task = TaskWrapper.from_message(message, message_data,
logger=self.logger)
except NotRegistered, exc:
self.logger.error("Unknown task ignored: %s" % (exc))
return
eta = message_data.get("eta")
if eta:
self.prefetch_count.increment()
self.logger.info("Got task from broker: %s[%s] eta:[%s]" % (
task.task_name, task.task_id, eta))
self.hold_queue.put((task, eta, self.prefetch_count.decrement))
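            # holding an eta task bumps the shared prefetch counter, widening
            # the qos window in consume_messages() until the held task is
            # released via the decrement callback passed above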
else:
self.logger.info("Got task from broker: %s[%s]" % (
task.task_name, task.task_id))
self.bucket_queue.put(task)
def close_connection(self):
"""Close the AMQP connection."""
if self.task_consumer:
self.task_consumer.close()
self.task_consumer = None
if self.amqp_connection:
self.logger.debug(
"AMQPListener: Closing connection to the broker...")
self.amqp_connection.close()
self.amqp_connection = None
def reset_connection(self):
"""Reset the AMQP connection, and reinitialize the
:class:`carrot.messaging.ConsumerSet` instance.
Resets the task consumer in :attr:`task_consumer`.
"""
self.logger.debug(
"AMQPListener: Re-establishing connection to the broker...")
self.close_connection()
self.amqp_connection = self._open_connection()
self.task_consumer = get_consumer_set(connection=self.amqp_connection)
self.task_consumer.register_callback(self.receive_message)
def _open_connection(self):
"""Retries connecting to the AMQP broker over time.
See :func:`celery.utils.retry_over_time`.
"""
def _connection_error_handler(exc, interval):
"""Callback handler for connection errors."""
self.logger.error("AMQP Listener: Connection Error: %s. " % exc
+ "Trying again in %d seconds..." % interval)
def _establish_connection():
"""Establish a connection to the AMQP broker."""
conn = DjangoBrokerConnection()
connected = conn.connection # Connection is established lazily.
return conn
if not AMQP_CONNECTION_RETRY:
return _establish_connection()
conn = retry_over_time(_establish_connection, socket.error,
errback=_connection_error_handler,
max_retries=AMQP_CONNECTION_MAX_RETRIES)
self.logger.debug("AMQPListener: Connection Established.")
return conn
class WorkController(object):
"""Executes tasks waiting in the task queue.
:param concurrency: see :attr:`concurrency`.
:param logfile: see :attr:`logfile`.
:param loglevel: see :attr:`loglevel`.
.. attribute:: concurrency
The number of simultaneous processes doing work (default:
:const:`celery.conf.DAEMON_CONCURRENCY`)
.. attribute:: loglevel
The loglevel used (default: :const:`logging.INFO`)
.. attribute:: logfile
The logfile used, if no logfile is specified it uses ``stderr``
(default: :const:`celery.conf.DAEMON_LOG_FILE`).
.. attribute:: logger
The :class:`logging.Logger` instance used for logging.
.. attribute:: is_detached
Flag describing if the worker is running as a daemon or not.
.. attribute:: pool
The :class:`multiprocessing.Pool` instance used.
.. attribute:: bucket_queue
The :class:`Queue.Queue` that holds tasks ready for immediate
processing.
.. attribute:: hold_queue
The :class:`Queue.Queue` that holds paused tasks. Reasons for holding
back the task include waiting for ``eta`` to pass or the task is being
retried.
.. attribute:: periodic_work_controller
Instance of :class:`celery.worker.controllers.PeriodicWorkController`.
.. attribute:: mediator
Instance of :class:`celery.worker.controllers.Mediator`.
.. attribute:: amqp_listener
Instance of :class:`AMQPListener`.
"""
loglevel = logging.ERROR
concurrency = DAEMON_CONCURRENCY
logfile = DAEMON_LOG_FILE
_state = None
def __init__(self, concurrency=None, logfile=None, loglevel=None,
is_detached=False):
# Options
self.loglevel = loglevel or self.loglevel
self.concurrency = concurrency or self.concurrency
self.logfile = logfile or self.logfile
self.is_detached = is_detached
self.logger = setup_logger(loglevel, logfile)
# Queues
self.bucket_queue = Queue()
self.hold_queue = Queue()
self.logger.debug("Instantiating thread components...")
# Threads+Pool
self.periodic_work_controller = PeriodicWorkController(
self.bucket_queue,
self.hold_queue)
self.pool = TaskPool(self.concurrency, logger=self.logger)
self.amqp_listener = AMQPListener(self.bucket_queue, self.hold_queue,
logger=self.logger,
initial_prefetch_count=concurrency)
self.mediator = Mediator(self.bucket_queue, self.safe_process_task)
# The order is important here;
# the first in the list is the first to start,
# and they must be stopped in reverse order.
self.components = [self.pool,
self.mediator,
self.periodic_work_controller,
self.amqp_listener]
def start(self):
"""Starts the workers main loop."""
self._state = "RUN"
try:
for component in self.components:
self.logger.debug("Starting thread %s..." % \
component.__class__.__name__)
component.start()
finally:
self.stop()
def safe_process_task(self, task):
"""Same as :meth:`process_task`, but catches all exceptions
        the task raises and logs them as errors, to make sure the
worker doesn't die."""
try:
try:
self.process_task(task)
except Exception, exc:
self.logger.critical("Internal error %s: %s\n%s" % (
exc.__class__, exc, traceback.format_exc()))
except (SystemExit, KeyboardInterrupt):
self.stop()
def process_task(self, task):
"""Process task by sending it to the pool of workers."""
task.execute_using_pool(self.pool, self.loglevel, self.logfile)
def stop(self):
"""Gracefully shutdown the worker server."""
# shut down the periodic work controller thread
if self._state != "RUN":
return
[component.stop() for component in reversed(self.components)]
self._state = "STOP"
| [
"[email protected]"
] | |
4ffcafc58e0e171a78a295d77ad213c80a5bb0e5 | 5d2404f62e58d5fd1f6112744ff32c3166183ac7 | /Exercicios/ex036.py | 6fc9f4561d2c0ecd7c5e81514824facf4042177e | [] | no_license | Leownhart/My_Course_of_python | 236cfc84d841c5883e5aa1cc0c0730e7a9a83c40 | 5abb21f8cdad91ab54247a007d40bf9ecd2cff8c | refs/heads/master | 2020-08-28T15:04:33.628086 | 2020-08-24T19:25:39 | 2020-08-24T19:25:39 | 217,733,877 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 536 | py | valorcasa = float(input('Informe o valor da imovel: R$ '))
salcom = float(input('Informe o sálario do comprador: R$ '))
anos = int(input('Informe o tempo de financiamento em anos: '))
valpresta = (valorcasa / anos) / 12  # (house price / years) / 12 monthly payments
porcent = salcom * 30.0 / 100
print('Para pagar uma casa de R${:.2f} em {} anos a '
'prestação será de R${:.2f} mensais'.format(valorcasa, anos, valpresta))
if valpresta > porcent:
print('\033[31mEmpréstimo NEGADO!\033[m')
else:
print('\033[32mEmpréstimo APROVADO!\033[m')
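# Worked example (illustrative): a R$120000.00 house over 10 years gives
# 120000 / 10 / 12 = R$1000.00 per month, approved only if that is at most
# 30% of the buyer's salary (porcent above).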
| [
"[email protected]"
] | |
a9edbeaf88bade93d05aedb3c436f9b864421475 | 5139e63dfbc2b01a10b20bdd283005bfb64bc3e1 | /api/api.py | 998d101f9f00203f1225a882ce89d54334c0ff78 | [] | no_license | Merevoli-DatLuu/SGUInfo | 121098a67128d3ede72ce9f9f51955637c22fb9c | 501d676ad1e02f00573cc879fbba6c44ab1b0ffb | refs/heads/master | 2023-05-26T08:50:41.899513 | 2021-01-11T16:11:45 | 2021-01-11T16:11:45 | 281,350,587 | 4 | 1 | null | 2023-05-22T23:38:11 | 2020-07-21T09:13:00 | Python | UTF-8 | Python | false | false | 1,848 | py | from flask import Flask, render_template, request, jsonify
import sys
sys.path.append("..")
from sguinfo import sguinfo
app = Flask(__name__)
@app.route("/api/v1/students", methods=['GET'])
def get_student_list():
sgu = sguinfo()
if "from_id" in request.args and "to_id" in request.args and "id_list" not in request.args:
from_id = request.args['from_id']
to_id = request.args['to_id']
if sgu.validate_range_mssv(from_id, to_id):
data = []
for d in sgu.find_range_info(from_id, to_id):
data.append(sgu.change_to_eng_info(d))
return jsonify(data)
else:
return jsonify({})
elif "from_id" not in request.args and "to_id" not in request.args and "id_list" in request.args:
list_id = request.args['id_list'].split(",")
data = []
for id in list_id:
if sgu.validate_mssv(id):
data.append(sgu.change_to_eng_info(sgu.find_info(id)))
return jsonify(data)
else:
return jsonify({})
@app.route("/api/v1/students/<id>", methods = ['GET'])
def get_a_student(id):
sgu = sguinfo()
if sgu.validate_mssv(id):
return jsonify(sgu.change_to_eng_info(sgu.find_info(id)))
else:
return jsonify({})
@app.route("/api/v1/students/<id>/<param>", methods = ['GET'])
def get_a_student_with_param(id, param):
sgu = sguinfo()
if sgu.validate_mssv(id):
data = sgu.change_to_eng_info(sgu.find_info(id))
if param in data.keys():
return jsonify(data[param])
else:
return jsonify({})
else:
return jsonify({})
@app.route("/test")
def tessst():
return request.args
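# Example requests (assuming a local run on Flask's default port 5000):
#   GET /api/v1/students?from_id=<id>&to_id=<id>    - range lookup
#   GET /api/v1/students?id_list=<id1>,<id2>        - explicit id list
#   GET /api/v1/students/<id>/<param>               - one field of one student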
if __name__ == '__main__':
app.config['JSON_AS_ASCII'] = False
app.config['JSON_SORT_KEYS'] = False
app.run(debug = True) | [
"[email protected]"
] | |
3283469a70b4bbabeeb43e1231327fd4c353e862 | 09795109710fa6d1b5062c592065f855f1f5cc7d | /test/form_largest.py | 797018345a9f97c7649c240e0af01ba6a8628101 | [] | no_license | ganpatagarwal/python-scripts | 8acd2093dcd8331676f3ab100fc22b9ac4d26786 | df02b20a67216c79addf1c75fbe62379dc054820 | refs/heads/master | 2021-01-23T08:56:32.722753 | 2018-08-13T09:50:43 | 2018-08-13T09:50:43 | 15,274,529 | 0 | 0 | null | 2016-01-21T11:00:45 | 2013-12-18T05:27:08 | Python | UTF-8 | Python | false | false | 514 | py | #l = [100,1,2,2,6,67,3,4,30,34,42,45,5,5,20,9]
l = [3, 30, 34, 5, 9]
#sorting the initial list
l.sort()
print l
l2 = [str(item) for item in l]
# to form the biggest number, order the strings so that for any pair a, b
# the concatenation a+b is the larger one (e.g. "9" first, and "3" before
# "30" since "330" > "303"); grouping by first digit alone gets cases
# like 3 vs 30 wrong
l2.sort(cmp=lambda a, b: cmp(b + a, a + b))
print l2
#creating the biggest possible integer
s = ''.join(l2)
print s
| [
"[email protected]"
] | |
997a2a9aa16da7c874e599ae181d4bd45503f1e8 | f82757475ea13965581c2147ff57123b361c5d62 | /gi-stubs/repository/EDataServer/SourceCredentialsProviderImpl.py | 281ee12030f4bf3eeecff51d446fa85a2b655621 | [] | no_license | ttys3/pygobject-stubs | 9b15d1b473db06f47e5ffba5ad0a31d6d1becb57 | d0e6e93399212aada4386d2ce80344eb9a31db48 | refs/heads/master | 2022-09-23T12:58:44.526554 | 2020-06-06T04:15:00 | 2020-06-06T04:15:00 | 269,693,287 | 8 | 2 | null | 2020-06-05T15:57:54 | 2020-06-05T15:57:54 | null | UTF-8 | Python | false | false | 17,806 | py | # encoding: utf-8
# module gi.repository.EDataServer
# from /usr/lib64/girepository-1.0/EDataServer-1.2.typelib
# by generator 1.147
"""
An object which wraps an introspection typelib.
This wrapping creates a python module like representation of the typelib
using gi repository as a foundation. Accessing attributes of the module
will dynamically pull them in and create wrappers for the members.
These members are then cached on this introspection module.
"""
# imports
import gi as __gi
import gi.overrides.GObject as __gi_overrides_GObject
import gi.repository.Gio as __gi_repository_Gio
import gi.repository.GObject as __gi_repository_GObject
import gi.repository.Soup as __gi_repository_Soup
import gobject as __gobject
from .Extension import Extension
class SourceCredentialsProviderImpl(Extension):
"""
:Constructors:
::
SourceCredentialsProviderImpl(**properties)
"""
def bind_property(self, *args, **kwargs): # real signature unknown
pass
def bind_property_full(self, *args, **kargs): # reliably restored by inspect
# no doc
pass
def can_process(self, source): # real signature unknown; restored from __doc__
""" can_process(self, source:EDataServer.Source) -> bool """
return False
def can_prompt(self): # real signature unknown; restored from __doc__
""" can_prompt(self) -> bool """
return False
def can_store(self): # real signature unknown; restored from __doc__
""" can_store(self) -> bool """
return False
def chain(self, *args, **kwargs): # real signature unknown
pass
def compat_control(self, *args, **kargs): # reliably restored by inspect
# no doc
pass
def connect(self, *args, **kwargs): # real signature unknown
pass
def connect_after(self, *args, **kwargs): # real signature unknown
pass
def connect_data(self, detailed_signal, handler, *data, **kwargs): # reliably restored by inspect
"""
Connect a callback to the given signal with optional user data.
:param str detailed_signal:
A detailed signal to connect to.
:param callable handler:
Callback handler to connect to the signal.
:param *data:
Variable data which is passed through to the signal handler.
:param GObject.ConnectFlags connect_flags:
Flags used for connection options.
:returns:
A signal id which can be used with disconnect.
"""
pass
def connect_object(self, *args, **kwargs): # real signature unknown
pass
def connect_object_after(self, *args, **kwargs): # real signature unknown
pass
def delete_sync(self, source, cancellable=None): # real signature unknown; restored from __doc__
""" delete_sync(self, source:EDataServer.Source, cancellable:Gio.Cancellable=None) -> bool """
return False
def disconnect(*args, **kwargs): # reliably restored by inspect
""" signal_handler_disconnect(instance:GObject.Object, handler_id:int) """
pass
def disconnect_by_func(self, *args, **kwargs): # real signature unknown
pass
def do_can_process(self, *args, **kwargs): # real signature unknown
""" can_process(self, source:EDataServer.Source) -> bool """
pass
def do_can_prompt(self, *args, **kwargs): # real signature unknown
""" can_prompt(self) -> bool """
pass
def do_can_store(self, *args, **kwargs): # real signature unknown
""" can_store(self) -> bool """
pass
def do_delete_sync(self, *args, **kwargs): # real signature unknown
""" delete_sync(self, source:EDataServer.Source, cancellable:Gio.Cancellable=None) -> bool """
pass
def do_lookup_sync(self, *args, **kwargs): # real signature unknown
""" lookup_sync(self, source:EDataServer.Source, cancellable:Gio.Cancellable=None) -> bool, out_credentials:EDataServer.NamedParameters """
pass
def do_store_sync(self, *args, **kwargs): # real signature unknown
""" store_sync(self, source:EDataServer.Source, credentials:EDataServer.NamedParameters, permanently:bool, cancellable:Gio.Cancellable=None) -> bool """
pass
def emit(self, *args, **kwargs): # real signature unknown
pass
def emit_stop_by_name(self, detailed_signal): # reliably restored by inspect
""" Deprecated, please use stop_emission_by_name. """
pass
def find_property(self, property_name): # real signature unknown; restored from __doc__
""" find_property(self, property_name:str) -> GObject.ParamSpec """
pass
def force_floating(self, *args, **kargs): # reliably restored by inspect
# no doc
pass
def freeze_notify(self): # reliably restored by inspect
"""
Freezes the object's property-changed notification queue.
:returns:
A context manager which optionally can be used to
automatically thaw notifications.
This will freeze the object so that "notify" signals are blocked until
the thaw_notify() method is called.
.. code-block:: python
with obj.freeze_notify():
pass
"""
pass
def getv(self, names, values): # real signature unknown; restored from __doc__
""" getv(self, names:list, values:list) """
pass
def get_data(self, *args, **kargs): # reliably restored by inspect
# no doc
pass
def get_extensible(self): # real signature unknown; restored from __doc__
""" get_extensible(self) -> EDataServer.Extensible """
pass
def get_properties(self, *args, **kwargs): # real signature unknown
pass
def get_property(self, *args, **kwargs): # real signature unknown
pass
def get_provider(self): # real signature unknown; restored from __doc__
""" get_provider(self) """
pass
def get_qdata(self, *args, **kargs): # reliably restored by inspect
# no doc
pass
def handler_block(obj, handler_id): # reliably restored by inspect
"""
Blocks the signal handler from being invoked until
handler_unblock() is called.
:param GObject.Object obj:
Object instance to block handlers for.
:param int handler_id:
Id of signal to block.
:returns:
A context manager which optionally can be used to
automatically unblock the handler:
.. code-block:: python
with GObject.signal_handler_block(obj, id):
pass
"""
pass
def handler_block_by_func(self, *args, **kwargs): # real signature unknown
pass
def handler_disconnect(*args, **kwargs): # reliably restored by inspect
""" signal_handler_disconnect(instance:GObject.Object, handler_id:int) """
pass
def handler_is_connected(*args, **kwargs): # reliably restored by inspect
""" signal_handler_is_connected(instance:GObject.Object, handler_id:int) -> bool """
pass
def handler_unblock(*args, **kwargs): # reliably restored by inspect
""" signal_handler_unblock(instance:GObject.Object, handler_id:int) """
pass
def handler_unblock_by_func(self, *args, **kwargs): # real signature unknown
pass
def install_properties(self, pspecs): # real signature unknown; restored from __doc__
""" install_properties(self, pspecs:list) """
pass
def install_property(self, property_id, pspec): # real signature unknown; restored from __doc__
""" install_property(self, property_id:int, pspec:GObject.ParamSpec) """
pass
def interface_find_property(self, *args, **kargs): # reliably restored by inspect
# no doc
pass
def interface_install_property(self, *args, **kargs): # reliably restored by inspect
# no doc
pass
def interface_list_properties(self, *args, **kargs): # reliably restored by inspect
# no doc
pass
def is_floating(self): # real signature unknown; restored from __doc__
""" is_floating(self) -> bool """
return False
def list_properties(self): # real signature unknown; restored from __doc__
""" list_properties(self) -> list, n_properties:int """
return []
def lookup_sync(self, source, cancellable=None): # real signature unknown; restored from __doc__
""" lookup_sync(self, source:EDataServer.Source, cancellable:Gio.Cancellable=None) -> bool, out_credentials:EDataServer.NamedParameters """
return False
def newv(self, object_type, parameters): # real signature unknown; restored from __doc__
""" newv(object_type:GType, parameters:list) -> GObject.Object """
pass
def notify(self, property_name): # real signature unknown; restored from __doc__
""" notify(self, property_name:str) """
pass
def notify_by_pspec(self, *args, **kargs): # reliably restored by inspect
# no doc
pass
def override_property(self, property_id, name): # real signature unknown; restored from __doc__
""" override_property(self, property_id:int, name:str) """
pass
def ref(self, *args, **kargs): # reliably restored by inspect
# no doc
pass
def ref_sink(self, *args, **kargs): # reliably restored by inspect
# no doc
pass
def replace_data(self, *args, **kargs): # reliably restored by inspect
# no doc
pass
def replace_qdata(self, *args, **kargs): # reliably restored by inspect
# no doc
pass
def run_dispose(self, *args, **kargs): # reliably restored by inspect
# no doc
pass
def set_data(self, *args, **kargs): # reliably restored by inspect
# no doc
pass
def set_properties(self, *args, **kwargs): # real signature unknown
pass
def set_property(self, *args, **kwargs): # real signature unknown
pass
def steal_data(self, *args, **kargs): # reliably restored by inspect
# no doc
pass
def steal_qdata(self, *args, **kargs): # reliably restored by inspect
# no doc
pass
def stop_emission(self, detailed_signal): # reliably restored by inspect
""" Deprecated, please use stop_emission_by_name. """
pass
def stop_emission_by_name(*args, **kwargs): # reliably restored by inspect
""" signal_stop_emission_by_name(instance:GObject.Object, detailed_signal:str) """
pass
def store_sync(self, source, credentials, permanently, cancellable=None): # real signature unknown; restored from __doc__
""" store_sync(self, source:EDataServer.Source, credentials:EDataServer.NamedParameters, permanently:bool, cancellable:Gio.Cancellable=None) -> bool """
return False
def thaw_notify(self): # real signature unknown; restored from __doc__
""" thaw_notify(self) """
pass
def unref(self, *args, **kargs): # reliably restored by inspect
# no doc
pass
def watch_closure(self, *args, **kargs): # reliably restored by inspect
# no doc
pass
def weak_ref(self, *args, **kwargs): # real signature unknown
pass
def _force_floating(self, *args, **kwargs): # real signature unknown
""" force_floating(self) """
pass
def _ref(self, *args, **kwargs): # real signature unknown
""" ref(self) -> GObject.Object """
pass
def _ref_sink(self, *args, **kwargs): # real signature unknown
""" ref_sink(self) -> GObject.Object """
pass
def _unref(self, *args, **kwargs): # real signature unknown
""" unref(self) """
pass
def _unsupported_data_method(self, *args, **kargs): # reliably restored by inspect
# no doc
pass
def _unsupported_method(self, *args, **kargs): # reliably restored by inspect
# no doc
pass
def __copy__(self, *args, **kwargs): # real signature unknown
pass
def __deepcopy__(self, *args, **kwargs): # real signature unknown
pass
def __delattr__(self, *args, **kwargs): # real signature unknown
""" Implement delattr(self, name). """
pass
def __dir__(self, *args, **kwargs): # real signature unknown
""" Default dir() implementation. """
pass
def __eq__(self, *args, **kwargs): # real signature unknown
""" Return self==value. """
pass
def __format__(self, *args, **kwargs): # real signature unknown
""" Default object formatter. """
pass
def __getattribute__(self, *args, **kwargs): # real signature unknown
""" Return getattr(self, name). """
pass
def __ge__(self, *args, **kwargs): # real signature unknown
""" Return self>=value. """
pass
def __gt__(self, *args, **kwargs): # real signature unknown
""" Return self>value. """
pass
def __hash__(self, *args, **kwargs): # real signature unknown
""" Return hash(self). """
pass
def __init_subclass__(self, *args, **kwargs): # real signature unknown
"""
This method is called when a class is subclassed.
The default implementation does nothing. It may be
overridden to extend subclasses.
"""
pass
def __init__(self, **properties): # real signature unknown; restored from __doc__
pass
def __le__(self, *args, **kwargs): # real signature unknown
""" Return self<=value. """
pass
def __lt__(self, *args, **kwargs): # real signature unknown
""" Return self<value. """
pass
@staticmethod # known case of __new__
def __new__(*args, **kwargs): # real signature unknown
""" Create and return a new object. See help(type) for accurate signature. """
pass
def __ne__(self, *args, **kwargs): # real signature unknown
""" Return self!=value. """
pass
def __reduce_ex__(self, *args, **kwargs): # real signature unknown
""" Helper for pickle. """
pass
def __reduce__(self, *args, **kwargs): # real signature unknown
""" Helper for pickle. """
pass
def __repr__(self, *args, **kwargs): # real signature unknown
""" Return repr(self). """
pass
def __setattr__(self, *args, **kwargs): # real signature unknown
""" Implement setattr(self, name, value). """
pass
def __sizeof__(self, *args, **kwargs): # real signature unknown
""" Size of object in memory, in bytes. """
pass
def __str__(self, *args, **kwargs): # real signature unknown
""" Return str(self). """
pass
def __subclasshook__(self, *args, **kwargs): # real signature unknown
"""
Abstract classes can override this to customize issubclass().
This is invoked early on by abc.ABCMeta.__subclasscheck__().
It should return True, False or NotImplemented. If it returns
NotImplemented, the normal algorithm is used. Otherwise, it
overrides the normal algorithm (and the outcome is cached).
"""
pass
g_type_instance = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
parent = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
priv = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
qdata = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
ref_count = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
__gpointer__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
__grefcount__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
props = None # (!) real value is '<gi._gi.GProps object at 0x7f626e8ec550>'
__class__ = None # (!) real value is "<class 'gi.types.GObjectMeta'>"
__dict__ = None # (!) real value is "mappingproxy({'__info__': ObjectInfo(SourceCredentialsProviderImpl), '__module__': 'gi.repository.EDataServer', '__gtype__': <GType ESourceCredentialsProviderImpl (94877537146240)>, '__doc__': None, '__gsignals__': {}, 'can_process': gi.FunctionInfo(can_process), 'can_prompt': gi.FunctionInfo(can_prompt), 'can_store': gi.FunctionInfo(can_store), 'delete_sync': gi.FunctionInfo(delete_sync), 'get_provider': gi.FunctionInfo(get_provider), 'lookup_sync': gi.FunctionInfo(lookup_sync), 'store_sync': gi.FunctionInfo(store_sync), 'do_can_process': gi.VFuncInfo(can_process), 'do_can_prompt': gi.VFuncInfo(can_prompt), 'do_can_store': gi.VFuncInfo(can_store), 'do_delete_sync': gi.VFuncInfo(delete_sync), 'do_lookup_sync': gi.VFuncInfo(lookup_sync), 'do_store_sync': gi.VFuncInfo(store_sync), 'parent': <property object at 0x7f626e926e00>, 'priv': <property object at 0x7f626e926ef0>})"
__gdoc__ = 'Object ESourceCredentialsProviderImpl\n\nProperties from EExtension:\n extensible -> EExtensible: Extensible Object\n The object being extended\n\nSignals from GObject:\n notify (GParam)\n\n'
__gsignals__ = {}
__gtype__ = None # (!) real value is '<GType ESourceCredentialsProviderImpl (94877537146240)>'
__info__ = ObjectInfo(SourceCredentialsProviderImpl)
| [
"[email protected]"
] | |
e309f63fc32bf788b5f2230bd49a429597fac3cb | ecc65625665286428b1080ee425cc809742e0dcc | /python programs/12th/7.py | b27253215343a4b2a3172737b0ada16fdfd0549e | [] | no_license | RJ-VARMA/11th-cbse-programs | 72a204aa90b3a9ae8cfb7e120ed61fd77c9f326d | 3dad091537872e8aa9028c9e7eddd7e96337bbde | refs/heads/main | 2023-08-22T06:38:46.499429 | 2021-10-18T03:18:12 | 2021-10-18T03:18:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 758 | py | import csv
login = False
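# upassword.csv is assumed to hold one "username,password" pair per row.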
answer = input("Do you have an account?(yes or no) ")
if answer == 'yes':
with open('upassword.csv', 'r') as csvfile:
csv_reader = csv.reader(csvfile)
username = input("Player One Username: ")
password = input("Player One Password: ")
        for row in csv_reader:
            print(row[0], row[1])
            print(username, password)
            if row[0] == username and row[1] == password:
                login = True
                break
if login == True:
print("You are now logged in!")
else:
print("Incorrect. Game Over.")
exit()
else:
print('Only Valid Usernames can play. Game Over.')
exit()
| [
"[email protected]"
] |