blob_id (string, length 40) | language (1 class) | repo_name (string, length 5-135) | path (string, length 2-372) | src_encoding (26 classes) | length_bytes (int64, 55-3.18M) | score (float64, 2.52-5.19) | int_score (int64, 3-5) | detected_licenses (sequence, length 0-38) | license_type (2 classes) | code (string, length 55-3.18M) | used_libs (sequence, length 1-5) |
---|---|---|---|---|---|---|---|---|---|---|---|
ac94a5d6fd7d3d646cd54340c03f25cede028ef2 | Python | kratipatidar/predicting_stock_market | /finviz_analysis.py | UTF-8 | 2,246 | 3.53125 | 4 | [] | no_license | from urllib.request import urlopen, Request
from bs4 import BeautifulSoup
from nltk.sentiment.vader import SentimentIntensityAnalyzer
import pandas as pd
import matplotlib.pyplot as plt
# here we scrape the data from FinViz website
finviz_url = 'https://finviz.com/quote.ashx?t='
tickers = ['GME', 'TSLA']
# defining a dictionary for the news tables and getting the URLs for each company
news_tables = {}
for ticker in tickers:
url = finviz_url + ticker
# now we request the data using the URLs
req = Request(url=url, headers={'user-agent': 'my-app'})
response = urlopen(req)
# saving the scraped data to a dictionary, keyed by the corresponding ticker symbol
html = BeautifulSoup(response, 'html')
news_table = html.find(id='news-table')
news_tables[ticker] = news_table
# next we will mine the required data from scraped data
## first we define an empty list
parsed_data = []
## next we extract the news titles and date
for ticker, news_table in news_tables.items():
for row in news_table.findAll('tr'):
title = row.a.get_text()
date_data = row.td.text.split(' ')
if len(date_data) == 1:
time = date_data[0]
else:
date = date_data[0]
time = date_data[1]
## appending all the extracted data to the list
parsed_data.append([ticker, date, time, title])
# creating a dataframe out of the parsed data
df = pd.DataFrame(parsed_data, columns=['ticker', 'date', 'time', 'title'])
# calling the sentiment analysis function
vader = SentimentIntensityAnalyzer()
# applying the sentiment analysis to our data
f = lambda title: vader.polarity_scores(title)['compound']
df['compound'] = df['title'].apply(f)
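# Note: VADER's 'compound' score is a normalized sentiment value in [-1, 1],
# where negative values indicate negative sentiment in a headline and positive values positive sentiment.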
# altering the dataframe for visualization purposes
df['date'] = pd.to_datetime(df.date).dt.date
mean_df = df.groupby(['ticker', 'date']).mean()
mean_df = mean_df.unstack()
mean_df = mean_df.xs('compound', axis='columns').transpose()
print(mean_df)
# finally we plot our results
# pass figsize directly: DataFrame.plot creates its own figure, so a separate plt.figure() beforehand would be ignored
mean_df.plot(kind='bar', grid=True, color=['orange', 'green'], figsize=(16, 12))
plt.legend(loc='best')
plt.xlabel('Date')
plt.ylabel('Compound Scores')
plt.title('Compound Scores of each Company - Using FinViz Data')
plt.show()
| [
"matplotlib"
] |
4a4b0875e30ac566b884fd61aca65d23501caa83 | Python | danfitz7/SoftRobotPressureChambersManualControl | /mecode/SoftRobotPressureChamberManualControl.py | UTF-8 | 11,648 | 2.59375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Soft Robot mecode
Version 3
Daniel Fitzgerald, Harvard Lewis Research Group
07/21/2014
This version includes two pressure chambers which power actuator channels A and B through two valves.
Functions for valves, actuators, and pressure chambers have also been moved to separate files, as have the standard matrix printing functionality and the starting and ending G-code for Robomama.
"""
# -*- coding: utf-8 -*-
from mecode import G
from math import sqrt
import numpy as np
import matplotlib.pyplot as plt
#Soft robot printing libraries
from Robomama import *
from MatrixPrinting import *
from QuakeValve import *
from Actuators import *
from PrintingGlobals import *
from PressureChambers import *
#init_G from PrintingGlobals.py
g=init_G(r"H:\User Files\Fitzgerald\SoftRobots\SoftRobotPressureChambersManualControl\gcode\SoftRobotPressureChambersManualControl.pgm")
def print_robot():
global g
# PRINT_SPECIFIC PARAMETERS
MACHINE_ZERO = -58 #-58.36 #zero on the top of the left ecoflex layer
MACHINE_ZERO_RIGHT = -58#-58.273 #the top of the left ecoflex layer
right_side_offset = MACHINE_ZERO_RIGHT-MACHINE_ZERO # added to the print height of the right actuators
#Absolute Machine Coordinates of the top left corner of the mold
MOLD_MACHINE_X = 367.67
MOLD_MACHINE_Y = 180.034
# mold parameters
mold_z_zero_abs = 0 # absolute zero of the top of the mold
mold_center_x = 53.5 # x coordinate of the center of the robot, relative to mold top left corner
mold_front_leg_row_y = - 13 # y coordinate of the center of the front/forward actuators, relative to mold top left corner
mold_back_leg_row_y = -34 # y coordinate of the centerline of the back legs/actuators
mold_depth = 7.62 # total depth of the body of the robot
mold_body_width = 25.4 # width of the body of the robot
mold_body_length = 65.2 #length of the body of the robot
mold_head_y = -4.9 # y coordinate of the tip of the head of the robot
# travel height above the mold
travel_height_abs = mold_z_zero_abs + 5
# needle inlet connections in the abdomen
inlet_length = 2
inlet_print_speed = 0.5
inlet_distance_from_edge = 4
#actuators
n_actuator_rows=4
actuator_print_height_offset = 0.1 # nozzle height above ecoflex
actuator_print_height = mold_z_zero_abs + actuator_print_height_offset
actuator_separation_y = 7 #distance between the "legs"
actuator_z_connect_inset = 5
left_actuators_interconnects_x = mold_center_x - mold_body_width/2 + actuator_z_connect_inset
right_actuators_interconnects_x = mold_center_x + mold_body_width/2 - actuator_z_connect_inset
def print_right_actuator():
print_actuator(theta = 1.5*np.pi, print_height_abs = actuator_print_height+right_side_offset)
def print_left_actuator():
print_actuator(theta = 0.5*np.pi, print_height_abs = actuator_print_height)
# control lines
control_line_height_abs = mold_z_zero_abs - mold_depth/2.0 # height of control line channels A and B
control_line_bridge_height_abs = control_line_height_abs + 2 # height to bridge over the control lines
control_line_x_dist_from_center_line = (1.0/6.0)*mold_body_width # distance control lines A and B are from the centerline of the robot (to the left and right respectively)
control_line_A_x = mold_center_x - control_line_x_dist_from_center_line
control_line_B_x = mold_center_x + control_line_x_dist_from_center_line
################ START PRINTING ################
# Print headers and Aerotech appeasement
g.write(multiNozzle_start_code)
g.write("$zo_ref = "+str(MACHINE_ZERO)) # set the $zo_ref as the reference zero for axis homing (why does homing/offsets need a reference?)
g.write(multiNozzle_homing_code) # comment this to not home axis
g.write(multiNozzle_start_code_2)
# set the current X and Y as the origin of the current work coordinates
g.absolute()
# go to our absolute work zero (mold top left corner)
g.write("POSOFFSET CLEAR A ; clear all position offsets and work coordinates.") #We should start in machine coordinates because the homing routine clears all position offsets, but clear them again anyway just incase
g.feed(default_travel_speed)
g.abs_move(x=MOLD_MACHINE_X, y=MOLD_MACHINE_Y)
move_z_abs(default_travel_height_abs) #MACHINE_ZERO+
# set this current mold zero as the work zero (clear any current position offsets)
# move_z_abs(MACHINE_ZERO+default_travel_height_abs)
g.write("\nG92 X0 Y0 "+default_z_axis+str(default_travel_height_abs)+" ; set the current position as the absolute work coordinate zero origin")
g.feed(default_travel_speed)
#travel_mode()
g.write(" ; READY TO PRINT")
################ Valves ################
abdomen_length = 41.5
control_line_start_y = mold_back_leg_row_y -2
valve_separation_dist = 2 #distance from the back legs to the valves
valve_flow_connection = 3
valve_control_connection = 1
valve_print_height = control_line_height_abs -0.39 #this is the pad_z_separation from the print_valve function
valve_flow_height = valve_print_height + 0.39
valve_y = control_line_start_y - valve_flow_connection - valve_separation_dist
valve_x_distance = 1.5 #distance from the center of the body to the valve's center
valve_angle = np.pi
valve_connection_dwell_time = 2 # when connecting to the valve flowstems, dwell for this long to make a good blob junction
right_valve_x = mold_center_x+valve_x_distance
left_valve_x = mold_center_x-valve_x_distance
# pressure chambers
pressure_chamber_separation_distance = 2
pressure_chamber_front_y = valve_y-valve_flow_connection-pressure_chamber_separation_distance
#print the left pressure chamber
g.abs_move(control_line_A_x, y=valve_y-valve_flow_connection-default_total_pressure_chamber_inlet_length-default_pressure_chamber_length-pressure_chamber_separation_distance)
print_pressure_chamber(print_height_abs=control_line_height_abs)
#print left valve
g.abs_move(x=left_valve_x, y = valve_y)
print_valve(flow_connection_x = valve_flow_connection, control_connection_y = valve_control_connection, print_height_abs=valve_print_height, theta=valve_angle, control_stem_corner=False, flow_inlet=False)
#connect the left pressure chamber to the bottom flow line of the right valve
g.abs_move(x=control_line_A_x, y = pressure_chamber_front_y)
print_mode(print_height_abs = valve_flow_height)
g.abs_move(x=left_valve_x,y=valve_y-valve_flow_connection)
travel_mode()
#print control line A (left side)
g.abs_move(left_valve_x, valve_y+valve_flow_connection) #go over the top flow line of the left valve
print_mode(print_height_abs = valve_flow_height) #connect to flow line
g.dwell(valve_connection_dwell_time)
g.abs_move(x=control_line_A_x, y = control_line_start_y, z=control_line_height_abs) # print just below the flow control A start
g.abs_move(y = mold_front_leg_row_y) #move up to flow line A
#print the top left actuator (A1) directly from the end of control line A
g.abs_move(x=left_actuators_interconnects_x)
move_z_abs(actuator_print_height)
print_left_actuator()
#print the right pressure chamber
g.abs_move(control_line_B_x, y=pressure_chamber_front_y-default_total_pressure_chamber_inlet_length-default_pressure_chamber_length)
print_pressure_chamber(print_height_abs=control_line_height_abs)
#connect the right pressure chamber to the bottom flow line of the right valve
g.abs_move(x=control_line_B_x, y = pressure_chamber_front_y)
print_mode(print_height_abs = valve_flow_height)
g.abs_move(x=right_valve_x,y=valve_y-valve_flow_connection)
travel_mode()
#print right valve
g.abs_move(x=right_valve_x,y=valve_y)
print_valve(flow_connection_x = valve_flow_connection, control_connection_y = valve_control_connection, print_height_abs=valve_print_height, x_mirror=True, theta=valve_angle, control_stem_corner=False, flow_inlet=False)
#print control line B (right side)
g.abs_move(x=right_valve_x, y=valve_y+valve_flow_connection) #move over the top flow connector of the right valve
print_mode(print_height_abs = valve_flow_height) #connect to flow line
g.dwell(valve_connection_dwell_time)
g.abs_move(x=control_line_B_x, y = control_line_start_y, z=control_line_height_abs)
g.abs_move(y = mold_front_leg_row_y)
#print actuator B1 (top right) directly from end of control line B
g.abs_move(x=right_actuators_interconnects_x)
move_z_abs(actuator_print_height)
print_right_actuator()
#Print the rest of the actuators off of the control lines
def print_mode_control_line_B():
print_mode(print_height_abs = control_line_height_abs, whipe_distance = 1, whipe_angle = np.pi)
def print_mode_control_line_A():
print_mode(print_height_abs = control_line_height_abs, whipe_distance = 1, whipe_angle = 0)
#print actuator A3 (second from top, right) bridging over control line B
g.abs_move(x=control_line_A_x, y=mold_front_leg_row_y - 1*actuator_separation_y)
print_mode_control_line_A()
move_z_abs(control_line_bridge_height_abs)
g.feed(default_print_speed)
g.abs_move(right_actuators_interconnects_x)
print_right_actuator()
#print actuator B3 (second from top, left) going around the control line of A3
g.abs_move(x=control_line_B_x, y=mold_front_leg_row_y - 1.5*actuator_separation_y)
print_mode_control_line_B()
move_z_abs(control_line_bridge_height_abs)
g.feed(default_print_speed)
g.abs_move(x=left_actuators_interconnects_x)
g.abs_move(y=mold_front_leg_row_y - 1.0*actuator_separation_y)
print_left_actuator()
##print actuator A2 (second from bottom, left)
g.abs_move(x=control_line_A_x, y = mold_front_leg_row_y - 2*actuator_separation_y)
print_mode_control_line_A()
g.abs_move(x=left_actuators_interconnects_x)
print_left_actuator()
#print actuator B2 (second from bottom, right)
g.abs_move(x=control_line_B_x, y = mold_front_leg_row_y - 2*actuator_separation_y)
print_mode_control_line_B()
g.abs_move(x=right_actuators_interconnects_x)
print_right_actuator()
#print actuator B4 (bottom, left) bridging over control line A
g.abs_move(x=control_line_B_x, y = mold_front_leg_row_y - 3*actuator_separation_y)
print_mode_control_line_B()
move_z_abs(control_line_bridge_height_abs)
g.feed(default_print_speed)
g.abs_move(x=left_actuators_interconnects_x)
print_left_actuator()
#print actuator A4 (bottom right) going around the control line of B4
g.abs_move(x=control_line_A_x, y = mold_front_leg_row_y - 2.5*actuator_separation_y)
print_mode_control_line_A()
move_z_abs(control_line_bridge_height_abs)
g.feed(default_print_speed)
g.abs_move(x=right_actuators_interconnects_x)
g.abs_move(y=mold_front_leg_row_y - 3*actuator_separation_y)
print_right_actuator()
#go back to home
g.abs_move(x=0,y=0,**{default_z_axis:default_travel_height_abs})
#Cleanup/Aerotech functions at the end
#g.write(end_script_string)
g.write(multiNozzle_end_code)
#main program
print_robot()
g.view()
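# g.view() above renders the generated toolpath (mecode plots it with matplotlib, hence that dependency),
# and g.teardown() below closes the output .pgm file; exact behavior may vary with the mecode version in use.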
g.teardown() | [
"matplotlib"
] |
7c9f7690cba1a6942a34655f962b6806ad15c46e | Python | irromano/SleepSure | /Code/MachineLearning/src/CNN.py | UTF-8 | 2,911 | 2.75 | 3 | [] | no_license | import numpy as np
import pandas as pd
import tensorflow as tf
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
from tensorflow import keras
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
from tensorflow.keras import datasets, layers, models
from keras.layers.convolutional import Conv2D
from keras.layers.convolutional import MaxPooling2D
from keras.models import Sequential
from keras.layers import Dense, Dropout, Conv2D, MaxPool2D, Flatten
from keras.utils import np_utils
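# Example usage sketch (the array names here are assumptions; runModel() reshapes its inputs to
# (N, 3, 342, 1), so X_train/X_test are expected as (N, 3, 342) feature arrays with binary labels):
#   cnn = CNN()
#   model = cnn.runModel(X_train, X_test, y_train, y_test)
#   cnn.predictModel("my_model.h5", X_new.reshape(-1, 3, 342, 1))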
class CNN:
def __init__(self):
print("Running CNN network")
def graph(self, df, columnName, rowNum):
subband=df[columnName][rowNum]
plt.plot(subband)
plt.show()
#makes prediction
def predictModel(self, path, dataArray):
predictionLabels=["Seizure", " non Seizure"]
model=keras.models.load_model(path)
prediction_features = model.predict(dataArray)
#print(prediction_features)
if(prediction_features[0][1]==0):
print ("Seizure")
else:
print ("Non seizure")
#print( predictionLabels[np.argmax(prediction_features)])
#runs model
def runModel(self, training_array_input,testing_array_input,training_array_output,testing_array_output):
#input data
X_train=training_array_input
X_test=testing_array_input
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
#output data
y_train=training_array_output
y_test=testing_array_output
# building the input vector
X_train = X_train.reshape(X_train.shape[0], 3, 342, 1)
X_test = X_test.reshape(X_test.shape[0], 3, 342, 1)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
# one-hot encoding using keras' numpy-related utilities
n_classes = 2
Y_train = np_utils.to_categorical(y_train, n_classes)
Y_test = np_utils.to_categorical(y_test, n_classes)
# building a linear stack of layers with the sequential model
model = Sequential()
# convolutional layer
model.add(Conv2D(25, kernel_size=(3,3), strides=(1,1), padding='valid', activation='relu', input_shape=(3,342,1)))
model.add(MaxPool2D(pool_size=(1,1)))
# flatten output of conv
model.add(Flatten())
# hidden layer
model.add(Dense(100, activation='relu'))
# output layer
model.add(Dense(2, activation='softmax'))
# compiling the sequential model
model.compile(loss='categorical_crossentropy', metrics=['accuracy'], optimizer='adam')
# training the model for 10 epochs
model.fit(X_train, Y_train, epochs=50, validation_data=(X_test, Y_test))
model.save("my_model.h5")
return model | [
"matplotlib"
] |
77eb83006be16ac7829efb4cbcb5035ef0539487 | Python | proffessorx/miriam | /planner/astar/networkx_demo.py | UTF-8 | 1,501 | 2.84375 | 3 | [] | no_license | import timeit
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
from planner.astar import astar_grid48con
from tools import load_map
map = load_map('o.png')
print(map)
map = map[:, ::2]
print(map)
#print [list(i) for i in zip(*map)]
n = map.shape[1]
print(map.shape)
G = nx.grid_graph([n, n])
#print ("G: ", G)
start = (1, 7)
goal = (7, 1)
assert map[start] >= 0, "start in obstacle"
assert map[goal] >= 0, "goal in obstacle, %d" % map[goal]
#print ("Start: ", start)
#print ("End: ", goal)
def cost(a, b):
if map[a] >= 0 and map[b] >= 0: # no obstacle
return np.linalg.norm(np.array(a)-np.array(b))
else:
#print ("else: ", np.Inf)
return np.Inf
obstacle = []
for n in G.nodes():
if not map[n] >= 0: # obstacle
obstacle.append(n)
print ("obstacle: ", obstacle)
G.remove_nodes_from(obstacle)
start_time = timeit.default_timer()  # timeit.Timer() with no statement only times 'pass', so time the call directly
path = nx.astar_path(G, start, goal, cost)
print("Path::: ", path)
print("computation time:", timeit.default_timer() - start_time, "s")
print("length: ", astar_grid48con.path_length(path))
fig, ax = plt.subplots()
ax.imshow(map.T, cmap='Greys', interpolation='nearest')
ax.set_title('astar path')
ax.axis([0, map.shape[0], 0, map.shape[1]])
ax.plot(
np.array(np.matrix(path)[:, 0]).flatten(),
np.array(np.matrix(path)[:, 1]).flatten(),
c='b',
lw=2
)
#n = G.number_of_nodes()
#if n < 500:
# plt.figure()
# pos = nx.spring_layout(G, iterations=1000, k=.1 / np.sqrt(n))
# nx.draw(G, pos)
#
#plt.show()
| [
"matplotlib"
] |
cbd0d76abfbae0e0869f720aa0208aeb0fc880a5 | Python | FAIRNS/sentence-processing-MEG-LSTM | /Code/behav_experiment/functions/plotting.py | UTF-8 | 9,052 | 2.515625 | 3 | [] | no_license | import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
def generate_fig_humans_vs_RNNs(df_error_rates_humans, sig_list_humans, df_error_rates_LSTM, sig_list_rnns, successive_nested):
'''
:param df_error_rates_humans:
:param sig_list_humans:
:param df_error_rates_LSTM:
:param sig_list_rnns:
:param successive_nested:
:return:
'''
if successive_nested == 'successive':
structures = ['embedding_mental_SR', 'embedding_mental_LR']
xlim = [-0.3, 0.3]
elif successive_nested == 'nested':
structures = ['objrel', 'objrel_nounpp']
xlim = [-0.5, 1.5]
# Figure
fig_humans, axes = plt.subplots(1, 2, figsize=(10, 5))
hue_order = [True, False]
palette = ['b', 'r']
# HUMANS SR
ax = axes[0]
df = df_error_rates_humans.loc[
(df_error_rates_humans['sentence_type'] == structures[0]) & (
df_error_rates_humans['trial_type'] == 'Violation') & (
df_error_rates_humans['violation_position'].isin(['inner', 'outer']))]
sns.pointplot(x='violation_position', y='error_rate', hue='congruent_subjects', data=df, ax=ax,
hue_order=hue_order, palette=palette, dodge=0.25, join=False)
add_significance(ax, successive_nested, sig_list_humans[0][0], sig_list_humans[0][1], sig_list_humans[0][2])
ax.get_legend().set_visible(False)
ax.set_xlim(xlim)
ax.set_ylim([-0.1, 1.5])
ax.set_xlabel('')
ax.set_ylabel('')
ax.set_yticks([0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1])
ax.set_yticklabels([0, '', '', '', '', 0.5, '', '', '', '', 1])
ax.set_xticklabels(['Embedded', 'Main'])
ax.tick_params(labelsize=20)
# HUMANS LR
ax = axes[1]
df = df_error_rates_humans.loc[
(df_error_rates_humans['sentence_type'] == structures[1]) & (
df_error_rates_humans['trial_type'] == 'Violation') & (
df_error_rates_humans['violation_position'].isin(['inner', 'outer']))]
sns.pointplot(x='violation_position', y='error_rate', hue='congruent_subjects', data=df, ax=ax,
hue_order=hue_order, palette=palette, dodge=0.25, join=False)
ax.set_yticklabels([])
ax.set_xlim(xlim)
ax.set_ylim([-0.1, 1.5])
ax.set_yticks([0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1])
ax.set_yticklabels([0, '', '', '', '', 0.5, '', '', '', '', 1])
ax.set_xlabel('')
ax.set_ylabel('')
ax.set_xticklabels(['Embedded', 'Main'])
ax.tick_params(labelsize=20)
ax.get_legend().set_visible(False)
add_significance(ax, successive_nested, sig_list_humans[1][0], sig_list_humans[1][1], sig_list_humans[1][2], SR_LR='LR')
# LAYOUT
plt.subplots_adjust(left=0.15, right=0.95, top=0.9, bottom=0.2)
# fig_humans.text(x=0.03, y=0.7, s='Humans', fontsize=26, rotation=90)
## MODEL
fig_model, axes = plt.subplots(1, 2, figsize=(10, 5))
# MODEL SR
ax = axes[0]
df = df_error_rates_LSTM.loc[
(df_error_rates_LSTM['sentence_type'] == structures[0]) & (
df_error_rates_LSTM['violation_position'].isin(['inner', 'outer']))]
sns.pointplot(x='violation_position', y='error_rate', hue='congruent_subjects', data=df, ax=ax,
hue_order=hue_order, palette=palette, dodge=0.25, join=False)
ax.get_legend().set_visible(False)
ax.set_xlabel('')
ax.set_xticklabels(['Embedded', 'Main'])
ax.set_yticks([0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1])
ax.set_yticklabels([0, '', '', '', '', 0.5, '', '', '', '', 1])
ax.tick_params(labelsize=20)
ax.set_xlim(xlim)
ax.set_ylim([-0.1, 1.5])
ax.set_ylabel('')
add_significance(ax, successive_nested, sig_list_rnns[0][0], sig_list_rnns[0][1], sig_list_rnns[0][2])
# MODEL LR
ax = axes[1]
df = df_error_rates_LSTM.loc[(df_error_rates_LSTM['sentence_type'] == structures[1]) & (
df_error_rates_LSTM['violation_position'].isin(['inner', 'outer']))]
sns.pointplot(x='violation_position', y='error_rate', hue='congruent_subjects', data=df, ax=ax,
hue_order=hue_order, palette=palette, dodge=0.25, join=False)
ax.get_legend().set_visible(False)
ax.set_xlabel('')
ax.tick_params(labelsize=20)
ax.set_xticklabels(['Embedded', 'Main'])
ax.set_yticks([0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1])
ax.set_yticklabels([0, '', '', '', '', 0.5, '', '', '', '', 1])
ax.set_xlim(xlim)
ax.set_ylim([-0.1, 1.5])
ax.set_ylabel('')
add_significance(ax, successive_nested, sig_list_rnns[1][0], sig_list_rnns[1][1], sig_list_rnns[1][2], SR_LR='LR')
# LAYOUT
plt.subplots_adjust(left=0.15, right=0.95, top=0.9, bottom=0.2)
# fig_model.text(x=0.03, y=0.6, s='NLM', fontsize=26, rotation=90)
# LEGEND
import numpy as np
fig_legend, ax = plt.subplots(figsize=(20, 2.5))
lines = []
lines.append(ax.scatter(range(1), np.random.randn(1), color='blue', lw=6, label='Congruent Subjects'))
lines.append(ax.scatter(range(1), np.random.randn(1), color='red', lw=6, label='Incongruent Subjects'))
plt.legend(loc='center', prop={'size': 45}, ncol=2)
for _ in range(2):
l = lines.pop(0)
# l = l.pop(0)
l.remove()
del l
ax.axis('off')
return fig_humans, fig_model, fig_legend
def add_significance(ax, successive_nested, text_interaction, text_embedded, text_main, delta_y=0.05, pad_y_interaction=0.3, pad_y=0.05, SR_LR='SR'):
if successive_nested == 'nested':
# significance of interaction
if not text_interaction.startswith('ns'):
x1, x2 = (ax.get_children()[2]._x[0]+ax.get_children()[4]._x[0])*0.5, (ax.get_children()[3]._x[0] + ax.get_children()[5]._x[0])*0.5
y, col = np.max(np.concatenate((ax.get_children()[2]._y, ax.get_children()[3]._y, ax.get_children()[4]._y, ax.get_children()[5]._y))) + pad_y_interaction, 'k'
if np.isnan(y):
if SR_LR == 'SR':
y = 0.356 + pad_y_interaction
elif SR_LR == 'LR':
y = 0.79 + pad_y_interaction
ax.plot([x1, x1, x2, x2], [y, y + delta_y, y + delta_y, y], lw=1.5, c=col)
ax.text((x1 + x2) * .5, y + delta_y, text_interaction, ha='center', va='bottom', color=col, fontsize=20)
# significance of embedded
if not text_embedded.startswith('ns'):
x1, x2 = ax.get_children()[2]._x[0], ax.get_children()[4]._x[0]
y, col = np.max(np.concatenate((ax.get_children()[2]._y, ax.get_children()[4]._y))) + pad_y, 'k'
if np.isnan(y):
if SR_LR == 'SR':
y = 0.356 + pad_y
elif SR_LR == 'LR':
y = 0.79 + pad_y
ax.plot([x1, x1, x2, x2], [y, y + delta_y, y + delta_y, y], lw=1.5, c=col)
ax.text((x1 + x2) * .5, y + delta_y, text_embedded, ha='center', va='bottom', color=col, fontsize=20)
# significance of main
if not text_main.startswith('ns'):
x1, x2 = ax.get_children()[3]._x[0], ax.get_children()[5]._x[0]
y, col = np.max(np.concatenate((ax.get_children()[3]._y, ax.get_children()[5]._y))) + pad_y, 'k'
if np.isnan(y):
y = 0.163 + pad_y
ax.plot([x1, x1, x2, x2], [y, y + delta_y, y + delta_y, y], lw=1.5, c=col)
ax.text((x1 + x2) * .5, y + delta_y, text_main, ha='center', va='bottom', color=col, fontsize=20)
if successive_nested == 'successive':
if not text_embedded.startswith('ns'):
# significance of embedded
x1, x2 = ax.dataLim.x0, ax.dataLim.x1
y, col = max([ax.dataLim.y0, ax.dataLim.y1]) + pad_y, 'k'
ax.plot([x1, x1, x2, x2], [y, y + delta_y, y + delta_y, y], lw=1.5, c=col)
ax.text((x1 + x2) * .5, y + delta_y, text_embedded, ha='center', va='bottom', color=col, fontsize=20)
def generate_scatter_incongruent_subjects_V1_vs_V2(df, sentence_type):
X1 = df.loc[(df['sentence_type'] == sentence_type) & (df['violation_position'] == 'inner') & (df['congruent_subjects'] == False)&(df['trial_type'] == 'Violation')]
X1 = X1.groupby('subject', as_index=False).mean()
X2 = df.loc[(df['sentence_type'] == sentence_type) & (df['violation_position'] == 'outer') & (df['congruent_subjects'] == False)&(df['trial_type'] == 'Violation')]
X2 = X2.groupby('subject', as_index=False).mean()
fig, ax = plt.subplots(figsize=[10,10])
ax.scatter(X1['error_rate'].values, X2['error_rate'].values, c=X1['subject'].values)
ax.plot(np.mean(X1.values[:, 1]), np.mean(X2.values[:, 1]), 'ro')
ax.set_xlabel('Error-Rate Embedded Verb', fontsize=24)
ax.set_ylabel('Error-Rate Main Verb', fontsize=24)
ax.set_title(sentence_type, fontsize=24)
ax.plot([0, 1], [0, 1], 'k-', alpha=0.75, zorder=0)
ax.set_aspect('equal')
ax.set_xlim([0, 1])
ax.set_ylim([0, 1])
# plt.show()
return fig, ax
# ax = sns.scatterplot(x="total_bill", y="tip", hue="time", style="time", data=tips) | [
"matplotlib",
"seaborn"
] |
3ae8492016513e5a1cce4e8422b69a0117db7499 | Python | ZhouHuang/2019-nCov-plot | /ostest.py | UTF-8 | 3,941 | 2.734375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Wed Jan 29 15:32:40 2020
@author: Administrator
OS module test
"""
import os
import matplotlib.pyplot as plt
import numpy as np
import pandas
import scipy as sp
from scipy.optimize import curve_fit
from scipy.optimize import fsolve
from scipy import integrate
plt.rcParams['font.sans-serif']=['SimHei'] # use the SimHei font so Chinese labels display correctly
plt.rcParams['axes.unicode_minus']=False # so the minus sign displays correctly with this font
date_list = ['20200126','20200127','20200128','20200129',
'20200130','20200131','20200201','20200202',
'20200203','20200204','20200205','20200206',
'20200207','20200208','20200209','20200210']
date_list2 = [i for i in range(17)]
date_list3 = [i for i in range(50)]
city_name = '武汉'
city_list1 = {'武汉': [(618, 703), (698, 803), (1590, 1722), (1905, 2056), (2261, 2444), (2639, 2901), (3215, 3513), (4109, 4508), (5142, 5635), (6384, 7003), (8351, 9087), (10117, 10986), (11618, 12638), (13603, 14846), (14982, 16468), (16902, 18629), (18454, 20444)]}
city_list2 = {}
city_list2[city_name] = city_list1[city_name].copy()
total_counts = []
delta_counts = []
while( len(city_list1[city_name]) > 0):
temp = city_list1[city_name].pop(0)
total_counts.append(temp[0])
delta_counts.append(0)
while( len(city_list2[city_name]) > 1):
temp = city_list2[city_name].pop(0)
delta_counts.append(city_list2[city_name][0][0] - temp[0])
# print(delta_counts)
def total_func(x,a1,a2,a3):
return a3 * ( 1 - 1.0/(1.0 + np.exp((x-a1)/a2)) )
# return ( 1 - 1.0/(1.0 + np.tanh((x-a1)/a2))
# def total_func(x,c1,a1,a2):
# return c1 + integrate.quad(delta_func(x,a1,a2),0,17)
# def delta_func(x,a1,a2,a3):
# return a1 / (np.sqrt(2*np.pi) * a2) * np.exp(-(x-a3)*(x-a3)/2/a2/a2)
def delta_func(x,a1,a2):
return a1 * np.exp(-a2 * np.power(x,2)) * np.power(x,2)
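# Model forms used below: total_func is a logistic curve a3 * (1 - 1/(1 + exp((x - a1)/a2))),
# where a3 is the asymptotic cumulative count, a1 the inflection day, and a2 the time scale;
# delta_func, a1 * exp(-a2 * x^2) * x^2, is the bell-shaped curve fitted to the daily increments.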
fig = plt.figure()
# fig.title("荆州")
ax1 = fig.add_axes([0.1, 0.1, 0.8, 0.8],xlim=(-1,date_list3[-1]))
ax1.grid()
ax1.plot(date_list2,total_counts,'b^',label='感染总数')
ax1.set_title(city_name,fontsize=16)
ax2 = ax1.twinx()
ax2.yaxis.label.set_color('red')
ax2.tick_params(axis='y', colors='red')
ax2.plot(date_list2,delta_counts,'r^',label='日增数目')
handles1, labels1 = ax1.get_legend_handles_labels()
handles2, labels2 = ax2.get_legend_handles_labels()
plt.legend(handles1+handles2, labels1+labels2, loc='center right',
frameon=True, shadow=True)
# popt, pcov = sp.optimize.curve_fit(total_func, date_list, total_counts,bounds=(0, [3., 0.5]))
# popt, pcov = curve_fit(total_func, date_list2, total_counts)
popt_delta, pcov_delta = curve_fit(delta_func, date_list2, delta_counts)
ax2.plot(date_list3, delta_func(date_list3, *popt_delta), 'r--',)
popt_total, pcov_total = curve_fit(total_func, date_list2, total_counts,bounds=([0,0,0],[20,100,50000] ) )
print(popt_total)
ax1.plot(date_list3, total_func(date_list3, *popt_total), 'b--',)
def delta_func_cal_days(x,a1=popt_delta[0],a2=popt_delta[1]):
return a1 * np.exp(-a2 * np.power(x,2)) * np.power(x,2) - 1
r = fsolve(delta_func_cal_days, (50))
ax1.axvline(x=r, ls='--', c='y',label='end date') #end date
handles1, labels1 = ax1.get_legend_handles_labels()
handles2, labels2 = ax2.get_legend_handles_labels()
plt.legend(handles2+handles1, labels2+labels1, loc='center right',
frameon=True, shadow=True)
plt.xticks(date_list3[::2])
# x = np.linspace(0, 10, 1000)
# fig, ax = plt.subplots()
# ax.plot(x, np.sin(x), label='sin')
# ax.plot(x, np.cos(x), '--', label='cos')
# ax.legend(loc='upper left', frameon=False);
# files = os.listdir(os.path.abspath('data'))
# for j_file in files:
# if '20200201' in j_file:
# #print(j_file)
# df = pandas.read_json('./data/{}'.format(j_file),encoding='utf-8')
# #print(df.iloc[:,6])
# bf = np.array(df).tolist()
# print(df.iloc[1,-2] == '上海')
| [
"matplotlib"
] |
37df33bafab213fc9f0a8322adce3e36562b1bbe | Python | Lucasgb7/sinais_sistemas | /Matemática Aplicada a Engenharia/4a.py | UTF-8 | 443 | 3.296875 | 3 | [] | no_license | import numpy as np
import matplotlib.pyplot as plt
def plotGraphic(eixoy, var, x, y):
var.set_ylabel(eixoy)
var.set_xlabel('W')
var.set_title(eixoy)
var.plot(x, y,)
var.grid()
w = np.arange(-5, 5, 0.1)
a = 3
z = (a) / (a + 1j*w)
angulo = np.arctan(z.imag/z.real)
modulo = abs(z)
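# For H(jw) = a / (a + j*w) these are the first-order low-pass responses:
# |H| = a / sqrt(a^2 + w^2) and phase = -arctan(w/a), with the -3 dB cutoff at w = a (= 3 here).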
fig, ax = plt.subplots(2)
plotGraphic('Magnitude(w)', ax[0], w, modulo)
plotGraphic('Fase(w)', ax[1], w, angulo)
plt.subplots_adjust(hspace=0.5)
plt.show() | [
"matplotlib"
] |
1f26367e806475fad43b659e7e71159e00fdce3e | Python | AswinGnanaprakash/code_box | /autoML/application/main.py | UTF-8 | 1,498 | 2.8125 | 3 | [] | no_license |
import tkinter as tk
from tkinter import filedialog
from algo import main
import matplotlib.pyplot as plt
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from pandas import DataFrame
def UploadAction(event=None):
filez = filedialog.askopenfilenames(parent=r,title='Choose a file')
files_data = r.tk.splitlist(filez)
result = main(files_data)
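    # main() (from the local 'algo' module) is assumed to return a dict mapping algorithm names to
    # accuracy scores; the code below keeps the top 10 by accuracy and plots them as a bar chart.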
import operator
sorted_d = dict(sorted(result.items(), key=operator.itemgetter(1),reverse=True))
top_10 = list(sorted_d.keys())[0:10]
re_insert = dict()
list_value = list()
predict_values = list()
for i in top_10:
list_value.append(i)
predict_values.append(float(sorted_d[i]))
re_insert['Algorithms'] = list_value
re_insert['Accuracy'] = predict_values
print(re_insert)
df1 = DataFrame(re_insert,columns=['Algorithms','Accuracy'])
figure1 = plt.Figure(figsize=(30,15), dpi=100)
ax1 = figure1.add_subplot(111)
ax1.set_title('Best algorithms for your model')
bar1 = FigureCanvasTkAgg(figure1, r)
bar1.get_tk_widget().pack(side=tk.LEFT, fill=tk.BOTH)
df1 = df1[['Algorithms','Accuracy']].groupby('Algorithms').sum()
df1.plot(kind='bar', legend=True, ax=ax1)
if __name__ == "__main__" :
global r
r = tk.Tk()
r.geometry("300x200+10+20")
r.title('AutoML')
r.configure(background = 'light green')
button = tk.Button(r, text='Open', command=UploadAction)
button.pack()
r.mainloop()
| [
"matplotlib"
] |
d6b754e8181799c9b49c38c2a50d39be716d993b | Python | zhaoyangyingmu/ImageRecognitionNaive | /src/im_recognition/image_network_weight.py | UTF-8 | 2,147 | 2.90625 | 3 | [] | no_license | import numpy as np
import matplotlib.pyplot as plt
from util.network_elements import Network
from util.image_data import ImageData
from util.network_util import NetworkUtil
class CompareWeight:
def __init__(self):
# set up network
net_config = [784,14,-1]
learning_rate = 0.1
l_weights = list((np.arange(11)-5)*0.1)
bias = 0.5
networks = []
for weight in l_weights:
networks.append(Network(net_config,learning_rate,weight,bias))
print("networks ready!")
self.l_weights = l_weights
self.networks = networks
self.bias = bias
self.l_ll = []
pass
def training(self):
l_weights = self.l_weights
networks = self.networks
get_hit_rate = NetworkUtil.get_hit_rate
bias = self.bias
# training
[x_train, y_train] = ImageData.get_train_data()
print("training data ready!")
l_ll = [0.0] * len(l_weights)
for i in range(len(l_weights)):
l_ll[i] = []
for e in range(100):
for i in range(287):
idx = i * 10
for j in range(len(networks)):
loss = networks[j].train(x_train[idx:idx + 10, :], y_train[idx:idx + 10, :])
l_ll[j].append(loss)
print("epoch", e)
for i in range(len(l_weights)):
hit_rate = get_hit_rate(networks[i])
print("hit rate =", hit_rate, " with weight =", l_weights[i], "bias =", bias)
self.l_ll = l_ll
def show_result(self):
l_weights = self.l_weights
l_ll = self.l_ll
plt.title('loss with different weight')
for i in range(len(l_weights)):
x = np.arange(1, len(l_ll[i]) + 1)
label_str = 'weight = ' + str(l_weights[i])
plt.plot(x[100:], l_ll[i][100:], label=(label_str))
plt.legend()
plt.xlabel('iteration times')
plt.ylabel('loss')
plt.show()
def get_best_network(self):
best_network = NetworkUtil.get_best_network(self.networks)
return best_network
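# Example usage sketch (assumes the util modules and the image data loaded by ImageData are available):
#   cw = CompareWeight()
#   cw.training()
#   cw.show_result()
#   best = cw.get_best_network()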
| [
"matplotlib"
] |
a45165ba781ea2eb881f757a346696568036ebfd | Python | deep141997/data-analysis | /tmdb movie analysis | UTF-8 | 1,570 | 3.125 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 24 11:27:41 2018
@author: deepak
"""
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from subprocess import check_output
data=pd.read_csv('/home/deepak/Downloads/dataset/tmdb_5000_movies.csv')
data.info() # info() prints a concise summary of the DataFrame itself
print(data.columns) # to print all columns
print(data.head(6))
print(data.corr())
f,ax=plt.subplots(figsize=(10,10)) #initialize figure and axis object
sns.heatmap(data.corr(), annot = True, linewidths=.9, fmt = '.1f', ax = ax) #axis for matplotlib otherwise use currenly active axis
# width of line that will divide each cell
# annot if true write data in cell
plt.show()
#using matplotlib libraray
"""vote_count_data=data['vote_count']
#print(vote_count_data) print all vote count data
# creating figure and axis
fig,ax=plt.subplots()
ax.violinplot(vote_count_data,vert=False)
plt.plot(vote_count_data,"r-")
plt.show() """
# line plot between budget vs revenue
"""data.plot(kind='line',color='r',x='vote_count',y='budget',alpha='.5')
plt.xlabel('vote_count')
plt.ylabel('budget')
plt.title('Line Plot')
plt.show() """
#scatter plot vs vote_count and budget
data.plot(kind='scatter', x='vote_count', y='budget', alpha=.5, color='b')
plt.xlabel('vote_count')
plt.ylabel('budget')
plt.title('scatter plot')
plt.show()
#histogram plot
data.budget.plot(kind='hist',bins=20,figsize=(12,12))
plt.title('budget histogram')
plt.show()
data.vote_count.plot(kind='hist',bins=20,figsize=(15,15))
plt.title('vote_count histogram')
plt.show() | [
"matplotlib",
"seaborn"
] |
48b07f18d20c2075d1cf040572eb4982f7168752 | Python | MonNum5/Probabilistc_Machine_Learning_Models | /models/sparse_gaussian_process_pyro.py | UTF-8 | 4,637 | 2.640625 | 3 | [] | no_license |
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 8 10:48:26 2019
@author: hall_ce
"""
#gaussian process with pyro (pytorch based)
import torch
import pyro
import pyro.contrib.gp as gp
from torch.autograd import Variable
import os
import matplotlib.pyplot as plt
import pyro.distributions as dist
import random
import numpy as np
def kernel_fun(input_dim, rbf_variance=5., lengthscale_rbf=10.):
kernel= gp.kernels.RBF(input_dim=input_dim, variance=torch.tensor(rbf_variance),lengthscale=torch.tensor(lengthscale_rbf))
return kernel
class GP_pyro_sparse:
def __init__(self,input_dim,number_of_inducing_points,lr=0.1,epochs=3000, rbf_variance=5.0, lengthscale_rbf=10., variance_noise=1.0, weight_decay=1e-4):
self.input_dim=input_dim
self.lr=lr
self.epochs=epochs
self.rbf_variance=rbf_variance
self.lengthscale_rbf=lengthscale_rbf
self.variance_noise=variance_noise
self.number_of_inducing_points=number_of_inducing_points #specifies the intervall, the sparse algorithm selects the initial points
self.weight_decay=weight_decay
self.kernel=kernel_fun(input_dim=self.input_dim, rbf_variance=self.rbf_variance, lengthscale_rbf=self.lengthscale_rbf)
def fit(self, X_train, y_train, verbose=False):
train_x=Variable(torch.from_numpy(X_train).float())
train_y=Variable(torch.from_numpy(y_train.flatten()).float())
#select inducing points
#random selection
rand_select=random.sample(range(X_train.shape[0]),self.number_of_inducing_points)
Xu=train_x[rand_select]
#likelihood = gp.likelihoods.Gaussian()
#self.gpr = gp.models.VariationalSparseGP(train_x, train_y, kernel=self.kernel, Xu=Xu, likelihood=likelihood, whiten=True)
self.gpr = gp.models.SparseGPRegression(train_x, train_y, self.kernel, Xu=Xu, noise=torch.tensor(self.variance_noise), jitter=1.0e-5)
optimizer = torch.optim.Adam(self.gpr.parameters(), lr=self.lr, weight_decay=self.weight_decay)
loss_fn = pyro.infer.Trace_ELBO().differentiable_loss
losses = []
for epoch in range(self.epochs):
optimizer.zero_grad()
loss = loss_fn(self.gpr.model , self.gpr.guide)
loss.backward()
if verbose:
if epoch % 100 == 0:
print('Epoch {} loss: {}'.format(epoch+1, loss.item()))
optimizer.step()
losses.append(loss.item())
def predict(self,X, return_std=False):
x=Variable(torch.from_numpy(X).float())
with torch.no_grad():
mean, cov = self.gpr(x, noiseless=False)
y_mean=mean.numpy()
y_std=cov.diag().sqrt().numpy()
if return_std==True:
return y_mean, y_std
else:
return y_mean
'''
import matplotlib.pyplot as plt
import numpy as np
def f(x):
"""The function to predict."""
return x * np.sin(x)
#----------------------------------------------------------------------
# First the noiseless case
X = np.atleast_2d(np.random.uniform(0, 10.0, size=100)).T
X = X.astype(np.float32)
# Observations
y = f(X).ravel()
dy = 1.5 + 1.0 * np.random.random(y.shape)
noise = np.random.normal(0, dy)
y += noise
y = y.astype(np.float32)
# Mesh the input space for evaluations of the real function, the prediction and
# its MSE
xx = np.atleast_2d(np.linspace(0, 10, 1000)).T
xx = xx.astype(np.float32)
X.shape, y.shape, xx.shape
x_train=x[:int(x.shape[0]*0.8),:]
y_train=y[:int(x.shape[0]*0.8),:].flatten()
test_x=x[int(x.shape[0]*0.8):,:]
test_y=y[int(x.shape[0]*0.8):,:].flatten()
model=GP_pyro_sparse(input_dim=x.shape[1], number_of_inducing_points=int(x.shape[0]*0.8*0.5))
model.fit(x_train, y_train, verbose=True)
y_pred, y_std = model.predict(test_x, return_std=True)
plt.errorbar(test_y,y_pred,yerr=y_std,marker='o',fmt='none')
y_upper=y_pred+y_std
y_lower=y_pred-y_std
fig = plt.figure()
plt.plot(xx, f(xx), 'g:', label=u'$f(x) = x\,\sin(x)$')
plt.plot(X, y, 'b.', markersize=10, label=u'Observations')
plt.plot(xx.flatten(), y_pred, 'r-', label=u'Prediction')
plt.plot(xx.flatten(), y_lower, 'k-')
plt.plot(xx.reshape(-1,1), y_upper, 'k-')
plt.fill(np.concatenate([xx, xx[::-1]]),
np.concatenate([y_upper, y_lower[::-1]]),
alpha=.5, fc='b', ec='None', label='90% prediction interval')
plt.xlabel('$x$')
plt.ylabel('$f(x)$')
plt.ylim(-10, 20)
plt.legend(loc='upper left')
plt.show()
'''
| [
"matplotlib"
] |
834343b7457f079962b0b91f8a78c3e59abf9db0 | Python | dalowe48/ECE351_Code | /351Lab1.py | UTF-8 | 3,900 | 3.625 | 4 | [] | no_license | # Section 1
t = 1
print(t)
print("t =",t)
print('t =',t,"seconds")
print('t is now =',t/3,'\n...and can be rounded using `round()`',round(t/3,4))
print(3**2)
# Section 2
# This is a comment, and the following statement is not executed:
# print(t+5)
# Section 3
import numpy
# Section 4
list1 = [0,1,2,3]
print('list1:',list1)
list2 = [[0],[1],[2],[3]]
print('list2:',list2)
list3 = [[0,1],[2,3]]
print('list3:',list3)
array1 = numpy.array([0,1,2,3])
print('array1:',array1)
array2 = numpy.array([[0],[1],[2],[3]])
print('array2:',array2)
array3 = numpy.array([[0,1],[2,3]])
print('array3:',array3)
import numpy
print(numpy.pi)
import numpy as np
print(np.pi)
from numpy import pi
print(pi)
# Section 5
import numpy as np
print(np.arange(4),'\n',
np.arange(0,2,0.5),'\n',
np.linspace(0,1.5,4))
# Section 6
list1 = [1,2,3,4,5]
array1 = np.array(list1) # definition of a numpy array using a list
print('list1 :',list1[0],list1[4])
print('array1:',array1[0],array1[4])
array2 = np.array([[1,2,3,4,5],
[6,7,8,9,10]])
list2 = list(array2)
print('array2:',array2[0,2],array2[1,4])
print('list2 :',list2[0],list2[1]) # it is best to use numpy arrays
# for indexing specific values
# in multi-dimensional arrays
print(array2[:,2],
array2[0,:])
# Section 7
print('1x3:',np.zeros(3))
print('2x2:',np.zeros((2,2)))
print('2x3:',np.ones((2,3)))
# Section 1.3.2
# Sample code for Lab 1
import numpy as np
import matplotlib.pyplot as plt
# Define variables
steps = 0.1 # step size
x = np.arange(-2,2+steps,steps) # notice the final value is
# `2+steps` to include `2`
y1 = x + 2
y2 = x**2
# Code for plots
plt.figure(figsize=(12,8)) # start a new figure, with
# a custom figure size
plt.subplot(3,1,1) # subplot 1
plt.plot(x,y1)
plt.title('Sample Plots for Lab 1') # title for entire figure
# (all three subplots)
plt.ylabel('Subplot 1') # label for subplot 1
plt.grid(True)
plt.subplot(3,1,2) # subplot 2
plt.plot(x,y2)
plt.ylabel('Subplot 2') # label for subplot 2
plt.grid(which='both') # use major and minor grids
# (minor grids not available
# since plot is small)
plt.subplot(3,1,3) # subplot 3
plt.plot(x,y1,'--r',label='y1')
plt.plot(x,y2,'o',label='y2')
plt.axis([-2.5, 2.5, -0.5, 4.5])
plt.grid(True)
plt.legend(loc='lower right')
plt.xlabel('x') # x-axis label for all
# three subplots (entire figure)
plt.ylabel('Subplot 3') # label for subplot 3
plt.show() ### --- This MUST be included to view your plots! --- ###
# Section 1.3.3
import numpy as np
cRect = 2 + 3j
print(cRect)
cPol = abs(cRect) * np.exp(1j*np.angle(cRect))
print(cPol) # notice Python will store this in rectangular form
cRect2 = np.real(cPol) + 1j*np.imag(cPol)
print(cRect2) # converting from polar to rectangular
print(numpy.sqrt(3*5 - 5*5 + 0j))
# Section 1.3.4
import numpy as np
import matplotlib.pyplot as plt
import scipy as sp
import scipy.signal as sig
import pandas as pd
import control
import time
from scipy.fftpack import fft, fftshift
#range() # create a range of numbers (nice for `for` loops)
#np.arange() # create a numpy array that is a range of number with a defined
# step size
#np.append() # add values to the end of a numpy array
#np.insert() # add values to the beginning of a numpy array
#np.concatenate() # combine two numpy arrays
#np.linspace() # create a numpy array that contains a specified (linear) range
# of values with a specified number of elements
#np.logspace() # create a numpy array that contains a specified (logarithmic, base
# specified) range of values with a specified number of elements
#np.reshape() # reshape a numpy array
#np.transpose() # transpose a numpy array
#len() # return the number of elements in an array (horizontal)
#.size # return the number of elements in an array (vertical)
#.shape # return the dimensions of an array
#.reshape # reshape the dimensions of an array (similar to numpy.reshape() above)
#.T # transpose an array (similar to np.transpose() above) | [
"matplotlib"
] |
d448414a4e1565ac91944c8fb10dbe7df00ef0b9 | Python | jfcjlu/APER | /Python/Ch. 11. R - Least Squares Fit - Straight Line Through Origin - Weighted Points.py | UTF-8 | 953 | 3.875 | 4 | [] | no_license | # LEAST SQUARES FIT - STRAIGHT LINE THROUGH ORIGIN - WEIGHTED POINTS (y = b*x)
from __future__ import division
import math
import numpy as np
import matplotlib.pyplot as plt
# Enter the values of x, y and their corresponding weights w:
x = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
y = np.array([0, 3, 6, 7, 9, 15, 17, 20, 25, 30])
w = np.array([1, 1, 2, 4, 5, 5, 2, 2, 1, 2])
# Plot, size of dots, labels, ranges of values, initial values
plt.scatter(x, y)
plt.xlabel("x, (m)") # set the x-axis label
plt.ylabel("Displacement, y (m)") # set the y-axis label
plt.grid(True)
# Evaluation
N = len(x)
WXX = sum(w*x**2)
WXY = sum(w*x*y)
b = WXY/WXX
d = y - b*x
WDD = sum(w*d**2)
Db = math.sqrt(WDD/((N-1)*WXX))
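# Formulas implemented above (weighted fit through the origin):
# b = sum(w*x*y) / sum(w*x^2), and with residuals d = y - b*x,
# the standard error is Db = sqrt( sum(w*d^2) / ((N-1) * sum(w*x^2)) ).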
# Results
print("Value of b: ", b)
print("Standard error in b: ", Db)
# Plot least-squares line
xx = np.linspace(min(x), max(x), 200)
yy = b * xx
plt.plot(xx, yy, '-')
plt.show()
| [
"matplotlib"
] |
2672c4fe1ac4d05df47c8374706a61756f214b70 | Python | jeanplemos/probabilidade | /probpoli.py | UTF-8 | 2,419 | 3.375 | 3 | [] | no_license | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os
while True:
os.system('clear')
print()
print('\033[33m-=\033[m' * 31)
print(' \033[31m<< C A L C U L A D O R A B I N O M I A L >>\033[m')
print('\033[33m-=\033[m' * 31)
print()
print ("\033[36mPara calcular uma probabilidade binomial é preciso especificar:")
print()
n = int(input("Número de observações [n]: \033[31m"))
factn = 1
for i in range(1,n+1):
factn = factn * i
print()
x = int(input("\033[36mNúmero de sucessos [x]: \033[31m"))
factx = 1
for j in range(1,x+1):
factx = factx * j
factnx = 1
for k in range(1,(n-x)+1):
factnx = factnx * k
c = factn/(factx*factnx)
print()
p = float(input("\033[36mProbabilidade de sucesso em cada observação [p em valor absoluto]: \033[31m"))
px = float (c*(p**x)*(1-p)**(n-x))
print('''
\033[33m-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=\033[m
\033[31m<< R E S U L T A D O S >>\033[m
\033[33m-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=\033[m
''')
print (f"\033[36mProbabilidade de {x} é de: \033[31m{(px*100):.2f}%")
l = 0
pxa2 = 0
while l <= x:
factl = 1
for m in range(1,l+1):
factl = factl * m
factnl = 1
for o in range (1, (n-l)+1):
factnl = factnl * o
d = factn/(factl*factnl)
pxa = float(d*(p**l)*(1-p)**(n-l))
plt.plot([l], [pxa], linestyle='--', color='g', marker='s',
linewidth=3.0)
l = l+1
pxa2 = pxa2 + pxa
print()
print(f'\033[36mProbabilidade de no máximo {x} é de: \033[31m{(pxa2*100):.2f}%')
print()
print(f'\033[36mProbabilidade de no mínimo {x} é de: \033[31m{((px+(1-pxa2))*100):.2f}%')
print()
media = n * p
print(f'\033[36mMédia: \033[31m{media:.2f}')
variancia = float(n*p*(1-p))
desvio_padrao = variancia**0.5
print()
print(f"\033[36mDesvio padrão: \033[31m{desvio_padrao:.2f}")
    plt.axis([0, n, 0, 1])  # x-axis spans the number of observations
plt.show()
while True:
print()
continua = input('\033[36mDeseja continuar? [S/n] \033[31m')
if continua in 'NnsS':
break
else:
print()
print('\033[36mERRO!! Digite apenas "S" ou "N"!')
if continua == 'N' or continua == 'n':
break
| [
"matplotlib"
] |
47dca9b8159937665fb1fe94bd57010ce63ae978 | Python | has2k1/plotnine | /tests/test_animation.py | UTF-8 | 2,723 | 2.828125 | 3 | [
"MIT"
] | permissive | import matplotlib.pyplot as plt
import pytest
from plotnine import labs, lims, qplot, theme_minimal
from plotnine.animation import PlotnineAnimation
from plotnine.exceptions import PlotnineError
plt.switch_backend("Agg") # TravisCI needs this
x = [1, 2, 3, 4, 5]
y = [1, 2, 3, 4, 5]
colors = [[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]]
class _PlotnineAnimation(PlotnineAnimation):
"""
Class used for testing
By using this class we only test the creation of
artists and not the animation. This tests all the
plotnine wrapper code and we can trust matplotlib
for the rest.
"""
def __init__(
self, plots, interval=200, repeat_delay=None, repeat=True, blit=False
):
figure, artists = self._draw_plots(plots)
def test_animation():
def plot(i):
return (
qplot(x, y, color=colors[i], xlab="x", ylab="y")
+ lims(color=(1, 7))
+ labs(color="color")
+ theme_minimal()
)
plots = [plot(i) for i in range(3)]
_PlotnineAnimation(plots, interval=100, repeat_delay=500)
def test_animation_different_scale_limits():
def plot(i):
if i == 2:
_lims = lims(color=(3, 7))
else:
_lims = lims(color=(1, 7))
return (
qplot(x, y, color=colors[i], xlab="x", ylab="y")
+ _lims
+ labs(color="color")
+ theme_minimal()
)
plots = [plot(i) for i in range(3)]
with pytest.raises(PlotnineError):
_PlotnineAnimation(plots, interval=100, repeat_delay=500)
def test_animation_different_number_of_scales():
def plot(i):
if i == 2:
p = qplot(x, y, xlab="x", ylab="y")
else:
p = (
qplot(x, y, color=colors[i], xlab="x", ylab="y")
+ lims(color=(1, 7))
+ labs(color="color")
)
return p + theme_minimal()
plots = [plot(i) for i in range(3)]
with pytest.raises(PlotnineError):
_PlotnineAnimation(plots, interval=100, repeat_delay=500)
def test_animation_different_scales():
def plot(i):
c = colors[i]
if i == 2:
p = (
qplot(x, y, color=c, xlab="x", ylab="y")
+ lims(color=(1, 7))
+ labs(color="color")
)
else:
p = (
qplot(x, y, stroke=c, xlab="x", ylab="y")
+ lims(stroke=(1, 7))
+ labs(stroke="stroke")
)
return p + theme_minimal()
plots = [plot(i) for i in range(3)]
with pytest.raises(PlotnineError):
_PlotnineAnimation(plots, interval=100, repeat_delay=500)
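# For reference, a real (non-test) animation would be built with PlotnineAnimation itself and can be
# saved through matplotlib's animation API, e.g. (sketch; the filename and writer settings are assumptions):
#   ani = PlotnineAnimation(plots, interval=100, repeat_delay=500)
#   ani.save("animation.mp4")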
| [
"matplotlib"
] |
0e1a0839ec7bd81ef32887f0d8477c262d110fa0 | Python | chowdhurykaushiki/machine-learning | /HousingPrice/load_housing_date.py | UTF-8 | 1,863 | 3.25 | 3 | [] | no_license | # -*- coding: utf-8 -*-
import pandas as pd
import os
def load_housing_data(housing_path='C:/KAUSHIKI/Personal/ML/ML_Exercise/HousingPrice'):
file_path = os.path.join(housing_path,"housing.csv")
return pd.read_csv(file_path)
data=load_housing_data()
# see first five rows of dataframe
dataHead=data.head()
# dataframe info : gives infor about the dataframe obj
data.info()
# gives info about the details of the columns
dataDesc=data.describe()
#count of each categorical values
data["ocean_proximity"].value_counts()
#get the unique categorial value
data["ocean_proximity"].unique()
#get the column values using index when you dont know the column name
dataColVal=data.iloc[:,0:3]
#get the column values using column name
datacolVals=data.loc[0:10,"longitude"]
#groupby
#get the avg median housing pricegroup by ocean proximity
dataGroupBy=data.groupby("ocean_proximity").mean()["median_house_value"]
#get mzximum value of a column
data.max()["median_house_value"]
#get count of a colum
data.count()["total_bedrooms"]
data.mean()
#draw a plot
import matplotlib.pyplot as plt
data.head().plot(kind='bar',x="longitude",y="median_house_value")
plt.show()
data.plot(kind='scatter',x='longitude',y='latitude')
plt.show()
#########################################
from sklearn.model_selection import StratifiedShuffleSplit
import numpy as np
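# StratifiedShuffleSplit draws n_splits random train/test partitions while preserving the
# proportion of each 'income_cat' value in both sets (here 10 splits with a 20% test fraction).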
cnt = 0
smallData=data.head(10)
smallData=smallData.drop("income_cat",axis=1,errors="ignore") # drop a pre-existing income_cat column if there is one (the raw CSV has none)
#smallData["income_cat"]=np.ceil(smallData["median_income"]/1.5)
smallData['income_cat']=pd.Series([2,2,3,2,3,2,2,3,3,3])
split=StratifiedShuffleSplit(n_splits=10,test_size=0.2,random_state=42)
for train_index,test_index in split.split(smallData,smallData["income_cat"]):
cnt+=1
print(train_index)
print(test_index)
strat_train_set=smallData.iloc[train_index]
strat_test_set=smallData.iloc[test_index]
print(cnt)
| [
"matplotlib"
] |
86ef92cf7eb7657886fa1bc2b57bea05a7145798 | Python | xyzhu68/deeplearning | /new/VGG16/data_provider.py | UTF-8 | 4,048 | 2.640625 | 3 | [] | no_license | from keras.utils import to_categorical
import numpy as np
from keras.preprocessing.image import ImageDataGenerator
from scipy.ndimage import rotate
from sklearn.utils import shuffle
import random
import matplotlib.pyplot as plt # DEBUG
nbClasses = 20
img_size = 150
# def flip_images(X, y, doFlip):
# if not doFlip:
# #y = to_categorical(y, 36)
# y_E = np.zeros(len(y))
# return (X, y, y_E)
# X = X.reshape(-1, 128, 128)
# x_array = []
# for image in X:
# axis = bool(random.getrandbits(1))
# flipped = np.flip(image, axis)
# x_array.append(flipped)
# x_array = np.asarray(x_array)
# X = x_array.reshape(-1, 128, 128, 1)
# y_E = np.full(len(y), 1.0)
# #y = to_categorical(y, 36)
# return (X, y, y_E)
def appear(X, y, isBase):
if isBase:
size = len(X)
x_array = []
y_array = []
y_array_E = []
for i in range(size):
yValue = np.argmax(y[i])
if yValue < nbClasses // 2:
x_array.append(X[i])
y_array.append(yValue)
y_array_E.append(0)
y_array = to_categorical(y_array, nbClasses)
x_array = np.asarray(x_array)
x_array = x_array.reshape(-1, img_size, img_size, 3)
return (x_array, y_array, y_array_E)
else:
y_array_E = []
for yItem in y:
yValue = np.argmax(yItem)
if yValue >= nbClasses // 2:
y_array_E.append(0)
else:
y_array_E.append(1)
y_array = to_categorical(y, nbClasses)
return (X, y, y_array_E)
def remap(X, y, firstHalf):
if firstHalf:
size = len(X)
x_array = []
y_array = []
y_array_E = []
for i in range(size):
yValue = np.argmax(y[i])
if yValue < nbClasses // 2:
x_array.append(X[i])
y_array.append(yValue)
y_array_E.append(0)
y_array = to_categorical(y_array, nbClasses)
x_array = np.asarray(x_array)
x_array = x_array.reshape(-1, img_size, img_size, 3)
return (x_array, y_array, y_array_E)
else:
size = len(X)
x_array = []
y_array = []
y_array_E = []
for i in range(size):
yValue = np.argmax(y[i])
if (nbClasses//2-1) < yValue < nbClasses : # 9 < yValue < 20
x_array.append(X[i])
y_array.append(yValue - 10)
y_array_E.append(1)
#y_array_E = np.full(len(y_array), 1.0)
y_array = to_categorical(y_array, nbClasses)
x_array = np.asarray(x_array)
x_array = x_array.reshape(-1, img_size, img_size, 3)
return (x_array, y_array, y_array_E)
# def rot(X,y, angle):
# if angle == 0:
# y_E = np.zeros(len(X))
# #y = to_categorical(y, 10)
# return (X, y, y_E)
# else:
# X_result = []
# for image in X:
# X_rot = rotate(image, angle, reshape=False)
# X_result.append(X_rot)
# X_result = np.asarray(X_result)
# X_result = X_result.reshape(-1, 128, 128, 1)
# y_E = np.full(len(y), 1.0)
# #y = to_categorical(y, 10)
# return (X_result, y, y_E)
def transfer(X, y, firstHalf):
x_array = []
y_array = []
y_array_E = []
for i in range(len(X)):
yValue = np.argmax(y[i])
if firstHalf:
if yValue < nbClasses // 2:
x_array.append(X[i])
y_array.append(yValue)
y_array_E.append(0)
else:
if yValue >= nbClasses // 2:
x_array.append(X[i])
y_array.append(yValue)
y_array_E.append(1)
x_array = np.asarray(x_array)
x_array = x_array.reshape(-1, img_size, img_size, 3)
y_array = to_categorical(y_array, nbClasses)
return (x_array, y_array, y_array_E)
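# Example usage sketch (X_batch is assumed to be an image array of shape (N, 150, 150, 3) and
# y_batch one-hot labels over the 20 classes, matching img_size and nbClasses above):
#   X_a, y_a, yE_a = appear(X_batch, y_batch, isBase=True)
#   X_r, y_r, yE_r = remap(X_batch, y_batch, firstHalf=False)
#   X_t, y_t, yE_t = transfer(X_batch, y_batch, firstHalf=True)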
| [
"matplotlib"
] |
60b1288fcc679da5d67b9846b1f76f6399fb4b36 | Python | david-j-cox/NLP_for_VerbalBehavior | /VB_NLP_main.py | UTF-8 | 11,920 | 3.375 | 3 | [
"MIT"
] | permissive | """
@author:
David J. Cox, PhD, MSB, BCBA-D
[email protected]
https://www.researchgate.net/profile/David_Cox26
"""
# Packages!!
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import nltk, re, pprint
from nltk.corpus import stopwords
stop_words = set(stopwords.words('english'))
import string
#Set current working directory to the folder that contains your data.
# Home PC
os.chdir('C:/Users/coxda/Dropbox/Projects/Current Project Manuscripts/Empirical/NLP for Skinner\'s VB/Text Files')
# Work Mac
os.chdir('/Users/dcox/Dropbox/Projects/Current Project Manuscripts/Empirical/NLP for Skinner\'s VB/Text Files')
# Alternative: download the text from the GitHub repository. The line below is R syntax
# (read.txt / GET) and will not run in Python, so it is left commented out for reference;
# a Python version would fetch the raw file with urllib or requests instead.
# read.txt(text = GET("https://github.com/davidjcox333/NLP_for_VerbalBehavior/blob/master/(1957)%20Verbal%20Behavior%2C%20Skinner.txt"))
# Show current working directory.
dirpath = os.getcwd()
print(dirpath)
# Import Skinner's Verbal Behavior.
VB = open('(1957) Verbal Behavior, Skinner.txt', 'r', encoding='latin-1')
VB_raw = VB.read()
len(VB_raw) # Length of VB.
VB_raw[:100] # First 100 characters of the book.
# Tokenize: break up the text into words and punctuation.
nltk.download('punkt') # Download the 'punkt' tokenizer model needed by nltk.word_tokenize.
tokens_word = nltk.word_tokenize(VB_raw)
tokens_word[:50] # Compare with the above! Now we're working with words and punctuation as the unit.
len(tokens_word) # How many words and punctuation characters do we have?
# Let's find the slice that contains just the text itself, and not the foreword or notes at the end.
# We know the first part begins with "Part I A Program"
VB_raw.find("Part I\n A PROGRAM") # \n is the symbol for the return sign to start a new line.
VB_raw[47046:47080] # Print off some of the text that follows to make sure we've identified it correctly.
# We also know the book ends with a sentence that comprises a paragraph. That final sentence is, "The study of the verbal
# behavior of speaker and listener, as well as of the practices of the verbal environment which generates such behavior,
# may not contribute directly to historical or descriptive linguistics, but it is enough for our present purposes
# to be able to say that a verbal environment could have arisen from nonverbal sources and, in its transmission
# from generation to generation, would have been subject to influences which might account for the multiplication of
# forms and controlling relations and the increasing effectiveness of verbal behavior as a whole."
VB_raw.find("the increasing effectiveness of verbal behavior as a whole.") # Where does this phrase start?
VB_raw[1167958:1168017] # SLice to the end to catch this phrase.
VB_raw[1167436:1168017] # And let's slice in the rest of the paragraph just to be sure there weren't a few sentnece with this phrase in the book.
# Great! Let's slice in the text of the book using the identified start and end points.
VB_text = VB_raw[47046:1168017]
print(VB_text[:50])
# And let's clean this up for analysis by removing punctuation and making all the words lowercase.
VB_nopunct = re.sub(r'[^\w\s]', '', VB_text)
VB_nopunct[:75]
# Note: iterating over the string here lowercases it character by character, so VB_vocab
# below is a set of characters; proper word-level tokenization follows further down.
VB_clean = [w.lower() for w in VB_nopunct]
VB_clean[:50]
VB_vocab = sorted(set(VB_clean))
# Tokenize this.
tokens = nltk.word_tokenize(VB_text)
text = nltk.Text(tokens)
text[:50]
# Make all of the words lowercase.
VB_clean=[w.lower() for w in text]
VB_clean[:50]
# Remove stop words.
VB_stop_elim = [w for w in VB_clean if not w in stop_words]
VB_stop_elim[:50]
# Change punctuation to empty string.
VB_stop_elim = [''.join(c for c in s if c not in string.punctuation) for s in VB_stop_elim]
VB_stop_elim[:50]
# Remove empty string.
VB_filtered = [s for s in VB_stop_elim if s]
VB_filtered[:50]
''''''''''''''''''''''''''''''''''''''''''
# Now we can play!
''''''''''''''''''''''''''''''''''''''''''
# How many times does Skinner mention different verbal operants.
mand_count = VB_filtered.count('mand') + VB_filtered.count('mands') + VB_filtered.count('manded')
tact_count = VB_filtered.count('tact') + VB_filtered.count('tacts') + VB_filtered.count('tacted')
echoic_count = VB_filtered.count('echoic') + VB_filtered.count('echoics')
intraverbal_count = VB_filtered.count('intraverbal') + VB_filtered.count('intraverbals')
textual_count = VB_filtered.count('textual') + VB_filtered.count('textuals')
transcription_count = VB_filtered.count('transcription') + VB_filtered.count('transcriptions')
# What are the context surrounding there use?
text.concordance('mand') # note: 'mand' or 'mands' evaluates to just 'mand', so query one form at a time
text.concordance('tact') # Same questions for 'tact'?
# Get the data ready for plotting.
vb_operant_data = [mand_count, tact_count, echoic_count, intraverbal_count, textual_count, transcription_count]
bars = ('Mand', 'Tact', 'Echoic', 'Intraverbal', 'Textual', 'Transcription')
y_pos = np.arange(len(bars))
# PLot it.
plt.bar(y_pos, vb_operant_data, color='black')
plt.xticks(y_pos, bars)
plt.ylabel('Count in Verbal Behavior (Skinner, 1957)')
plt.show()
# How about the same for speaker and listener?
speaker_count= VB_filtered.count('speaker') + VB_filtered.count('speakers')
listener_count = VB_filtered.count('listener') + VB_filtered.count('listeners')
sp_li_data = [speaker_count, listener_count]
bars = ('Speaker', 'Listener')
y_pos = np.arange(len(bars))
plt.bar(y_pos, sp_li_data, color='black', width = 0.6, align='center')
plt.xticks(y_pos, bars)
plt.ylabel('Count in Verbal Behavior (Skinner, 1957)')
plt.show()
# Can we do a wordcloud of the entire book?
from PIL import Image
from wordcloud import WordCloud
# Load a picture of Skinner's face that will be used for wordcloud shape.
cloud_mask = np.array(Image.open("Skinner.jpg"))
# Create the wordcloud.
VB_words = [w.lower() for w in VB_filtered]
VB_word_string = ' '.join(VB_words)
wordcloud = WordCloud(mask=cloud_mask).generate(VB_word_string)
# Show wordcloud
plt.figure()
plt.imshow(wordcloud, interpolation='bilinear')
plt.axis("off")
plt.margins(x=0, y=0)
plt.show()
# How about a frequency distribution of the top 50 words used in VB?
fdist = nltk.FreqDist(VB_filtered)
top_50 = fdist.keys()
fdist.plot(39, cumulative = True)
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
# Let's do some NLP. Officially, a scatter plot of PCA projection of Word2Vec Model.
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
from nltk.tokenize import sent_tokenize
from gensim.models import Word2Vec
from nltk.corpus import stopwords
sentences = nltk.sent_tokenize(VB_text) # Break the text into sentences.
sentences[:5] # Take a peek at the first 5 sentences.
sent =[] # Create an empty list we'll put each of our word tokenized sentences into.
stop_words = set(stopwords.words('english')) # Identify the stopwords to clean from text.
for i in sentences: # Break each sentence into words, and then append the empty list we just created.
words = nltk.word_tokenize(i)
stop_elim_sentences = [w for w in words if not w in stop_words]
stop_elim_sentences_2 = [''.join(c for c in s if c not in string.punctuation) for s in stop_elim_sentences]
filtered_sentences = [s for s in stop_elim_sentences_2 if s]
sent.append(filtered_sentences)
sent[:10] # Take a peek at the first to make sure everything looks good.
# Train model.
model = Word2Vec(sent, min_count=1)
# Summarize loaded model
print(model)
# Summarize vocabulary.
NLP_words = list(model.wv.vocab)
print(NLP_words)
len(NLP_words)
# Access vector for one word.
print(model['mand'])
# Save model
model.save('model.bin')
# Load model.
new_model = Word2Vec.load('model.bin')
print(new_model)
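# Hedged sketch (not in the original script): a quick qualitative check on the trained
# embeddings via gensim's similarity query; the probe word is an assumption and must be
# present in the model vocabulary for the lookup to succeed.
def example_nearest_neighbours(w2v_model, probe='mand', topn=5):
    if probe in w2v_model.wv.vocab:
        return w2v_model.wv.most_similar(probe, topn=topn)
    return []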
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
Plot it up using PCA to create the 2-dimensions for plotting.
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
from sklearn.decomposition import PCA
# Train model.
model = Word2Vec(sent, min_count = 1)
# Fit a 2D PCA model to the vectors.
X = model[model.wv.vocab]
pca = PCA(n_components=2)
result = pca.fit_transform(X)
# Create scatter plot of the projection.
plt.scatter(result[:, 0], result[:, 1], marker='')
plt.ylim(-.15, .065)
plt.xlim(-.5, 9)
plt.title('Scatter of PCA Projection of Word2Vec Model')
plt.xlabel('PCA 1')
plt.ylabel('PCA 2')
words = list(model.wv.vocab)
for i, word in enumerate(words):
plt.annotate(word, xy=(result[i, 0], result[i, 1]))
plt.show()
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
Plot it up using PCA to create the 3-dimensions for plotting.
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
from mpl_toolkits.mplot3d import axes3d, Axes3D
# Train model.
model = Word2Vec(sent, min_count = 1)
# Fit a 3D PCA model to the vectors.
X = model[model.wv.vocab]
pca = PCA(n_components=3)
pca.fit(X)
result = pd.DataFrame(pca.transform(X), columns=['PCA%i' % i for i in range(3)])
# Plot initialization.
fig = plt.figure()
ax = Axes3D(fig)
ax.scatter(result['PCA0'], result['PCA1'], result['PCA2'], c='black', cmap="Set2_r", s=60)
for i, word in enumerate(words):
ax.annotate(word, (result['PCA1'][i], result['PCA0'][i]))
# Simple bare axis lines through space:
xAxisLine = ((min(result['PCA0']), max(result['PCA0'])), (0,0), (0, 0))
ax.plot(xAxisLine[0], xAxisLine[1], xAxisLine[2], 'r')
yAxisLine = ((0,0), (min(result['PCA1']), max(result['PCA1'])), (0,0))
ax.plot(yAxisLine[0], yAxisLine[1], yAxisLine[2], 'r')
zAxisLine = ((0,0), (0,0), (min(result['PCA2']), max(result['PCA2'])))
ax.plot(zAxisLine[0], zAxisLine[1], zAxisLine[2], 'r')
#label the axes
ax.set_xlabel("PC1")
ax.set_ylabel("PC2")
ax.set_zlabel("PC3")
ax.set_xlim(-1, 10)
ax.set_ylim(-5, 5)
ax.set_zlim(-10, 10)
ax.set_title("3D Scatter of PCA of Word2Vec Model")
words = list(model.wv.vocab)
fig
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
That's messy. So, let's use Latent Dirichlet Allocation (LDA)
to plot all of the words grouped into 20 topics using machine learning.
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
from sklearn.feature_extraction.text import CountVectorizer
vect = CountVectorizer(max_features=10000)
X = vect.fit_transform(words)
from sklearn.decomposition import LatentDirichletAllocation
lda = LatentDirichletAllocation(n_topics=20, learning_method="batch", max_iter=50, random_state=3) # note: n_topics is called n_components in newer scikit-learn versions
document_topics = lda.fit_transform(X)
print("lda.componenets_.shape: {}".format(lda.components_.shape))
sorting=np.argsort(lda.components_, axis=1)[:, ::-1]
feature_names=np.array(vect.get_feature_names())
import mglearn
mglearn.tools.print_topics(topics=range(20), feature_names=feature_names, sorting=sorting, topics_per_chunk=5, n_words=10)
fig, ax = plt.subplots(1, 2, figsize=(10, 10))
topic_names = ["{:>2} ".format(i) + " ".join(words) \
for i, words in enumerate(feature_names[sorting[:, :2]])]
for col in [0, 1]:
start = col * 10
end = (col +1) * 10
ax[col].barh(np.arange(10), np.sum(document_topics, axis=0)[start:end], color='black')
ax[col].set_yticks(np.arange(10))
ax[col].set_yticklabels(topic_names[start:end], ha="left", va="top")
ax[col].invert_yaxis()
ax[col].set_xlim(0, 700)
yax=ax[col].get_yaxis()
yax.set_tick_params(pad=130)
plt.tight_layout()
plt.show()
# Use PCA to plot the resulting topics.
# Train model.
model = Word2Vec(feature_names, min_count = 1)
# Fit a 2D PCA model to the vectors.
X = model[model.wv.vocab]
pca = PCA(n_components=2)
result = pca.fit_transform(X)
# Create scatter plot of the projection.
plt.scatter(result[:, 0], result[:, 1], marker='o', color='black')
plt.ylim(-1, 1)
plt.xlim(-1, 1)
plt.title('Scatter of PCA Projection of Word2Vec Model')
plt.xlabel('PCA 1')
plt.ylabel('PCA 2')
words = list(topic_names)
for i, word in enumerate(words):
    plt.annotate(word, xy=(result[i, 0], result[i, 1]))
plt.show()
| [
"matplotlib"
] |
149f7c03c3209f7411db4f7b264db79d7b8199f9 | Python | aygulmardanova/python-start | /generate-data/generate-cluster-data.py | UTF-8 | 248 | 2.78125 | 3 | [] | no_license | from sklearn.datasets.samples_generator import make_blobs
import matplotlib.pyplot as plt
X = make_blobs(n_samples=3000, centers=4, n_features=1, center_box=(-20.0, 20.0)) # returns a (samples, labels) tuple
print X
plt.plot(X[0], X[1], 'ro') # feature values on x, cluster labels on y
plt.axis([-20, 20, -1, 5])
plt.show()
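# Hedged sketch (not in the original script): the generated blobs could be clustered
# directly, e.g. with scikit-learn's KMeans; parameters mirror the call above.
def example_cluster_blobs(n_clusters=4):
    from sklearn.cluster import KMeans
    data, true_labels = make_blobs(n_samples=3000, centers=n_clusters, n_features=1,
                                   center_box=(-20.0, 20.0))
    km = KMeans(n_clusters=n_clusters).fit(data)
    return km.cluster_centers_, true_labels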
| [
"matplotlib"
] |
1005f478daf50a6591f88bd63bb819d5ca6fe322 | Python | bdosremedios/Y2_Computational_Physics_Projects | /bdosremedios_assignment_8/linear_fit.py | UTF-8 | 1,966 | 3.65625 | 4 | [] | no_license | import numpy as np #imports numpy abbreviated to np
import scipy.optimize as spo #imports scipy.optimize abbreviated to spo
import matplotlib.pyplot as plt #imports matplotlib.pyplot as abbreviated plt
# float float float -> float
# takes in b x and c and calculates and returns y; y = bx + c
def calcy(x,b,c):
return b*x+c
examplearray = np.loadtxt("examplearray.txt") #loads array from examplearray.txt
print(examplearray) #prints example array
xdata = examplearray[:,1] #store x col in xdata
ydata = examplearray[:,2] #store y col in ydata
sigma1 = examplearray[:,4] #store yerror in sigma1
linearfit = spo.curve_fit(calcy, xdata, ydata, sigma=sigma1, absolute_sigma=True) #optimizes curve for data in examplearray.txt using calcy giving b and c
parameterserr = np.sqrt(np.diag(linearfit[1])) #calculates and stores error in parameters
parameters = linearfit[0] #stores b and c in parameters
count = range(0,6) #creates range to count over for for loop
linearfity = np.array([0.0,0.0,0.0,0.0,0.0,0.0]) #creats empty array to store values
for i in count: #applies linear fit parameters and function to each x in x storing in linear fity
y = calcy(xdata[i], parameters[0], parameters[1])
linearfity[i] = y
plt.errorbar(xdata, ydata, sigma1, fmt="rD") #creates plot line with error bards
plt.plot(xdata, linearfity, "b-") #creates best fit line
plt.xlabel("x values") #labels x axis
plt.ylabel("y values") #labels y axis
plt.title("Linear fit of examplearray.txt for exercise 1.") # adds title to plot
plt.savefig("linear_fit.pdf") #saves plot as linear_fit.pdf
plt.show(block=False) #shows plot
file=open("linear_fit.txt", "w") #opens file linear_fit.txt to write
file.write("b = {} +/- {}\n".format (parameters[0], parameterserr[0])) #writes parameter b and standard deviation of b in file
file.write("c = {} +/- {}\n".format (parameters[1], parameterserr[1])) #writes parameter c and standard deviation of a in file
file.close() #closes file
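# Hedged extension (not part of the assignment output): a reduced chi-squared for the
# linear fit, computed from the residuals weighted by the measurement errors.
def reduced_chi_squared(x, y, yerr, b, c):
    residuals = (y - calcy(x, b, c)) / yerr # error-weighted residuals
    return np.sum(residuals**2) / (len(x) - 2) # 2 fitted parameters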
| [
"matplotlib"
] |
7450180c4f3b654b61cdce757044a479e054735d | Python | lizametcalfe/API-code | /Job_desc_api/Adzuna_API_python_code.py | UTF-8 | 2,392 | 2.96875 | 3 | [] | no_license | from urllib2 import Request, urlopen, URLError
import json
from pandas.io.json import json_normalize
import requests
import pandas as pd
import matplotlib
import numpy as np
import time
import sys
#stop truncating of strings
pd.options.display.max_seq_items = 2000
pd.options.display.max_colwidth = 1000
N = 10000
#go up from here
#Courses: https://www.codecademy.com/apis
#URL for Adzuna
#Parameters:
# First page
# unique API key, please register on the website and change this to your own: https://developer.adzuna.com/overview
# category is IT job, this can be removed (collect all) or changed to reflect what you would like to collect.
# To see other options see: http://api.adzuna.com/static/swagger-ui/index.html#!/adzuna/search_get_0
the = 'http://api.adzuna.com/v1/api/jobs/gb/search/1?app_id="add your appd id here"&app_key="add your app key here"&category=it-jobs&results_per_page=50&content-type=application/text/html'
#start from here
#request API results for the URL of interest and return dataframe, return "That's your lot!" when finished.
#pause for 5 seconds between requests
def adzunalapi(url):
time.sleep(5)
request = Request(url)
try:
response = urlopen(request)
resp = response.read()
data = json.loads(resp)
except URLError, e:
print 'No response. Got an error code:', e
try:
df = json_normalize(data['results'])
return df
except:
sys.exit("That's your lot!")
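# Hedged sketch (added): a single page can be fetched in isolation before running the full
# paging loop below; this reuses the URL template `the` defined above.
def example_single_page():
    return adzunalapi(the.replace("1?", "2?")) # fetch page 2 with the same query parameters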
#Cycle through the URL's changing the page number from 1 to 2...
df = pd.DataFrame({ 'A' : range(720, N + 1 ,1)})
df["URL"]= df["A"].apply(lambda x: the.replace("1?",str(x)+"?"))
dff=pd.DataFrame()
for i in df["URL"]:
print(i)
try:
dff = dff.append(adzunalapi(i))
except:
pass
#simplify the created variable
dff=dff.reset_index()
dff["created2"]=dff["created"].apply(lambda x: str(x)[8:10])
dff["created2"]=dff["created2"].apply(lambda x: x.replace("-",""))
#take out the regional information from the location field
foo = lambda x: pd.Series([i for i in str(x).split(',')])
dff["region"] = dff['location_area'].apply(foo)[1]
dff["region"] = dff["region"].apply(lambda x: str(x).replace("',","").replace("u","").replace("]",""))
#save data
dff.to_csv("C:\\.....csv", encoding='utf-8')
dff = dff.reset_index() | [
"matplotlib"
] |
387b5834cf698e720fa255e03486b16125a2737b | Python | Oschart/Embedded-Heart-Monitor | /main_app/main.py | UTF-8 | 3,450 | 2.59375 | 3 | [] | no_license | import serial
import serial.tools.list_ports
import datetime as dt
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import matplotlib.ticker as ticker
import PySimpleGUI as sg
from app_utils import *
print('Welcome to my ECG application!')
# Get list of available ports
comports_list = list(
map(lambda p: p.device, serial.tools.list_ports.comports()))
sg.theme('DarkAmber') # Add a touch of color
window = sg.Window('ECG Heart Monitor', connect_layout(comports_list),
size=(400, 250), element_justification='center')
ani, ax, values, x_cnt = None, None, None, 0
num_of_samples = 0
# action callbacks
def SSR():
global num_of_samples
X = values['srate']
num_of_samples = int(X)*60
print(X)
print('number of samples = ', num_of_samples)
cmd = 'SSR ' + X + '$'
uC_transmit(serial_p, cmd)
def C1MWD():
global ax, ani, x_cnt
cmd = 'C1MWD$'
uC_transmit(serial_p, cmd)
x_cnt = 0
fig = plt.figure(1,figsize=(15, 7), dpi=80)
fig.canvas.mpl_connect('close_event', TEARUP)
ax = fig.add_subplot(1, 1, 1)
xs = []
ys = []
ani = animation.FuncAnimation(fig, animate, fargs=(xs, ys), interval=10)
plt.show(block=False)
def RHBR():
cmd = 'RHBR$'
uC_transmit(serial_p, cmd)
hbr = uC_receive(serial_p)
if hbr == -2:
hbr = '-'
while hbr == -1:
hbr = uC_receive(serial_p)
window['HBR'].update(hbr)
def TEARUP(evt):
global ani
print('transmission tear-up!')
uC_transmit(serial_p, '#')
ani.event_source.stop()
plt.close()
res = uC_receive(serial_p)
while res != -1:
res = uC_receive(serial_p)
button_actions = {
'SSR': SSR,
'C1MWD': C1MWD,
'RHBR': RHBR,
}
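# Hedged note (added): the event loop at the bottom dispatches button presses through this
# mapping (button_actions[event]()), so a new command only needs a callback here plus a
# matching button key in the control layout; sketch with a purely hypothetical command string:
def example_ping():
    uC_transmit(serial_p, 'PING$')  # 'PING$' is a hypothetical command, not from the firmware
# button_actions['PING'] = example_ping  # would expose it to the event loop below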
# This function is called periodically from FuncAnimation
def animate(i, xs, ys):
# Check if figure window is still open
if plt.fignum_exists(1) == False:
return
global ax, ani, x_cnt
# Read heart pulse from uC
beat = int(uC_receive(serial_p))
print(beat)
if beat == -1:
ani.event_source.stop()
return
elif beat == -2:
beat = 2048
# Add x and y to lists
xs.append(x_cnt)
ys.append(beat)
x_cnt = x_cnt+1
# Limit x and y lists to 20 items
#xs = xs[-30:]
#ys = ys[-30:]
# Draw x and y lists
ax.clear()
ax.plot(xs, ys, color='r', linewidth=0.5)
ax.xaxis.set_major_locator(ticker.MultipleLocator(num_of_samples*0.1))
plt.gca().set_ylim([0, 4096])
plt.gca().set_xlim([0, num_of_samples])
# Format plot
plt.xticks(rotation=45, ha='right')
plt.subplots_adjust(bottom=0.30)
plt.title('Heart Activity')
# plt.grid()
while True:
event, values = window.read()
print(values)
print(event)
if event in (None, 'Exit'): # if user closes window or clicks exit
break
if event == 'Start':
com_port = values['com']
baud_rate = int(values['baudrate'])
# Open the serial port
serial_p = serial.Serial(port=com_port, baudrate=baud_rate,
bytesize=8, timeout=2, stopbits=serial.STOPBITS_ONE)
# Initiate control panel layout
window.close()
window = sg.Window('ECG Heart Monitor', control_layout(),
size=(500, 500), element_justification='center')
if event in button_actions:
button_actions[event]()
window.close() | [
"matplotlib"
] |
f94cbec028a17e8b38758ebadbee62812a3340b3 | Python | ManyBodyPhysics/LectureNotesPhysics | /doc/src/Chapter10-programs/python/srg_nn/srg_nn.py | UTF-8 | 10,160 | 2.65625 | 3 | [
"CC0-1.0"
] | permissive | #!/usr/bin/env python
#------------------------------------------------------------------------------
# srg_nn.py
#
# author: H. Hergert
# version: 1.1.0
# date: Nov 21, 2016
#
# tested with Python v2.7
#
# SRG evolution of a chiral NN interaction with cutoff Lambda in the deuteron
# partial waves, using a Gauss-Legendre momentum mesh.
#
#------------------------------------------------------------------------------
import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter
from matplotlib.colors import SymLogNorm, Normalize
from mpl_toolkits.axes_grid1 import AxesGrid, make_axes_locatable
import numpy as np
from numpy import array, dot, diag, reshape, sqrt
from math import sqrt, pi
from scipy.linalg import eigvalsh
from scipy.integrate import ode
#------------------------------------------------------------------------------
# constants
#------------------------------------------------------------------------------
hbarm = 41.4710570772
#------------------------------------------------------------------------------
# helpers
#------------------------------------------------------------------------------
def find_nearest(array, value):
distance = np.absolute(array-value)
indices = np.where(distance == np.min(distance))
return indices[0]
#------------------------------------------------------------------------------
# plot matrix snapshots
#------------------------------------------------------------------------------
def plot_snapshots(Hs, flowparams, momenta, qMax):
fig = plt.figure(1, (10., 50.))
nplots = len(flowparams)
ncols = 1
nrows = nplots
grid = AxesGrid(fig, 111, # similar to subplot(111)
nrows_ncols=(nrows, ncols), # creates grid of axes
axes_pad=1., # pad between axes in inch.
label_mode='all', # put labels on left, bottom
cbar_mode='each', # color bars
cbar_pad=0.20, # insert space between plots and color bar
cbar_size='10%' # size of colorbar relative to last image
)
hmax = 0.0
hmin = 0.0
for h in Hs:
hmax = max(hmax, np.ma.max(h))
hmin = min(hmin, np.ma.min(h))
# get indices of max. momenta
cmax, ccmax = find_nearest(momenta, qMax)
edge = len(momenta)/2
# create individual snapshots - figures are still addressed by single index,
# despite multi-row grid
for s in range(Hs.shape[0]):
h = np.vstack((np.hstack((Hs[s,0:cmax,0:cmax], Hs[s,0:cmax,edge:ccmax])),
np.hstack((Hs[s,edge:ccmax,0:cmax], Hs[s,edge:ccmax,edge:ccmax]))
))
img = grid[s].imshow(h,
cmap=plt.get_cmap('RdBu_r'), # choose color map
interpolation='bicubic',
# filterrad=10,
norm=SymLogNorm(linthresh=0.0001, vmax=2., vmin=-2.0), # normalize
vmin=-2.0, # min/max values for data
vmax=2.0
)
# contours
levels = np.arange(-2, 1, 0.12)
grid[s].contour(h, levels, colors='black', ls="-", origin='lower',linewidths=1)
# plot labels, tick marks etc.
grid[s].set_title('$\\lambda=%s\,\mathrm{fm}^{-1}$'%flowparams[s])
grid[s].set_xticks([0,20,40,60,80,100,120,140,160])
grid[s].set_yticks([0,20,40,60,80,100,120,140,160])
grid[s].set_xticklabels(['$0$','$1.0$','$2.0$','$3.0$','$4.0$','$1.0$','$2.0$','$3.0$','$4.0$'])
grid[s].set_yticklabels(['$0$','$1.0$','$2.0$','$3.0$','$4.0$','$1.0$','$2.0$','$3.0$','$4.0$'])
grid[s].tick_params(axis='both',which='major',width=1.5,length=5)
grid[s].tick_params(axis='both',which='minor',width=1.5,length=5)
grid[s].axvline(x=[80],color="black", ls="--")
grid[s].axhline(y=[80],color="black", ls="--")
grid[s].xaxis.set_label_text("$q\,[\mathrm{fm}^{-1}]$")
grid[s].yaxis.set_label_text("$q'\,[\mathrm{fm}^{-1}]$")
# color bar
cbar = grid.cbar_axes[s]
plt.colorbar(img, cax=cbar,
ticks=[ -2.0, -1.0, -1.0e-1, -1.0e-2, -1.0e-3, -1.0e-4, 0.,
1.0e-4, 1.0e-3, 1.0e-2, 1.0e-1, 1.0]
)
cbar.axes.set_yticklabels(['$-2.0$', '$-1.0$', '$-10^{-1}$', '$-10^{-2}$',
'$-10^{-3}$', '$-10^{-4}$','$0.0$', '$10^{-4}$', '$10^{-3}$', '$10^{-2}$',
'$10^{-1}$', '$1.0$'])
cbar.set_ylabel("$V(q,q')\,\mathrm{[fm]}$")
# save figure
plt.savefig("srg_n3lo500.pdf", bbox_inches="tight", pad_inches=0.05)
plt.savefig("srg_n3lo500.png", bbox_inches="tight", pad_inches=0.05)
#plt.show()
return
#------------------------------------------------------------------------------
# matrix element I/O, mesh functions
#------------------------------------------------------------------------------
def uniform_weights(momenta):
weights = np.ones_like(momenta)
weights *= abs(momenta[1]-momenta[0])
return weights
def read_mesh(filename):
data = np.loadtxt(filename, comments="#")
dim = data.shape[1]
momenta = data[0,:dim]
return momenta
def read_interaction(filename):
data = np.loadtxt(filename, comments="#")
dim = data.shape[1]
V = data[1:,:dim]
return V
#------------------------------------------------------------------------------
# commutator
#------------------------------------------------------------------------------
def commutator(a,b):
return dot(a,b) - dot(b,a)
#------------------------------------------------------------------------------
# flow equation (right-hand side)
#------------------------------------------------------------------------------
def derivative(lam, y, T):
dim = T.shape[0]
# reshape the solution vector into a dim x dim matrix
V = reshape(y, (dim, dim))
# calculate the generator
eta = commutator(T, V)
# dV is the derivative in matrix form
dV = -4.0/(lam**5) * commutator(eta, T+V)
# convert dH into a linear array for the ODE solver
dy = reshape(dV, -1)
return dy
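# Worked form of the equation coded above (added note): with eta = [T, V], the integrated
# flow is
#
#   dV/dlambda = -(4 / lambda**5) [[T, V], T + V],
#
# i.e. the SRG equation dH/ds = [[T, H], H] for H = T + V, rewritten in the cutoff
# variable lambda = s**(-1/4) (so ds/dlambda = -4 lambda**(-5)).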
#------------------------------------------------------------------------------
# Main program
#------------------------------------------------------------------------------
def main():
# duplicate the mesh points (and weights, see below) because we have a
# coupled-channel problem
mom_tmp = read_mesh("n3lo500_3s1.meq")
momenta = np.concatenate([mom_tmp,mom_tmp])
weights = uniform_weights(momenta)
dim = len(momenta)
# set up p^2 (kinetic energy in units where h^2/2\mu = 1)
T = diag(momenta*momenta)
# set up interaction matrix in coupled channels:
#
# / V_{3S1} V_{3S1-3D1} \
# \ V_{3S1-3D1}^\dag V_{3D1} /
# read individual partial waves
partial_waves=[]
for filename in ["n3lo500_3s1.meq", "n3lo500_3d1.meq", "n3lo500_3sd1.meq"]:
partial_waves.append(read_interaction(filename))
# print partial_waves[-1].shape
# assemble coupled channel matrix
V = np.vstack((np.hstack((partial_waves[0], partial_waves[2])),
np.hstack((np.transpose(partial_waves[2]), partial_waves[1]))
))
# switch to scattering units
V = V/hbarm
# set up conversion matrix for V: this is used to absorb momentum^2 and
# weight factors into V, so that we can use the commutator routines for
# eta and the derivative as is
conversion_matrix = np.zeros_like(T)
for i in range(dim):
for j in range(dim):
# Regularize the conversion matrix at zero momentum - set elements
# to machine precision so we can invert the matrix for plots etc.
# Note that momentum values are positive, by construction.
qiqj = max(np.finfo(float).eps, momenta[i]*momenta[j])
conversion_matrix[i,j] = qiqj*sqrt(weights[i]*weights[j])
V *= conversion_matrix
# turn initial interaction into a linear array
y0 = reshape(V, -1)
# flow parameters for snapshot images - the initial lambda should be
# infinity, we use something reasonably large
lam_initial = 20.0
lam_final = 1.5
# integrate using scipy.ode instead of scipy.odeint - this gives
# us more control over the solver
solver = ode(derivative,jac=None)
# equations may get stiff, so we use VODE and Backward Differentiation
solver.set_integrator('vode', method='bdf', order=5, nsteps=1000)
solver.set_f_params(T)
solver.set_initial_value(y0, lam_initial)
print("%-8s %-14s"%("s", "E_deuteron [MeV]"))
print("-----------------------------------------------------------------------------------------------------------------")
# calculate exact eigenvalues
print("%8.5f %14.8f"%(solver.t, eigvalsh((T + V)*hbarm)[0]))
flowparams=([lam_initial])
Vs=([V])
while solver.successful() and solver.t > lam_final:
# adjust the step size in different regions of the flow parameter
if solver.t >= 6.0:
ys = solver.integrate(solver.t-1.0)
elif solver.t < 6.0 and solver.t >= 2.5:
ys = solver.integrate(solver.t-0.5)
elif solver.t < 2.5 and solver.t >= lam_final:
ys = solver.integrate(solver.t-0.1)
# add evolved interactions to the list
flowparams.append(solver.t)
Vtmp = reshape(ys,(dim,dim))
Vs.append(Vtmp)
print("%8.5f %14.8f"%(solver.t, eigvalsh((T + Vtmp)*hbarm)[0]))
# generate snapshots of the evolution
plot_snapshots((Vs[-2:]/conversion_matrix), flowparams[-2:], momenta, 4.0)
return
#------------------------------------------------------------------------------
# make executable
#------------------------------------------------------------------------------
if __name__ == "__main__":
main() | [
"matplotlib"
] |
9687e2ef03ae47ee8da4844d73ec64b309f7fa7d | Python | sbaio/Restricted-Boltzmann-Machine | /Code/show_images.py | UTF-8 | 2,206 | 2.890625 | 3 | [] | no_license |
from loadMNIST import load_mnist
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
def showImage(image):
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
imgplot = ax.imshow(image,cmap=mpl.cm.Greys)
imgplot.set_interpolation('nearest')
ax.xaxis.set_ticks_position('top')
ax.yaxis.set_ticks_position('left')
plt.show()
def show_10_Images(image):
fig = plt.figure()
for i in range(10):
ax = fig.add_subplot(2,5,i+1)
imgplot = ax.imshow(image,cmap=mpl.cm.Greys)
imgplot.set_interpolation('nearest')
ax.xaxis.set_ticks_position('top')
ax.yaxis.set_ticks_position('left')
plt.show()
def showImages(images):
# for small number of images
fig = plt.figure()
n = len(images)
for i in range(n):
ax = fig.add_subplot(1,n,i+1)
image = images[i]
imgplot = ax.imshow(image,cmap=mpl.cm.Greys)
imgplot.set_interpolation('nearest')
ax.xaxis.set_ticks_position('top')
ax.yaxis.set_ticks_position('left')
plt.show()
def plot_10_by_10_images(images):
""" Plot 100 MNIST images in a 10 by 10 table. Note that we crop
the images so that they appear reasonably close together. The
image is post-processed to give the appearance of being continued."""
n = images.shape[0]
q = n // 10
r = n%10
print n,q,r
fig = plt.figure()
plt.ion()
for x in range(q):
print x
if not x%10:
plt.clf()
for y in range(10):
ax = fig.add_subplot(10, 10, 10*y+x%10+1)
ax.matshow(images[10*y+x%10], cmap = mpl.cm.binary)
plt.xticks(np.array([]))
plt.yticks(np.array([]))
plt.show()
_=raw_input("Press enter to show next 10")
def generate_random_image():
# generate random image of type uint8 and size 28*28
a = np.random.randint(256,size=28*28,dtype='uint8')
a = a.reshape((28,28))
return a
def image_to_vector(im):
b = np.squeeze(im.reshape((-1,1)))/255.
return b
def vec_to_image(vec):
b = np.reshape(vec,(28,28))
return b
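# Hedged sketch (added): image_to_vector scales pixel values into [0, 1], so a round trip
# back to an 8-bit image needs the factor of 255 restored.
def example_round_trip(im):
    vec = image_to_vector(im)
    return vec_to_image(vec * 255.).astype('uint8')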
images, labels = load_mnist('training', digits=np.arange(10), path = '../Data/')
a = generate_random_image()
#a = images[0]
#b = np.squeeze(a.reshape((-1,1)))/255.
#print b.shape
#print b[:]
showImage(images[0])
#c = vec_to_image(b)
#showImage(c)
#showImages([a,c])
#showImage(d)
#print c.shape
| [
"matplotlib"
] |
46b226c5cf3ae78e733beaf9f72e0a5ee60b62ae | Python | antonkast-google/rw-dynamicworld-cd | /wri_change_detection/hmm.py | UTF-8 | 17,092 | 2.65625 | 3 | [
"MIT"
] | permissive |
import ee
import requests
import numpy as np
import cmocean
from rasterio.plot import show_hist, show
import pandas as pd
from sklearn import metrics
import matplotlib.colors
import matplotlib.pyplot as plt
DW_CLASS_LIST = ['water','trees','grass','flooded_vegetation','crops','scrub','built_area','bare_ground','snow_and_ice']
DW_CLASS_COLORS = ["419BDF", "397D49", "88B053", "7A87C6", "E49635", "DFC35A", "C4281B", "A59B8F", "B39FE1"]
change_detection_palette = ['#ffffff', # no_data=0
'#419bdf', # water=1
'#397d49', # trees=2
'#88b053', # grass=3
'#7a87c6', # flooded_vegetation=4
'#e49535', # crops=5
'#dfc25a', # scrub_shrub=6
'#c4291b', # builtup=7
'#a59b8f', # bare_ground=8
'#a8ebff', # snow_ice=9
'#616161', # clouds=10
]
statesViz = {'min': 0, 'max': 10, 'palette': change_detection_palette}
boundary_scale = [-0.5, 0.5, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5, 8.6, 9.5, 10.5] #bin edges
dw_class_cmap=matplotlib.colors.ListedColormap(change_detection_palette)
dw_norm=matplotlib.colors.BoundaryNorm(boundary_scale, len(change_detection_palette))
labels_dict = {
0:'no_data',
1:'water',
2:'trees',
3:'grass',
4:'flooded_veg',
5:'crops',
6:'scrub',
7:'builtup',
8:'bare_ground',
9:'snow_ice',
10:'clouds'
}
classes_dict = {
0:'water',
1:'trees',
2:'grass',
3:'flooded_vegetation',
4:'crops',
5:'scrub',
6:'built_area',
7:'bare_ground',
8:'snow_and_ice'
}
def retrieve_filtered_img(collection, year, target_img, reducer=ee.Reducer.mean(),scale=10):
"""
Function to filter, clip, and reduce a collection
Args:
collection (ee.imagecollection.ImageCollection): image to download
year (int or str): Year to pass to an ee filterDate function
target_img (ee.image.Image): Clip the collection geometry to this image geometry
reducer (ee.Reducer): reducer method to perform on the collection
scale (int): scale in meters
Returns:
out_image (ee.image.Image): date filtered, geometry clipped image
"""
out_image = collection.filterDate(
f'{year}-01-01', f'{year}-12-31').reduce(
reducer).clipToBoundsAndScale(
geometry=target_img.geometry(),scale=scale).toFloat()
return out_image
def stream_file_to_disc(url, filename):
"""
Function to instantiate a Requests session to facilitate a download from url
Args:
url (string): url to initiate download
filename (string): file path and file name to write download stream to
Returns:
Writes download to the specified location
"""
with requests.Session() as session:
get = session.get(url, stream=True)
if get.status_code == 200:
#print('downloading')
with open(filename, 'wb') as f:
for chunk in get.iter_content(chunk_size=1024):
f.write(chunk)
else:
print(get.status_code)
def write_block(img, dst_filename, temp_dir):
"""
Function to download specified ee image
Args:
img (ee.image.Image): image to download
dst_filename (String): file name to write to
temp_dir (tempfile.TemporaryDirectory): will be used as os path prefix to `dst_filename`
arg to construct path
Returns:
Constructs download url for specified img, downloads geotiff to destination
"""
url = ee.data.makeThumbUrl(ee.data.getThumbId({'image':img, 'format':'geotiff'}))
filepath = f'{temp_dir}/{dst_filename}'
stream_file_to_disc(url, filepath)
def calc_annual_change(model, years):
"""
Function to calculate annual total class change between years
Args:
model (dict): year ints as keys and rasterio dataset pointers as values
years (list): years to loop through for calculating annual change
Returns:
Prints annual fraction of sample pixels changing classes
"""
for i in years[:-1]:
y0 = model[f'{i}'].read().argmax(axis=0).ravel()
y1 = model[f'{i+1}'].read().argmax(axis=0).ravel()
accuracy_score = metrics.accuracy_score(y0,y1)
yr_change = 1-accuracy_score
print(f'{i}-{i+1}: {yr_change:.2f}')
def show_year_diffs_and_classes(dwm, hmm, years, cmap='gray'):
"""
Function to plot change layer between predicted labels for each year
Args:
dwm (dict): Dynamic World Model outputs, with year ints as keys and rasterio dataset pointers as values
hmm (dict): Hidden Markov Model outputs, with year ints as keys and rasterio dataset pointers as values
years (list): years to loop through for calculating annual change
cmap (cmap): colormap
Returns:
Plots change layer between predicted class labels for each year
"""
fig, axs = plt.subplots(len(years)-1,6,figsize=(6*3,(len(years)-1)*3))
for i,x in enumerate(years[:-1]):
show(dwm[f'{x}'].read().argmax(axis=0)+1,ax=axs[i,0],
cmap=dw_class_cmap,norm=dw_norm,title=f'dwm {x}')
show(np.equal(dwm[f'{x}'].read().argmax(axis=0),dwm[f'{x+1}'].read().argmax(axis=0)),
ax=axs[i,1], cmap=cmap, title=f'dwm {x}-{x+1}')
show(dwm[f'{x+1}'].read().argmax(axis=0)+1,ax=axs[i,2],
cmap=dw_class_cmap,norm=dw_norm,title=f'dwm {x+1}')
show(hmm[f'{x}'].read().argmax(axis=0)+1,ax=axs[i,3],
cmap=dw_class_cmap,norm=dw_norm,title=f'hmm {x}')
show(np.equal(hmm[f'{x}'].read().argmax(axis=0),hmm[f'{x+1}'].read().argmax(axis=0)),
ax=axs[i,4], cmap=cmap,title=f'hmm {x}-{x+1}')
show(hmm[f'{x+1}'].read().argmax(axis=0)+1,ax=axs[i,5],
cmap=dw_class_cmap,norm=dw_norm,title=f'hmm {x+1}')
return plt.show()
def show_year_diffs(dwm,hmm,years, cmap='gray'):
"""
Function to plot change between years
Args:
dwm (dict): Dynamic World Model outputs, with year ints as keys and rasterio dataset pointers as values
hmm (dict): Hidden Markov Model outputs, with year ints as keys and rasterio dataset pointers as values
years (list): years to loop through for calculating annual change
cmap (cmap): colormap
Returns:
Plots annual change layer for each year
"""
fig, axs = plt.subplots(2,len(years)-1,figsize=((len(years)-1)*6,2*6))
for i,x in enumerate(years[:-1]):
show(np.equal(dwm[f'{x}'].read().argmax(axis=0),dwm[f'{x+1}'].read().argmax(axis=0)),
ax=axs[0,i], cmap=cmap, title=f'dwm {x}-{x+1}')
show(np.equal(hmm[f'{x}'].read().argmax(axis=0),hmm[f'{x+1}'].read().argmax(axis=0)),
ax=axs[1,i], cmap=cmap,title=f'hmm {x}-{x+1}')
return plt.show()
def show_max_probas(dwm,hmm,years, cmap=cmocean.cm.rain):
"""
Function to plot max probability across bands for each year
Args:
dwm (dict): Dynamic World Model outputs, with year ints as keys and rasterio dataset pointers as values
hmm (dict): Hidden Markov Model outputs, with year ints as keys and rasterio dataset pointers as values
years (list): years to loop through for calculating annual change
cmap (cmap): colormap
Returns:
Plots max probability per cell for each year
"""
fig, axs = plt.subplots(len(years),2,figsize=(2*4,len(years)*4))
for i,x in enumerate(years):
show(dwm[f'{x}'].read().max(axis=0), ax=axs[i,0], cmap=cmap, vmin=0, vmax=1,title=x)
show(hmm[f'{x}'].read().max(axis=0), ax=axs[i,1], cmap=cmap, vmin=0, vmax=1,title=x)
return plt.show()
def show_normalized_diff(hmm_outputs, dwm_outputs, year, cmap=cmocean.cm.balance, band_names=DW_CLASS_LIST):
"""
Function produce normalized difference plots for probability bands
Args:
hmm_outputs (dict): a in (a-b)/(a+b)
dwm_outputs (dict): b in (a-b)/(a+b)
year (int or str): year selection. Should be a key in both `hmm_outputs` and `dwm_outputs`
cmap (cmap): valid colormap, should be diverging
band_names (list): list of band names (str) to pass to plot titles
Returns:
Displays grid of plots showing normalized difference in
"""
fig, axs = plt.subplots(dwm_outputs[f'{year}'].count//3,3,figsize=(16,16))
for i,x in enumerate(band_names):
band=i+1
a = hmm_outputs[f'{year}'].read(band)
b = dwm_outputs[f'{year}'].read(band)
show(np.divide(a-b,a+b+1e-8), ax=axs[(i//3),(i%3)], cmap=cmap, vmin=-1, vmax=1,title=x)
return plt.show()
def show_label_agreement(dwm, hmm, label, year, cmap='gray'):
"""
Function plot a visual comparison of the models and groud truth labels
Args:
dwm (dict): Dynamic World Model outputs, with year ints as keys and rasterio dataset pointers as values
hmm (dict): Hidden Markov Model outputs, with year ints as keys and rasterio dataset pointers as values
label (rio.Dataset): rasterio dataset pointer to the label object
year (int): year to select for hmm and dwm
cmap (str): colormap
Returns:
Plots DWM and HMM model labels (argmax) alongside ground truth label, with a difference layer
"""
fig, axs = plt.subplots(1,5,figsize=(5*5,1*5))
show(np.equal(dwm[f'{year}'].read().argmax(axis=0)+1,label.read()),alpha=label.dataset_mask()/255,
ax=axs[0], cmap=cmap, title=f'dwm-label diff')
show(dwm[f'{year}'].read().argmax(axis=0)+1,ax=axs[1],alpha=label.dataset_mask()/255,
cmap=dw_class_cmap,norm=dw_norm,title=f'dwm class')
show(label,ax=axs[2],cmap=dw_class_cmap,norm=dw_norm,title=f'label')
show(hmm[f'{year}'].read().argmax(axis=0)+1,ax=axs[3],alpha=label.dataset_mask()/255,
cmap=dw_class_cmap,norm=dw_norm,title=f'hmm class')
show(np.equal(hmm[f'{year}'].read().argmax(axis=0)+1,label.read()),alpha=label.dataset_mask()/255,
ax=axs[4], cmap=cmap,title=f'hmm-label diff'),
return plt.show()
def show_label_confidence(model, year, cmap='gray_r'):
"""
Function to plot first and second choice classes (argmax), probability/confidence, and a composite of class label and confidence
Args:
model (dict): year ints as keys and rasterio dataset pointers as values
year (int): year
cmap: colormap
Returns:
Plots first and second choice labels and associated probability/confidence, along with a composite of the two.
"""
fig, axs = plt.subplots(2,3,figsize=(3*6,2*6))
show(np.argsort(model[f'{year}'].read(),axis=0)[-1]+1,cmap=dw_class_cmap,norm=dw_norm, title=f'Most likely label',ax=axs[0,0])
show(np.sort(model[f'{year}'].read(),axis=0)[-1],cmap=cmap,vmin=0,vmax=1,ax=axs[0,1],title='argmax')
show(np.argsort(model[f'{year}'].read(),axis=0)[-1]+1,alpha=np.sort(model[f'{year}'].read(),
axis=0)[-1],cmap=dw_class_cmap,norm=dw_norm,ax=axs[0,2],title='labels with conf as alpha')
show(np.argsort(model[f'{year}'].read(),axis=0)[-2]+1,cmap=dw_class_cmap,norm=dw_norm, title=f'2nd most likely label',ax=axs[1,0])
show(np.sort(model[f'{year}'].read(),axis=0)[-2],cmap=cmap,vmin=0,vmax=1,ax=axs[1,1],title='argmax (-1)')
show(np.argsort(model[f'{year}'].read(),axis=0)[-2]+1,alpha=np.sort(model[f'{year}'].read(),
axis=0)[-2],cmap=dw_class_cmap,norm=dw_norm,ax=axs[1,2],title='labels with conf as alpha')
return plt.show()
def number_of_class_changes(model, years):
"""
Function to count class changes
Args:
model (dict): year ints as keys and rasterio dataset pointers as values
years (list): years to loop through for calculating annual change
Returns:
Array of class cumulative class changes per pixel
"""
array = np.zeros_like(model[f'{years[0]}'].read().argmax(axis=0))
for x in years[:-1]:
array += np.not_equal(model[f'{x}'].read().argmax(axis=0),model[f'{x+1}'].read().argmax(axis=0)).astype(int)
return array
def calc_ee_class_transitions(img1, img2, method='frequency_hist',scale=10,numPoints=500):
"""
Function to calculate class transitions between two ee.image.Image objects.
Args:
img1 (ee.image.Image): first imput image (from/before)
img2 (ee.image.Image): second imput image (to/after)
method (str): `frequency_hist` or `stratified_sample`. Frequency_hist approach reduces the image stack
to output a frequency histogram defining class transitions. Stratified_sample retrieves a stratified sample of points
and the built in error_matrix method.
scale (int): scale in meters, defaults to 10
numPoints (int): number of points in stratified sample method, defaults to 500
Returns:
pd.DataFrame of class transition counts from img1 to img2
"""
if not all(isinstance(i, ee.image.Image) for i in [img1, img2]):
print('inputs should be type ee.image.Image, YMMV')
df = pd.DataFrame()
if method == 'frequency_hist':
hist = img1.addBands(img2).reduceRegion(
reducer=ee.Reducer.frequencyHistogram().group(groupField=1,groupName='class'),
bestEffort=True,
scale=scale)
hist_table = hist.getInfo()
df_tm = pd.json_normalize(hist_table['groups']).set_index('class').fillna(0)
df = df.add(df_tm, fill_value=0)
df.index.name = None
df.columns = [x.replace('histogram.','') for x in df.columns] # remove the 'histogram.' prefix from json io parse
cols = sorted(df.columns.values,key=int) # sort string numbers by pretending they're real numbers
df = df[cols].fillna(0) # nans are actually 0 in this case
return df.T
if method == 'stratified_sample':
stacked_classes = img1.rename('before').addBands(img2.rename('after'))
samples = stacked_classes.stratifiedSample(classBand='before',numPoints=numPoints,scale=scale)
trns = samples.errorMatrix(actual='before',predicted='after')
transition_mtx = trns.getInfo()
return pd.DataFrame(transition_mtx)
def transition_matrix(df, kind='classwise', style=True, fmt='{:.2f}', cmap='BuPu'):
"""
Function to normalize and style a confusion matrix DataFrame
Args:
df (pd.DataFrame): DataFrame of confusion matrix counts
kind (str): type of normalization
'changes_only': % of all class changes, exclusing those remaining the same
'classwise_changes_only': % of all class changes by row, excluding those remaining the same
'overall': % of all transitions including classes remaining (non-changes, identity matrix)
'classwise': % of all transitions by row including classes remaining (non-changes, identitity matrix)
stye (bool): defaults True, whether to show style modifiers on output DataFrame
fmt (str): number formatter pattern
cmap (str): colormap to pass to DataFrame background_gradient property
Returns:
pd.DataFrame of normalized confusion matrix
"""
g=df.copy()
if kind=='changes_only':
np.fill_diagonal(g.values, 0)
g = g.divide(sum(g.sum(axis=1)),axis=0)
if kind=='classwise_changes_only':
np.fill_diagonal(g.values, 0)
g = g.divide(g.sum(axis=1),axis=0)
if kind=='overall':
g = g.divide(sum(g.sum(axis=1)),axis=0)
if kind=='classwise':
g = g.divide(g.sum(axis=1),axis=0)
if style:
return g.style.format(fmt).background_gradient(cmap=cmap,axis=None)
if not style:
return g
def all_the_stats(array):
"""
    Function to calculate ancillary classification metrics
Args:
array (np.array): should be a multi-class confusion matrix
Returns:
Pandas.DataFrame of additional metrics by row
    Special regards to https://en.wikipedia.org/wiki/Confusion_matrix
"""
fp = np.clip((array.sum(axis=0) - np.diag(array)),1e-8,np.inf).astype(float)
tp = np.clip(np.diag(array),1e-8,np.inf).astype(float)
fn = np.clip((array.sum(axis=1) - np.diag(array)),1e-8,np.inf).astype(float)
tn = np.clip((array.sum() - (fp + fn + tp)),1e-8,np.inf).astype(float)
df = pd.DataFrame({
'FP':fp, #false positive
'TP':tp, #true positive
'FN':fn, #false negative
'TN':tn, #true negative
'TPR':tp/(tp+fn), #true positive rate (sensitivity, hit rate, recall)
'TNR':tn/(tn+fp), #true negative rate (specificity)
'PPV':tp/(tp+fp), #positive predictive value (precision)
'NPV':tn/(tn+fn), #negative predictive value
'FPR':fp/(fp+tn), #false positive rate (fall out)
'FNR':fn/(tp+fn), #false negative rate
'FDR':fp/(tp+fp), #false discovery rate
'ACC':(tp+tn)/(tp+fp+fn+tn), #overall class accuracy
})
return df | [
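# Hedged usage sketch (not part of the module): chaining the helpers above on two
# hypothetical classified ee.Image objects to get normalized transitions and per-class rates.
def example_transition_report(img_before, img_after):
    counts = calc_ee_class_transitions(img_before, img_after, method='frequency_hist')
    normalized = transition_matrix(counts, kind='classwise', style=False)
    rates = all_the_stats(counts.values)
    return normalized, rates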
"matplotlib"
] |
c372a2369bfa0b12ad611c2f8431bcf586899eca | Python | marcoadamo1/micro_utilities | /getPeak_improved.py | UTF-8 | 5,306 | 2.625 | 3 | [] | no_license | import os
from pathlib import Path # Check this library, very cool
import numpy as np
#import matplotlib.pyplot as plt
from tkinter import *
from tkinter import ttk
from tkinter.filedialog import askopenfilename, askdirectory
import peakutils
from peakutils.plot import plot as pplot
from matplotlib import pyplot
root = Tk( )
HEADER = "Mod_Q\tI\tErr_I\tSigma_Q\n\n"
SKIPROWS = 40
def smooth(x,window_len=11,window='hanning'):
if x.ndim != 1:
raise ValueError("smooth only accepts 1 dimension arrays.")
if x.size < window_len:
raise ValueError("Input vector needs to be bigger than window size.")
if window_len<3:
return x
if not window in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:
raise ValueError("Window is on of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'")
s=np.r_[x[window_len-1:0:-1],x,x[-2:-window_len-1:-1]]
#print(len(s))
if window == 'flat': #moving average
w=np.ones(window_len,'d')
else:
w=eval('np.'+window+'(window_len)')
y=np.convolve(w/w.sum(),s,mode='valid')
return y
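# Hedged sketch (added): the smoother pads and convolves, so its output is longer than the
# input by window_len-1 samples and is trimmed before use (as done inside getPeak below).
def example_smooth_noisy_sine(window_len=11):
    t = np.linspace(0, 4 * np.pi, 500)
    noisy = np.sin(t) + 0.2 * np.random.randn(500)
    smoothed = smooth(noisy, window_len=window_len, window='hanning')
    return smoothed[window_len // 2:-(window_len // 2)] # trim back to the original length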
def getPeak():
### ============= FOLDER LOADING =============
path = Path(askdirectory(initialdir="C:/Users/adamo/OneDrive - Imperial College London/",
title = "Choose a file."))
print ("Opening files in {}".format(path))
files = os.listdir(path)
toRemove = np.loadtxt(path / files[0], skiprows=SKIPROWS)
linesInFile = len(toRemove[:,1])
### ============= DECLARATION =============
I_total = np.zeros(linesInFile)
Q_total = np.zeros(linesInFile)
E_total = np.zeros(linesInFile)
peakPosition = np.zeros(len(files))
peakIntensity = np.zeros(len(files))
number = np.arange(1, len(files)+1, 1, dtype=int)
i = 0
exit = False
for filename in files:
if not exit:
### ============= FILE LOADING =============
print(filename) #Check if it reads in the correct order
tmp = path / filename
d = np.loadtxt(tmp, skiprows=40) #To access a full column: d[:,0]
if (len(d[:,0]) != linesInFile):
# Just in case someone manually modified a single file
print("File length not expected")
raise
I_total = d[:,1]
Q_total = d[:,0]
#E_total = d[:,2]
#sigma_total = d[:,3]
### ============= SMOOTH THE SIGNALS =============
windowLen = 11
windowIdx = int(windowLen/2)
a = smooth(I_total,window_len=windowLen,window='blackman')
a = a[windowIdx:-windowIdx]
if len(a) != len(I_total):
print("Unexpected length of the smoothed signal!")
print(len(a),len(I_total))
raise ValueError("Unexpected vector length")
if 0: #DEBUG: check if the smoothing is ok
pyplot.plot(I_total)
pyplot.plot(a)
pyplot.yscale('log')
pyplot.xscale('log')
pyplot.show()
I_total = a #Save the smooth signal for interpolation
### ============= GET PEAK POSITIONS =============
indexes = peakutils.indexes(I_total, thres=0.3, min_dist=40)
tmp = peakutils.interpolate(Q_total, I_total, width=3, ind=indexes)
if (tmp.size > 1):
print(tmp.size)
pyplot.figure(figsize=(10,6))
pplot(Q_total, I_total, indexes)
pyplot.title('First estimate')
pyplot.show()
peakPosition[i] = Q_total[0]
else:
peakPosition[i] = tmp
print(peakPosition[i])
#try:
# peakPosition[i] = peakutils.interpolate(Q_total, I_total, ind=indexes)
# print("...{}".format(peaks_x))
#except:
# print("ABORT")
# peakPosition[i] = Q_total[np.argmax(I_total)]
#exit = True
# Get the maximum value for each file
#peakPosition[i] = Q_total[np.argmax(I_total)]
peakIntensity[i] = np.max(I_total)
i+=1
output = ["peak_pos.dat", "peak_int.dat"]
for i in range(0, len(output)):
output[i] = path.parent / output[i]
    np.savetxt(output[0], np.c_[number,peakPosition], fmt='%.18e', delimiter='\t', newline='\n', header="number\tpeakPosition\t", footer='', comments='')
    np.savetxt(output[1], np.c_[number,peakIntensity], fmt='%.18e', delimiter='\t', newline='\n', header="number\tpeakIntensity\t", footer='', comments='')
Title = root.title( "Peak Position")
#label = ttk.Label(root, text ="I'm BATMAN!!!",foreground="red",font=("Helvetica", 16))
#label.pack()
#Menu Bar
menu = Menu(root)
root.config(menu=menu)
file = Menu(menu)
file.add_command(label = 'Open', command = getPeak)
file.add_command(label = 'Exit', command = lambda:exit())
menu.add_cascade(label = 'File', menu = file)
root.mainloop() | [
"matplotlib"
] |
fd6280ac7dc4d65b11aaaf1dd1cdeaaa55b425de | Python | abdsaeed92/fyp-project | /cleaningmodule.py | UTF-8 | 2,423 | 2.71875 | 3 | [] | no_license | import os
import time
import pickle
import numpy as np
import pandas as pd
import seaborn as se
import missingno as msno
from joblib import dump, load
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.preprocessing import StandardScaler
# loading model artifacts
start_model_load_time = time.process_time()
clf = load('svcNgram-v0.1.joblib')
print('Model loaded in',time.process_time() - start_model_load_time, 'seconds')
start_vectorizer_load_time = time.process_time()
tifidf_ngram = load('tfidf_vec_gram-v0.1.joblib')
print('Vectorizer loaded in',time.process_time() - start_vectorizer_load_time, 'seconds')
def uniqueWords(X):
try:
X = X.split(' ')
X = set(X)
X = len(X)
return X
except:
return 0
class Dclean():
"""
Represent a cleaner class with cleaning and auxiliary methods.
"""
def __init__(self, name):
"""
Constructor to initialize the cleaner object.
"""
self.name = name
def sit(self):
print(self.name + ' - ' +'initialized')
return self.name + ' - ' +'initialized'
def diagnose_data(self, df, dfname):
plt.close("all")
fig = msno.matrix(df)
fig_copy = fig.get_figure()
fig_copy.savefig('{}/plots/{}.png'.format(os.getcwd(),dfname))
return '{}/plots/{}.png'.format(os.getcwd(),dfname)
def engineer_features(self, df, textcolumn):
dfengineer_features = pd.DataFrame()
df = df.dropna()
dfengineer_features['text'] = df[textcolumn]
dfengineer_features['charCount'] = df[textcolumn].str.len()
dfengineer_features['wordCount'] = df[textcolumn].str.split(' ').str.len()
dfengineer_features['uniqueWords'] = df[textcolumn].apply(uniqueWords)
return dfengineer_features
def cluster(self, dataset,filename):
X = dataset.drop('text', axis = 1)
scaler = StandardScaler()
X = scaler.fit_transform(X)
kmeans = KMeans(n_clusters=7, random_state=0).fit(X)
dataset['Cluster'] = kmeans.labels_
filename = filename.split('.')
dataset.to_csv('{}/{}.csv'.format('annotated_data',filename[0]))
return dataset
def classify_statement(self, statement):
text_labels = ['Clean text', 'Dirty text']
return text_labels[clf.predict(tifidf_ngram.transform([statement]))[0]]
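# Hedged usage sketch (not part of the module): a typical end-to-end pass over a CSV that
# has a 'text' column; the file name here is purely illustrative.
def example_pipeline(csv_path='reviews.csv'):
    cleaner = Dclean('demo-cleaner')
    df = pd.read_csv(csv_path)
    features = cleaner.engineer_features(df, 'text')
    clustered = cleaner.cluster(features, csv_path) # writes annotated_data/reviews.csv
    labels = [cleaner.classify_statement(t) for t in df['text'].dropna()[:5]]
    return clustered, labels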
| [
"matplotlib",
"seaborn",
"missingno"
] |
5ddab7fb7d3e02bb9ee84fa30e9b70bf21b43487 | Python | PO-Purple/po-purple | /Oude code/Python/Lijn zoek algorithme/FollowLine5.py | UTF-8 | 15,159 | 2.84375 | 3 | [] | no_license | import cv2
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
import scipy
from scipy import interpolate
#from scipy.interpolate import splder
from scipy.optimize import fsolve
import random
import time
print scipy.__version__
def createSequence(n):
list = []
for i in xrange(0,n):
list.append(i)
return list
def addFotos():
fotoList = []
fotoList.append('cam2')
for i in xrange(0,10):
fotoList.append('picture_'+str(i))
for i in xrange(0,9):
fotoList.append('picture_A'+str(i))
fotoList.remove('picture_8')
fotoList.remove('picture_A8')
fotoList.remove('picture_A7')
fotoList.remove('picture_4')
return fotoList
def toGrayScale(image):
gray_image_array = image.convert('L')
gray_image = np.asarray(gray_image_array).copy()
return [gray_image, gray_image_array]
def plot(pixel_list, string):
plt.figure()
indices = createSequence(len(pixel_list))
plt.plot(indices, pixel_list, "x")
print 'amount of '+ string + ' is: ' + str(len(pixel_list))
plt.show()
def plot2(pixel_list, width, heigth):
plt.figure()
plt.plot(pixel_list[0], pixel_list[1], "x")
plt.axis([0,heigth,0,width])
plt.show()
def plot3(pixel_list):
plt.figure()
plt.plot(pixel_list[0], pixel_list[1], '.')
plt.show()
# calculate the lighting in an image based on gray values an order these
def calculateLighting(gray_image, width, heigth, interval, showLightingPlot):
pixel_list = []
for y in xrange(0, heigth/interval):
for x in xrange(0,width/interval):
location = (interval*x,interval*y)
pixel_list.append(gray_image.getpixel(location))
pixel_list = np.sort(pixel_list)
if (showLightingPlot == 1):
plot(pixel_list, 'sorted grayscale')
return pixel_list
# calculates a tipping value of a gray image array. The consistency of the shape of
# the (ordened) curve of lighting throughout the image is used.
def calculateTippingValue(gray_image, width, heigth, interval, interval2, showLightingPlot):
pixel_list = calculateLighting(gray_image, width, heigth, interval, showLightingPlot)
searchedPixels = pixel_list[len(pixel_list)/2:]
diff = []
for i in xrange(0, (len(searchedPixels) - 1)/interval2):
diff.append(searchedPixels[i*interval2] - searchedPixels[(i-1)*interval2])
maximum = max(diff)
maxindex = -1
for i in xrange(1, len(diff)):
if diff[i] == maximum:
maxindex = i*interval2
tippingValue = searchedPixels[maxindex]
showSearchPixelsLightingPlot = 0
showDiffLightingPlot = 0
if (showSearchPixelsLightingPlot == 1):
plot(searchedPixels, 'searched values')
if (showDiffLightingPlot == 1):
plot(diff, 'diff values')
return tippingValue
# converts array to absolute black and white based on previously mentioned tipping value
def toBlackAndWhite(gray_image, tippingValue, showBlackAndWhite):
gray_image[gray_image < tippingValue] = 0
gray_image[gray_image >= tippingValue] = 255
if showBlackAndWhite == 1:
blackAndWhite = Image.fromarray(gray_image)
blackAndWhite.show()
return gray_image
def thinGroups(group_list, n):
new_group_list = []
for group in group_list:
new_group = []
x_list = group[0]
y_list = group[1]
k=0
x_rem = []
y_rem = []
for i in xrange(0, len(x_list)):
k+=1
if k == n:
x_rem.append(group[0][i])
y_rem.append(group[1][i])
k = 0
new_group.append(x_rem)
new_group.append(y_rem)
new_group_list.append(new_group)
return new_group_list
# groups pixels logiclly together
def groupPixels2(transformed_edges, large_distance):
x_array = transformed_edges[0]
y_array = transformed_edges[1]
bool_in_group = np.ones(len(transformed_edges[0]), dtype=np.int_)
group_list = []
group_index = -1
while True:
if len(np.nonzero(bool_in_group)[0]) == 0:
break
group_index += 1
group_list.append([[],[]])
ind = np.where(bool_in_group==1)[0][0]
x = x_array[ind]
y = y_array[ind]
group_list[-1][0].append(x)
group_list[-1][1].append(y)
bool_in_group[ind] = 0
group_list, bool_in_group = addBuddies(x, y, ind,
x_array, y_array, bool_in_group, group_list, group_index)
return orderGroups(group_list, large_distance)
def groupPixels3(transformed_edges, large_distance):
    # unfinished variant of groupPixels2 -- it only unpacks its inputs and is not called in this script
    x_array = transformed_edges[0]
    y_array = transformed_edges[1]
def orderGroups(group_list, large_distance):
new_group_list = []
for group in group_list:
x_list = group[0]
y_list = group[1]
for i in xrange(1, len(x_list)):
x0 = x_list[i-1]
y0 = y_list[i-1]
x1 = x_list[i]
y1 = y_list[i]
distance = np.sqrt((y1-y0)**2+(x1-x0)**2)
if distance > large_distance:
xend = x_list[i:]
xend.reverse()
yend = y_list[i:]
yend.reverse()
new_group_list.append([xend+x_list[:i],yend+y_list[:i]])
break
if len(x_list)-1==i:
new_group_list.append(group)
return new_group_list
def f(x):
return x*0.020
def addBuddies(x, y, index, x_array, y_array, bool_in_group, group_list, group_index):
maxRange = f(x)
# using quicksort ??
xbuddies = np.where((x_array>x-maxRange) & (x_array<x+maxRange))
ybuddies = np.where((y_array>y-maxRange) & (y_array<y+maxRange))
buddy_indices = np.intersect1d(xbuddies, ybuddies)
buddy_indices_remainder = []
for buddy_index in buddy_indices:
if bool_in_group[buddy_index] == 1:
buddy_indices_remainder.append(buddy_index)
buddy_indices = buddy_indices_remainder
distance_list = []
for buddy_index in buddy_indices:
xb = x_array[buddy_index]
yb = y_array[buddy_index]
distance = np.sqrt((yb-y)**2+(xb-x)**2)
distance_list.append(distance)
sorted_buddy_indices = [x for (y,x) in sorted(zip(distance_list,buddy_indices))]
for buddy_index in sorted_buddy_indices:
if (buddy_index != index):
group_list[group_index][0].append(x_array[buddy_index])
group_list[group_index][1].append(y_array[buddy_index])
bool_in_group[buddy_index] = 0
sorted_buddy_indices.reverse()
for buddy_index in sorted_buddy_indices:
if buddy_index != index:
group_list, bool_in_group = addBuddies(x_array[buddy_index],y_array[buddy_index], buddy_index,
x_array, y_array,bool_in_group, group_list, group_index)
return group_list, bool_in_group
# returns something with unit in cm
def XToBirdsEye(x, x_start, x_middle, heigth_camera, heigth):
x = heigth - x
a = np.sqrt(1+(heigth_camera**2)/(x_middle**2))
b = (2*x/heigth*(x_middle - x_start))/((a**2)*(x_middle**2))
result = (-1)*(heigth_camera**2*b+x_start)/(x_middle*b-1)
return result
# returns something with unit in pixels. Inverse of XToBirdsEye.
def XToAngle(x_cm, x_start, x_middle, heigth_camera, heigth):
theta = np.arctan(heigth_camera/x_cm)
theta_middle = np.arctan(heigth_camera/x_middle)
result = (( x_cm - x_start )*np.sin(theta)*heigth)/( 2*( x_middle - x_start )*np.sin(np.pi/2 - theta + theta_middle)*np.sin(theta_middle))
return heigth - result
# returns something with unit in cm
def YToBirdsEye(x, y, x_horizon, base_width, width, heigth):
return (-1)*(( heigth - x_horizon )/( x - x_horizon ))*( (base_width) / (width) )*( (y) - width / 2.0 )
# returns something with unit in pixels. Inverse of YToBirdsEye.
def YToAngle(x_cm, y_cm, x_start, x_middle, heigth_camera, x_horizon, width, heigth):
x = XToAngle(x_cm, x_start, x_middle, heigth_camera, x_horizon)
return (-1)*y_cm/((( x - x_horizon )/( heigth - x_horizon ))*( (base_width) / (width) )) + width / 2.0
# transforms a list of pixels into a list of birds eye viewed distances
def toBirdsEye(pixel_list, x_start, x_middle, heigth_camera, x_horizon, base_width, width, heigth):
new_distance_list = np.array([np.zeros(len(pixel_list[0]), dtype=np.float), np.zeros(len(pixel_list[0]), dtype=np.float)])
for i in xrange(0, len(pixel_list[0])):
x = float(pixel_list[0][i])
y = float(pixel_list[1][i])
x_cm = XToBirdsEye(float(x), x_start, x_middle, heigth_camera, heigth)
y_cm = YToBirdsEye(float(x), float(y), x_horizon, base_width, width, heigth)
new_distance_list[0][i] = x_cm
new_distance_list[1][i] = y_cm
return new_distance_list
# transforms a list of birds eye viewed distances to a list of pixels on the angle and heigth the camera is on
def toAngle(distance_list, x_start, x_middle, heigth_camera, x_horizon, width, heigth, base_width):
new_pixel_list = [[],[]]
for i in xrange(0, len(distance_list[0])):
x_cm = distance_list[0][i]
y_cm = distance_list[1][i]
theta = np.arctan(heigth_camera/x_cm)
theta_middle = np.arctan(heigth_camera/x_middle)
x = heigth - (( x_cm - x_start )*np.sin(theta)*heigth)/( 2*( x_middle - x_start )*np.sin(np.pi/2 - theta + theta_middle)*np.sin(theta_middle))
y = (-1)*y_cm/((( heigth - x_horizon )/( x - x_horizon ))*( (base_width) / (width) )) + width / 2.0
new_pixel_list[0].append(x)
new_pixel_list[1].append(y)
return new_pixel_list
def toAngleOnePixel(x_cm, y_cm, x_start, x_middle, heigth_camera, base_width, x_horizon, width, heigth):
theta = np.arctan(heigth_camera/x_cm)
theta_middle = np.arctan(heigth_camera/x_middle)
x = heigth - (( x_cm - x_start )*np.sin(theta)*heigth)/( 2*( x_middle - x_start )*np.sin(np.pi/2 - theta + theta_middle)*np.sin(theta_middle))
y = y_cm/((( x - x_horizon )/( heigth - x_horizon ))*( (base_width) / (width) )) + width / 2.0
return x,y
def linkGroups2(bw, edges,group_list, line_width,
x_start, x_middle,heigth_camera, base_width, x_horizon,
width, heigth, spline_flatness, showSpline, transformed_edges):
if showSpline == 1:
plt.figure()
remainder_list = []
parallel_group_list = []
for i in xrange(0, len(group_list)):
group = group_list[i]
if len(group[0]) > 4 :
remainder_list.append(group)
x_list = group[0]
y_list = group[1]
tck, u = interpolate.splprep([x_list, y_list], s=spline_flatness)
pixels_on_spline = interpolate.splev(u, tck)
first_derivative = interpolate.splev(u, tck, der=1)
t,c,k = tck
parallel_group_out = [[],[]]
parallel_group_in = [[],[]]
for j in xrange(0, len(pixels_on_spline[0])):
x = pixels_on_spline[0][j]
y = pixels_on_spline[1][j]
dx = first_derivative[0][j]
dy = first_derivative[1][j]
x_parr_out = x + ((line_width/2.0)*dy)/(np.sqrt(dx**2+dy**2))
y_parr_out = y - ((line_width/2.0)*dx)/(np.sqrt(dx**2+dy**2))
x_parr_in = x + ((-1)*(line_width/2.0)*dy)/(np.sqrt(dx**2+dy**2))
y_parr_in = y - ((-1)*(line_width/2.0)*dx)/(np.sqrt(dx**2+dy**2))
parallel_group_out[0].append(x_parr_out)
parallel_group_out[1].append(y_parr_out)
parallel_group_in[0].append(x_parr_in)
parallel_group_in[1].append(y_parr_in)
parallel_group = getCorrectParallelGroup(bw, edges, parallel_group_out, parallel_group_in, x_start, x_middle, heigth_camera, x_horizon, width, heigth, base_width)
parallel_group_list.append(parallel_group)
if showSpline == 1:
plt.plot(pixels_on_spline[0], pixels_on_spline[1])
plt.plot(parallel_group[0], parallel_group[1], '.')
if showSpline == 1:
plt.plot(transformed_edges[0], transformed_edges[1], ',')
plt.axis([0,100,-50,50])
plt.show()
return parallel_group_list
def getCorrectParallelGroup(bw, edges, group1, group2, x_start, x_middle, heigth_camera, x_horizon, width, heigth, base_width):
group1l = toAngle(group1, x_start, x_middle, heigth_camera, x_horizon, width, heigth, base_width)
group2l = toAngle(group2, x_start, x_middle, heigth_camera, x_horizon, width, heigth, base_width)
c1 = 0
c2 = 0
for i in xrange(0, len(group1[0])):
x_parr_in_px = group1l[0][i]
y_parr_in_px = group1l[1][i]
if x_parr_in_px >= heigth or x_parr_in_px < 0 or y_parr_in_px >= width or y_parr_in_px < 0:
pass
else:
if bw[x_parr_in_px][y_parr_in_px] == 255:
c1+=1
for i in xrange(0, len(group2[0])):
x_parr_out_px = group2l[0][i]
y_parr_out_px = group2l[1][i]
if x_parr_out_px >= heigth or x_parr_out_px < 0 or y_parr_out_px >= width or y_parr_out_px < 0:
pass
else:
if bw[x_parr_out_px][y_parr_out_px] == 255:
c2+=1
if c2>c1:
return group2
else:
return group1
# chooses a path to follow
# this will become very important in the second semester
def choosePath(parallel_group_list):
distances = []
for group in parallel_group_list:
x1 = group[0][0]
y1 = group[1][0]
x2 = group[0][-1]
y2 = group[1][-1]
d1 = np.sqrt(x1**2 + y1**2)
d2 = np.sqrt(x2**2 + y2**2)
        distances.extend([d1, d2])  # keep both endpoint distances so min_index/2 maps back to the group
min_dist = min(distances)
for i in xrange(0, len(distances)):
if distances[i] == min_dist:
min_index = i
break
chosen_one = parallel_group_list[min_index/2]
if min_index%2 == 1:
chosen_one.reverse()
return chosen_one
def calculateCurvature(x, y, dx, dy, ddx, ddy):
    return (dx*ddy - ddx*dy)/((dx**2+dy**2)**1.5)  # exponent 1.5; (3/2) truncates to 1 under Python 2 integer division
def calculateSpeeds(path, spline_flatness, future_time):
tck, u = interpolate.splprep([path[0], path[1]], s=spline_flatness)
pixels_on_spline = interpolate.splev(u, tck)
first_derivative = interpolate.splev(u, tck, der=1)
second_derivative = interpolate.splev(u, tck, der=2)
time_list = []
left_motor = []
right_motor = []
for i in xrange(0, len(pixels_on_spline[0])-1):
x = pixels_on_spline[0][i]
y = pixels_on_spline[1][i]
x_next = pixels_on_spline[0][i+1]
y_next = pixels_on_spline[1][i+1]
dx = first_derivative[0][i]
dy = first_derivative[1][i]
ddx = second_derivative[0][i]
ddy = second_derivative[1][i]
kappa = calculateCurvature(x, y, dx, dy, ddx, ddy)
v_c = 9.81/kappa
distance = np.sqrt((y_next-y)**2+(x_next-x)**2)
delta_time = distance/v_c
time_list.append(delta_time)
| [
"matplotlib"
] |
28d5a6f3e8facaa933ecc68dc7d325f92f725a27 | Python | AHollierDS/ParcoursDS_Projet3 | /PSanté_01_scripts.py | UTF-8 | 9,665 | 3.40625 | 3 | [] | no_license | # Preparing working environment
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import inspect
# -----------------------------------------------------------------------------------------------------------------------------------------
def cut_pie(serie, cut_off = 0.05, title = None):
cut_serie = serie[serie / serie.sum() > cut_off].copy().dropna()
remain = pd.DataFrame(serie[serie / serie.sum() < cut_off].sum(), columns = ['Other']).T
cut_serie = cut_serie.append(remain)
plt.figure(figsize = (8,8))
plt.pie(cut_serie.iloc[:,0], autopct = lambda x: str(round(x,1)) + '%')
plt.legend(cut_serie.index)
plt.title(title, fontweight = 'bold')
plt.show()
# -----------------------------------------------------------------------------------------------------------------------------------------
def retrieve_name(var):
callers_local_vars = inspect.currentframe().f_back.f_locals.items()
return [var_name for var_name, var_val in callers_local_vars if var_val is var]
# -----------------------------------------------------------------------------------------------------------------------------------------
def df_fillrates(df, col = 'selected columns', h_size = 15):
""" Returns a barplot showing for each column of a dataset df the percent of non-null values """
nb_columns = len(df.columns)
df_fillrate = pd.DataFrame(df.count()/df.shape[0])
df_fillrate.plot.barh(figsize = (h_size, nb_columns/2), title = "Fillrate of columns in {}".format(col))
plt.grid(True)
plt.gca().legend().set_visible(False)
plt.show()
# -----------------------------------------------------------------------------------------------------------------------------------------
def drop_empty_col(df, min_value = 1):
""" Removes columns in the df dataset if the columns contains less than min_value elements"""
columns_list = df.columns.to_list()
df_clean = df.copy()
for col in columns_list:
if df_clean[col].count() < min_value:
df_clean = df_clean.drop(columns = [col])
nb_col_i = len(columns_list)
nb_col_f = len(df_clean.columns.to_list())
print("{} columns out of {} have been dropped of the dataframe".format(nb_col_i - nb_col_f, nb_col_i))
return df_clean
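# Usage sketch (hypothetical dataframe name, for illustration only):
# products_clean = drop_empty_col(products, min_value = 1000)
# would keep only the columns of `products` holding at least 1000 non-null values.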
# -----------------------------------------------------------------------------------------------------------------------------------------
def list_criteria (df, criteria, h_size = 15):
""" Returns the fillrate graph of columns in dataframe df whose name matches the criteria"""
# Return the list of columns matching the criteria
df_columns = pd.DataFrame(df.columns, columns = ['Column'])
# Graph of fillrates on corresponding columns
criteria_list = df_columns[df_columns['Column'].str.contains(criteria)]['Column'].to_list()
df_fillrates(df[criteria_list], col = criteria, h_size = h_size)
# -----------------------------------------------------------------------------------------------------------------------------------------
def grade_distrib(df, size = (15,5)):
# Grade series
a_grade = df[df['nutriscore_grade']== 'a']['nutriscore_score']
b_grade = df[df['nutriscore_grade']== 'b']['nutriscore_score']
c_grade = df[df['nutriscore_grade']== 'c']['nutriscore_score']
d_grade = df[df['nutriscore_grade']== 'd']['nutriscore_score']
e_grade = df[df['nutriscore_grade']== 'e']['nutriscore_score']
# Plotting
plt.figure(figsize = size)
plt.hist([a_grade, b_grade, c_grade, d_grade, e_grade], histtype = 'barstacked',
color = ['green','lime','yellow','orange','red'],
bins = 60, range = (-20, 40))
# Plot parameters
plt.xlabel('Nutrition score', fontstyle = 'italic', backgroundcolor = 'lightgrey')
plt.ylabel('Number of products', fontstyle = 'italic', backgroundcolor = 'lightgrey')
plt.title('Distribution of nutriscore and grade categorization', fontweight = 'bold', fontsize = 12)
plt.legend(['a','b','c','d','e'])
plt.grid(True)
plt.show()
# -----------------------------------------------------------------------------------------------------------------------------------------
def both_boxplot(df, col, low_bound = 0 , up_bound = 1000000000, fig_size = (15,5)):
""" Returns 2 or 3 boxplots of selected columns in selected dataframe"""
""" First boxplot returns all values, including outliers"""
""" If a lower or upper boundary is given, second boxplot narrows the first one between these boundaries"""
""" Third boxplot excludes the outliers"""
plt.figure(figsize = fig_size)
plt.suptitle("Distribution of values in column {} ".format(col), fontsize = 12, fontweight = 'bold' )
# Boxplot with every value
plt.subplot(131)
plt.boxplot(df[df[col].isna() == False][col], showfliers = True)
plt.title('All values')
plt.grid(True)
# If a boundary is specified, plot the second boxplot
if (low_bound != 0) or (up_bound != 1000000000) :
plt.subplot(132)
plt.boxplot(
df[(df[col] > low_bound) & (df[col] < up_bound) & (df[col].isna() == False)][col],
showfliers = True)
plt.title('With restricted values')
plt.grid(True)
third_index = 133
else :
third_index = 132
# Boxplot excluding outliers
plt.subplot(third_index)
plt.boxplot(df[df[col].isna() == False][col], showfliers = False)
plt.title('No outliers')
plt.grid(True)
plt.show()
# ----------------------------------------------------------------------------------------------------------------------------------------
def higher_values(df, col, up_bound, limit = 10):
return(df[df[col] > up_bound][['product_name','categories', 'main_category',col]].sort_values(by = col, ascending = False).head(limit))
# ----------------------------------------------------------------------------------------------------------------------------------------
def plot_score(df, x_crit, y_crit):
plt.figure(figsize = (15,5))
grades = df[~df['nutriscore_grade'].isna()]['nutriscore_grade'].sort_values().unique()
colors = ['green','lime','yellow','orange','red']
for i in range(0,5):
x_ = df[(~df['nutriscore_score'].isna()) & (df['nutriscore_grade'] == grades[i])][x_crit]
y_ = df[(~df['nutriscore_score'].isna()) & (df['nutriscore_grade'] == grades[i])][y_crit]
plt.scatter(x_, y_, marker = "+", c = colors[i])
plt.xlabel(x_crit)
plt.ylabel(y_crit)
plt.legend(grades)
# plt.gca().set_xlim(x_limit)
plt.grid(True)
plt.show()
# ----------------------------------------------------------------------------------------------------------------------------------------
def plot_percentile(df, title = "Values per percentile", max_p = 95):
""" Plots the values at each percentile for columns of the given dataframe. Three graphs are given :
- First includes all values in each columns
- Second is limited to the 99-th percentile
- Last graph is limited by the max_p-th percentile
"""
plt.figure(figsize = (20,5))
plt.suptitle(title, fontsize = 12, fontweight = 'bold')
perc_lim = [1.01, 1.00, (max_p/100) + 0.01]
title_list = ["Including all values","Up to the 99-th percentile", "Up to the {}-th percentile".format(max_p)]
for i in [0,1,2]:
plt.subplot(131 + i)
plt.plot(df.quantile(np.arange(0.01, perc_lim[i], 0.01)))
plt.legend(df.columns)
plt.xlabel("Percentile", backgroundcolor = 'lightgrey', fontstyle = 'italic')
plt.ylabel("Value", backgroundcolor = 'lightgrey', fontstyle = 'italic')
plt.grid(True)
plt.title(title_list[i])
plt.show()
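# Example call (the column names below are assumptions, not guaranteed to exist in the dataset):
# plot_percentile(df[['sugars_100g', 'fat_100g']], title = "Sugar and fat per percentile", max_p = 95)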
# -----------------------------------------------------------------------------------------------------------------------------------------
def df_excess_rates(df, limit):
""" Returns a barplot showing for each column of a dataset df the percent of values exceeding a given limit """
nb_columns = len(df.columns)
    df_excess = pd.DataFrame(df.applymap(lambda x: x > limit).sum()/df.count())  # compare against the given limit rather than a hard-coded 100
if len(df_excess[df_excess[0] > 0]) > 0:
df_excess[df_excess[0] > 0].plot.bar(figsize = (15, 5), title = "Percentage of values > {}".format(limit), rot = 45)
plt.grid(True)
plt.gca().legend().set_visible(False)
else:
print("No columns with values > {} ".format(limit))
# -----------------------------------------------------------------------------------------------------------------------------------------
def plot_corr(df, selection, criteria):
""" Returns a heatmap showing the strongers correlations between the selected columns and all other columns of df.
Only the correlation index above criteria are shown"""
# Creation of a correlation matrix
df_correlations = df.corr(method = 'pearson')
# Showing only correlations index above given criteria
df_correlations = df_correlations.applymap(lambda x: 0 if abs(x) < criteria else x).copy()
# Restricting to correlations for the selected columns
df_selected_corr = df_correlations[selection]
df_selected_corr = df_selected_corr.drop(index = selection)
df_selected_corr['sum'] = df_selected_corr.sum(axis = 1)
df_selected_corr = df_selected_corr[df_selected_corr['sum'] != 0].drop(columns = 'sum')
# Creation of the heatmap
sns.heatmap(df_selected_corr, cmap = 'coolwarm')
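# Illustrative call (the 0.5 threshold is an assumption for the example):
# plot_corr(df, selection = ['nutriscore_score'], criteria = 0.5)
# would display which other numeric columns correlate with the nutriscore beyond |r| = 0.5.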
| [
"matplotlib",
"seaborn"
] |
8a7db5869aeeb11e25d4ffc6d84d880a452e26b3 | Python | jiricodes/corona-stats | /testarea.py | UTF-8 | 3,541 | 2.703125 | 3 | [] | no_license | import json
import os
import fnmatch
import numpy as np
import pandas as pd
import datetime as dt
import matplotlib.pyplot as pl
import statistics
from srcs.load_dailydata import load_daydata
import seaborn as sns
import cufflinks as cf
from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot
def dataframe_tojson(country, data):
data.to_json(f'{country}.json')
with open(f'{country}.json', 'r') as f:
stuff = json.load(f)
with open(f'{country}.json', 'w') as f:
json.dump(stuff, f, indent=4)
# Incubation time avg 5.1 days, median time to die 15 days
# returns calculated value of potentially infected 20 days earlier
# based on given mortality rate (0-100%)
def infected_based_deaths(deaths, ratio):
return (100/ratio) * deaths
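# Illustrative check (hypothetical figures, not real data): with a 1% mortality
# rate, 5 reported deaths give infected_based_deaths(5, 1) = (100/1) * 5 = 500
# people estimated infected roughly 20 days earlier.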
def get_death_change(data, current):
i = 0
for day in data['deaths']:
if day > current:
return i
i += 1
return None
def create_estimate(data, tod, mort_rat, s_death):
new = [0] * len(data.index)
start = get_death_change(data, s_death)
if not start or start < 20:
return None
new[start - 20] = int(infected_based_deaths(data['deaths'][start], mort_rat))
i = start - 20
k = 1
while i + k < len(new):
new[i + k] = int((k * new[i] / tod) + new[i])
if k == tod:
i = i + k
k = 1
else:
k += 1
i = start - 20
k = 1
while i - k >= 0:
value = int(new[i] - (k * new[i] / 2 / tod))
if value < 0:
break
else:
new[i - k] = value
if k == tod:
i = i - k
k = 1
else:
k += 1
return new
def get_median_estimate(data):
est = list()
i = 0
while i < len(data[0]):
tmp = list()
for line in data:
tmp.append(line[i])
m = statistics.median_grouped(tmp)
est.append(m)
i += 1
return est
if __name__ == "__main__":
cf.go_offline()
all_days = list()
for file in os.listdir('daily-stats/'):
if fnmatch.fnmatch(file, '*.json'):
all_days.append(load_daydata(f"daily-stats/{file}"))
all_data = pd.concat(all_days).groupby(['countryRegion', 'date']).sum()
ch = all_data.loc['China']
ch.iplot()
# all_data = pd.concat(all_days).groupby(['countryRegion']).sum()
# print(all_data)
# countries_interest = ['Belgium', 'US', 'Italy', 'Iran']
# data_sets = list()
# for country in countries_interest:
# new = all_data.loc[country]
# all_estimates = list()
# d = 0
# while d < new['deaths'][-1]:
# est = create_estimate(new, 7, 1, d)
# all_estimates.append(est)
# d = new['deaths'][get_death_change(new, d)]
# med_est = get_median_estimate(all_estimates)
# new['estimate'] = med_est
# data_sets.append(new)
# print(new)
# tmat = all_data.pivot_table(index='countryRegion', columns='date', values='deaths').fillna(value=0)
# print(tmat)
# sns.heatmap(tmat, cmap='coolwarm')
# sns.jointplot(x='deaths',y='confirmed',data=all_data,kind='kde')
# # Plottings
# rows = int(len(data_sets)/3) + 1
# fig, ax = pl.subplots(ncols=3, nrows=2)
# i = 0
# while i < len(countries_interest):
# ax[int(i/3)][int(i%3)].plot(data_sets[i].index, data_sets[i]['confirmed'], label=f'{countries_interest[i]} Confirmed')
# # ax.plot(data_sets[i].index, data_sets[i]['recovered'], label=f'{countries_interest[i]} Recovered')
# # ax.plot(data_sets[i].index, data_sets[i]['deaths'], label=f'{countries_interest[i]} Deaths')
# # ax[i].plot(data_sets[i].index, data_sets[i][f'estimate'], label=f'{countries_interest[i]} Estimated Infected')
# ax[int(i/3)][int(i%3)].set_title(f'{countries_interest[i]}')
# i += 1
# pl.tight_layout()
# # fig.legend(bbox_to_anchor=(-0.15, 0.25, 0.5, 0.5))
pl.show() | [
"matplotlib",
"seaborn"
] |
366f41b56c1bc5b73a520997a1ff2869d50a33c4 | Python | tommybutler/mlearnpy2 | /home--tommy--mypy/mypy/lib/python2.7/site-packages/matplotlib/tests/test_offsetbox.py | UTF-8 | 3,271 | 2.609375 | 3 | [
"Unlicense"
] | permissive | from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.testing.decorators import image_comparison
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import matplotlib.lines as mlines
from matplotlib.offsetbox import AnchoredOffsetbox, DrawingArea
@image_comparison(baseline_images=['offsetbox_clipping'], remove_text=True)
def test_offsetbox_clipping():
# - create a plot
# - put an AnchoredOffsetbox with a child DrawingArea
# at the center of the axes
# - give the DrawingArea a gray background
# - put a black line across the bounds of the DrawingArea
# - see that the black line is clipped to the edges of
# the DrawingArea.
fig, ax = plt.subplots()
size = 100
da = DrawingArea(size, size, clip=True)
bg = mpatches.Rectangle((0, 0), size, size,
facecolor='#CCCCCC',
edgecolor='None',
linewidth=0)
line = mlines.Line2D([-size*.5, size*1.5], [size/2, size/2],
color='black',
linewidth=10)
anchored_box = AnchoredOffsetbox(
loc=10,
child=da,
pad=0.,
frameon=False,
bbox_to_anchor=(.5, .5),
bbox_transform=ax.transAxes,
borderpad=0.)
da.add_artist(bg)
da.add_artist(line)
ax.add_artist(anchored_box)
ax.set_xlim((0, 1))
ax.set_ylim((0, 1))
def test_offsetbox_clip_children():
# - create a plot
# - put an AnchoredOffsetbox with a child DrawingArea
# at the center of the axes
# - give the DrawingArea a gray background
# - put a black line across the bounds of the DrawingArea
# - see that the black line is clipped to the edges of
# the DrawingArea.
fig, ax = plt.subplots()
size = 100
da = DrawingArea(size, size, clip=True)
bg = mpatches.Rectangle((0, 0), size, size,
facecolor='#CCCCCC',
edgecolor='None',
linewidth=0)
line = mlines.Line2D([-size*.5, size*1.5], [size/2, size/2],
color='black',
linewidth=10)
anchored_box = AnchoredOffsetbox(
loc=10,
child=da,
pad=0.,
frameon=False,
bbox_to_anchor=(.5, .5),
bbox_transform=ax.transAxes,
borderpad=0.)
da.add_artist(bg)
da.add_artist(line)
ax.add_artist(anchored_box)
fig.canvas.draw()
assert not fig.stale
da.clip_children = True
assert fig.stale
def test_offsetbox_loc_codes():
# Check that valid string location codes all work with an AnchoredOffsetbox
codes = {'upper right': 1,
'upper left': 2,
'lower left': 3,
'lower right': 4,
'right': 5,
'center left': 6,
'center right': 7,
'lower center': 8,
'upper center': 9,
'center': 10,
}
fig, ax = plt.subplots()
da = DrawingArea(100, 100)
for code in codes:
anchored_box = AnchoredOffsetbox(loc=code, child=da)
ax.add_artist(anchored_box)
fig.canvas.draw()
| [
"matplotlib"
] |
cbe76809e5323c14232c437c0ae34c77e95e4c72 | Python | masonwang96/Backpropagation | /LinearRegression.py | UTF-8 | 2,304 | 3.5 | 4 | [] | no_license | import math
import numpy as np
import matplotlib.pyplot as plt
class LinearRegression():
def __init__(self, x, y, learning_rate, num_iterations):
self.x = x
self.y = y
self.learning_rate = learning_rate
self.num_iterations = num_iterations
self.W = np.zeros(shape=(1, 1)) # m x 1
self.b = np.zeros(shape=(1, 1)) # 1 x 1
def sigmoid(self, x):
        return 1 / (1 + np.exp(-x))  # np.exp so the method also works on numpy arrays
def sigmoid_derivative(self, x):
s = self.sigmoid(x)
return s * (1-s)
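    # Quick sanity check (illustrative values): sigmoid(0) = 0.5, so
    # sigmoid_derivative(0) = 0.5 * (1 - 0.5) = 0.25.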
    def propagate(self):
        # num of samples
        m = self.x.shape[0]

        # Forward prop: x is (m, 1) and W is (1, 1), so Z and A are (m, 1)
        Z = np.dot(self.x, self.W) + self.b
        A = self.sigmoid(Z)
        loss = 1/m * np.dot((A - self.y).T, A - self.y)

        # Backward prop (chain rule through the squared error and the sigmoid)
        dL_dA = 2 * (A - self.y)
        dA_dZ = self.sigmoid_derivative(Z)
        dZ_dW = self.x
        dZ_db = 1

        dW = (1/m) * np.dot(dZ_dW.T, dL_dA * dA_dZ)
        db = (1/m) * np.sum(dL_dA * dA_dZ * dZ_db)

        loss = np.squeeze(loss)  # remove the single-dimensional entries of the (1, 1) loss, i.e. drop dimensions of size 1
        grads = {'dW': dW, 'db': db}

        return grads, loss
    def optimize(self):
        losses = []
        for i in range(self.num_iterations):
            # Generate grads
            grads, loss = self.propagate()
            dW = grads['dW']
            db = grads['db']

            # Update params
            self.W = self.W - dW * self.learning_rate
            self.b = self.b - db * self.learning_rate

            # Record losses
            losses.append(loss)
            if i % 10 == 0:
                print('Loss after iteration %i: %f' % (i, loss))

        print('Training finished!!!')
        params = {'W': self.W, 'b': self.b}
        grads = {'dW': dW, 'db': db}

        return params, grads, losses
    def predict(self, x):
        # num of samples
        m = x.shape[0]
        pred = np.zeros((m, 1))

        # Convert the model outputs A[i, 0] into the returned predictions pred[i, 0]
        A = self.sigmoid(np.dot(x, self.W) + self.b)
        for i in range(m):
            pred[i, 0] = A[i, 0]
        return pred
if __name__ == '__main__':
x_data = np.linspace(-1, 1, 300)[:, np.newaxis]
noise = np.random.normal(0, 0.05, x_data.shape)
y_data = 2*x_data - 2 + noise
# y_data = np.square(x_data)- 0.5 + noise
regressor = LinearRegression(x_data, y_data, 0.01, 100)
regressor.optimize()
# Plot data
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
ax.scatter(x_data, y_data)
prediction_value = regressor.predict(x_data)
lines = ax.plot(x_data, prediction_value, 'r-', lw = 5)
plt.show()
| [
"matplotlib"
] |
352d7687d7a06cd9b39f86c0c561d8bb264dc120 | Python | edm8985/IMGS_589 | /general_toolbox/Spectral_Response.py | UTF-8 | 11,486 | 2.875 | 3 | [] | no_license | """
title::
Spectral Response Calculation
description::
    This program will detect a digital count value in a given image,
then using the spectral power distribution, calculate the
spectral response of that sensor for the given wavelengths.
attributes::
TBD
dependencies::
os, os.path, pandas, tkinter, cv2, matplotlib, numpy
build:
Tested and run in python3
author::
Geoffrey Sasaki
copyright::
Copyright (C) 2017, Rochester Institute of Technology
"""
from __future__ import print_function
import sys
import os
import resource
if sys.version_info[0] == 2:
import Tkinter
import ttk
import tkFileDialog as filedialog
else:
import tkinter
from tkinter import filedialog, ttk
import cv2
import matplotlib
matplotlib.use("TkAgg")
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from os.path import splitext, basename
def openAndReadSPD():
"""
This function prompts the user to select the spectral power
distribution excel document or comma seperated values. It then
creates and returns a numpy array of the data while rounding the
wavelengths to the nearest integer, typically in 10nm increments.
"""
spdFile = filedialog.askopenfilename(initialdir=os.getcwd(),
filetypes=[("Excel files", "*.xlsx *.xls"),
("Comma Seperated Values", "*.csv")],
title="Choose the Spectral Power Distribution")
if spdFile == '':
sys.exit()
if spdFile[-4:] == "xlsx" or spdFile[-3:] == "xls":
spdXLS = pd.read_excel(spdFile)
spdArray = spdXLS.as_matrix().transpose()
elif spdFile[-3:] == "csv":
spdArray = np.genfromtxt(spdFile, delimiter=',').transpose()
spdArray = np.nan_to_num(spdArray)
else:
msg = "Unsupported filetype given. Cannot create numpy array."
raise TypeError(msg)
if spdArray.shape[0] != 2 or spdArray.dtype != np.float64:
msg = "The spectral power distribution was not read in correctly"
raise ValueError(msg)
spdArray[0] = np.around(spdArray[0]).astype(int)
#spdDictionary = dict(zip(spdArray[0],spdArray[1]))
return spdArray
def openAndReadTiff(root):
"""
This function prompts the user to select the directory with the tiff
images and creates a list with all of the tiff image locations and
creates a list with all of the images that have been read in.
"""
tiffDir = filedialog.askdirectory(initialdir=os.getcwd())
if tiffDir == '':
sys.exit()
tiffList = []
for file in os.listdir(tiffDir):
if file.endswith(('.tiff','.tif','.TIF','.TIFF')):
tiffList.append(tiffDir +'/'+ file)
tiffList.sort()
root.deiconify()
imageList = []
status_text = tkinter.StringVar()
label = tkinter.Label(root, textvariable=status_text)
label.pack()
progress_variable = tkinter.DoubleVar()
progressbar = ttk.Progressbar(root, variable=progress_variable, maximum=len(tiffList))
progressbar.pack()
for imageFile in tiffList:
im = cv2.imread(imageFile, cv2.IMREAD_UNCHANGED)
imageList.append(im)
progress_variable.set(tiffList.index(imageFile))
status_text.set("Reading in image {0}".format(os.path.basename(imageFile)))
root.update_idletasks()
root.update()
root.withdraw()
return tiffList, imageList
def findBrightestPoint(imageList, tiffList, radius=200, verbose=False):
"""
This function takes in the list of images and then computes the
minimum and maximum digital count and their respective locations
of the gray version of the images. It then finds the brightest
digital count out of all of the images its location.
"""
brightValues = []
brightLocations = []
for image in imageList:
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
#gray = cv2.boxFilter(gray, cv2.CV_8U, (5,5))
minVal, maxVal, minLoc, maxLoc = cv2.minMaxLoc(gray)
brightValues.append(maxVal)
brightLocations.append(maxLoc)
#maxCount = int(np.power(2,int(str(image.dtype)[4:])))
#radius = np.around(image.shape[0] * .05).astype(int)
#cv2.circle(image, maxLoc, radius, (maxCount,maxCount,maxCount), 5)
#cv2.namedWindow("imagebrightest", cv2.WINDOW_AUTOSIZE)
#cv2.imshow("imagebrightest", cv2.resize(image, None,
# fx=.2,fy=.2, interpolation=cv2.INTER_AREA))
#cv2.waitKey(0)
#print("The brightest value found was: {0} at {1}".format(maxVal, maxLoc))
brightestImageIndex = np.argmax(brightValues)
maxLocation = brightLocations[brightestImageIndex]
if verbose:
brightestImage = tiffList[brightestImageIndex]
bIm = cv2.imread(brightestImage, cv2.IMREAD_UNCHANGED)
maxCount = int(np.power(2,int(str(bIm.dtype)[4:])))
radius = np.around(bIm.shape[0] * .05).astype(int)
cv2.circle(bIm, maxLocation, radius, (maxCount,maxCount,maxCount), 5)
cv2.namedWindow("brightestImage", cv2.WINDOW_AUTOSIZE)
cv2.imshow("brightestImage", cv2.resize(bIm, None,
fx=.2,fy=.2, interpolation=cv2.INTER_AREA))
return maxLocation
def calculateMeanDC(imageList, maxLocation, spdArray, radius=200):
"""
This function takes in all of the images, the maximum location,
the spectral power distribution, and a user specified radius.
It then creates a circular mask with the given radius over each
of the images to compute the mean of the circle in the image.
After it computes the mean digital count of the circle, it normalizes
it by the spectral power distribution at that wavelength.
"""
meanDCList = []
meanDCArray = np.empty((len(imageList),3),dtype="float64")
for image in range(len(imageList)):
mask = np.zeros(imageList[image].shape, np.uint8)
cv2.circle(mask, maxLocation, radius, (255,255,255), -1)
mask = cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY)
meanDC = cv2.mean(imageList[image], mask=mask)[:3]
#print(meanDC)
meanDCArray[image, :3] = meanDC
#print(meanDCArray[image, :3])
#meanDC = np.hstack(cv2.mean(image, mask=mask)[:3])
#print(meanDC)
#meanDCArray[imageList.index(image)] = meanDC
#meanDCArray = np.vstack((meanDCArray, meanDC))
#print(meanDCArray)
#meanDCList.append(meanDC)
#meanDCArray[imageList.index(image)] = cv2.mean(image, mask=mask)[:3]
#print("Mean DCs: Blue {0}, Green {1}, Red {2}".format(
# meanDC[0], meanDC[1], meanDC[2]))
#return np.asarray(meanDCList, dtype='float64')
return meanDCArray
def normalizeMeanDC(meanDC, spdArray):
"""
Implements the normalization by spectral power distribution and
the peak normalized relative spectral response.
"""
normSP = np.zeros_like(meanDC, dtype='float64')
normSP[:] = meanDC[:]/spdArray[1].astype('float64')
peakNormalized = np.zeros_like(normSP, dtype='float64')
for band in np.arange(0,peakNormalized.shape[0]):
peakNormalized[band] = normSP[band]/np.amax(normSP[band])
return peakNormalized, normSP
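# Illustrative example (made-up numbers): if one band's mean digital counts are
# [200, 400, 100] and the lamp power at those wavelengths is [2, 4, 4] W, the
# normalized response is [100, 100, 25] DC/W and its peak-normalized form is
# [1.0, 1.0, 0.25].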
def plotSpectralResponse(spdArray, meanDC, normDC, peakNorm):
"""
Show's each of the graphs using matplotlib
"""
plt.ion()
figure1 = plt.figure('Spectral Power Distribution')
x = spdArray[0]
SPD = figure1.add_subplot(1,1,1)
SPD.set_title("Spectral Power Distribution Function")
SPD.set_xlabel("Wavelength [nm]")
SPD.set_ylabel("Power [Watts]")
SPD.set_ylim([0,max(spdArray[1])+.25*max(spdArray[1])])
SPD.set_xlim([min(spdArray[0]),max(spdArray[0])])
SPD.plot(x, spdArray[1], color = 'black')
figure2 = plt.figure('RAW Sensor Spectral Response')
RAW = figure2.add_subplot(1,1,1)
RAW.set_title("RAW sensor spectral response")
RAW.set_xlabel("Wavelength [nm]")
RAW.set_ylabel("Spectral Response [Digital Count]")
RAW.set_ylim([0,max(meanDC.flat)+.25*max(meanDC.flat)])
RAW.set_xlim([min(spdArray[0]),max(spdArray[0])])
RAW.plot(x, meanDC[0], color = 'blue')
RAW.plot(x, meanDC[1], color = 'green')
RAW.plot(x, meanDC[2], color = 'red')
figure3 = plt.figure('Relative Sensor Spectral Response')
RSR = figure3.add_subplot(1,1,1)
RSR.set_title("Relative Sensor spectral response")
RSR.set_xlabel("Wavelength [nm]")
RSR.set_ylabel("Relative Spectral Response [Digital Count/Watt]")
RSR.set_ylim([0,max(normDC.flat)])
RSR.set_xlim([min(spdArray[0]),max(spdArray[0])])
RSR.plot(x, normDC[0], color = 'blue')
RSR.plot(x, normDC[1], color = 'green')
RSR.plot(x, normDC[2], color = 'red')
figure4 = plt.figure('Peak Normalized Relative Sensor Spectral Response')
PRSR = figure4.add_subplot(1,1,1)
PRSR.set_title("Peak Normalized Relative Sensor Spectral Response")
PRSR.set_xlabel("Wavelength [nm]")
PRSR.set_ylabel("Peak Normalized Relative Spectral Response [unitless]")
PRSR.set_ylim([0,max(peakNorm.flat)])
PRSR.set_xlim([min(spdArray[0]),max(spdArray[0])])
PRSR.plot(x, peakNorm[0], color = 'blue')
PRSR.plot(x, peakNorm[1], color = 'green')
PRSR.plot(x, peakNorm[2], color = 'red')
plt.draw()
plt.pause(.001)
plt.show()
def saveData(spdArray, meanDC, normDC, peakNorm, tiffList):
"""
This function writes out the resultant data. They can be found
	separately or in their entirety in a single comma separated value file.
"""
directory = os.path.dirname(tiffList[0])
SpectralResponse = np.concatenate((spdArray, meanDC,
normDC, peakNorm), axis=0).transpose()
meanDC = np.insert(meanDC, 0, spdArray[0], axis=0)
normDC = np.insert(normDC, 0, spdArray[0], axis=0)
peakNorm = np.insert(peakNorm, 0, spdArray[0], axis=0)
np.savetxt(directory+"/SpectralPowerDistribution.csv",
spdArray.transpose(), delimiter=",")
np.savetxt(directory+"/RAWSpectralResponse.csv",
meanDC.transpose(), delimiter=",")
np.savetxt(directory+"/RelativeSpectralResponse.csv",
normDC.transpose(), delimiter=",")
np.savetxt(directory+"/PeakNormalizedRSR.csv",
peakNorm.transpose(), delimiter=",")
np.savetxt(directory+"/SpectralResponse.csv", SpectralResponse,
delimiter=",", header="Wavelength, Power, Raw Blue," + \
"Raw Green, Raw Red, Norm Blue, Norm Green, Norm Red,"+ \
"Peak Blue, Peak Green, Peak Red", comments='')
def flush():
print( 'Press "c" to continue, ESC to exit, "q" to quit')
delay = 100
while True:
k = cv2.waitKey(delay)
# ESC pressed
if k == 27 or k == (65536 + 27):
action = 'exit'
print( 'Exiting ...')
plt.close()
break
# q or Q pressed
if k == 113 or k == (65536 + 113) or k == 81 or k == (65536 + 81):
action = 'quit'
print( 'Quitting ...')
plt.close()
break
# c or C pressed
if k == 99 or k == (65536 + 99) or k == 67 or k == (65536 + 67):
action = 'continue'
print( 'Continuing ...')
plt.close()
break
return action
if __name__ == '__main__':
import time
root = tkinter.Tk()
root.withdraw()
root.geometry('{0}x{1}'.format(400,100))
verbose = True
radius = 200
root.update()
spdArray = openAndReadSPD()
root.update()
startTime = time.time()
tiffList, imageList = openAndReadTiff(root)
if len(tiffList) != len(spdArray[0]):
msg = "The number of images and the number of spectral power" + \
"distributions measured were not the same."
raise ValueError(msg)
maxLocation = findBrightestPoint(imageList, tiffList, radius, verbose)
meanDC = calculateMeanDC(imageList, maxLocation, spdArray, radius)
meanDC = meanDC.transpose()
peakNormalized, normSP = normalizeMeanDC(meanDC, spdArray)
plotSpectralResponse(spdArray, meanDC, normSP, peakNormalized)
saveData(spdArray, meanDC, normSP, peakNormalized, tiffList)
memory = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
print("The memory used is: {0} [mb].".format(memory/1000000))
print("The run time is: {0}".format(time.time()-startTime))
action = flush()
root.destroy()
| [
"matplotlib"
] |
270dd2884ed8cee274b40fa9bc94411663fdaa83 | Python | maciejczyzewski/kck2019 | /bin/3a.py | UTF-8 | 5,175 | 2.921875 | 3 | [] | no_license | import matplotlib
# matplotlib.use("Agg") # So that we can render files without GUI
import matplotlib.pyplot as plt
from matplotlib import rc
import numpy as np
import sys, math
from matplotlib import colors
from collections.abc import Iterable
def plot_color_gradients(gradients, names, height=None):
rc("legend", fontsize=10)
column_width_pt = 400 # Show in latex using \the\linewidth
pt_per_inch = 72
size = column_width_pt / pt_per_inch
height = 0.75 * size if height is None else height
fig, axes = plt.subplots(nrows=len(gradients),
sharex=True,
figsize=(size, height))
fig.subplots_adjust(top=1.00, bottom=0.05, left=0.25, right=0.95)
if not isinstance(axes, Iterable):
axes = [axes]
for ax, gradient, name in zip(axes, gradients, names):
# Create image with two lines and draw gradient on it
img = np.zeros((2, 1024, 3))
for i, v in enumerate(np.linspace(0, 1, 1024)):
img[:, i] = gradient(v)
im = ax.imshow(img, aspect="auto")
im.set_extent([0, 1, 0, 1])
ax.yaxis.set_visible(False)
pos = list(ax.get_position().bounds)
x_text = pos[0] - 0.25
y_text = pos[1] + pos[3] / 2.0
fig.text(x_text, y_text, name, va="center", ha="left", fontsize=10)
plt.show()
fig.savefig("gradients.pdf")
################################################################################
################################################################################
n = lambda x: max(0, min(1, x))
# a - ile osiaga maksymalnie
# b - gdzie jest szczyt
# c - tempo/ciezkosc
def gaussian(x, a, b, c, d=0):
b += 0.00001 # FIXME: ?
return a * math.exp(-(x - b)**2 / (2 * c**2)) + d
def isogradient(v, pallete):
params = isopallete(pallete)
def find_near_k(v, params, k=4):
sort_list = []
for p in params:
diff = abs(v * 255 - p[1])
sort_list.append([diff, p])
result = sorted(sort_list)[0:k]
return [p[1] for p in result]
r = sum([gaussian(v * 255, *p) for p in find_near_k(v, params[0])])
g = sum([gaussian(v * 255, *p) for p in find_near_k(v, params[1])])
b = sum([gaussian(v * 255, *p) for p in find_near_k(v, params[2])])
return (n(int(r) / 255), n(int(g) / 255), n(int(b) / 255))
def isopallete(pallete):
# FIXME: output could be cached
vec_r, vec_g, vec_b = [], [], []
span = len(pallete.keys())
for key, val in pallete.items():
dynamic_param = 255 / (span * 2)
vec_r += [[val[0], key * 255, dynamic_param]]
vec_g += [[val[1], key * 255, dynamic_param]]
vec_b += [[val[2], key * 255, dynamic_param]]
return [vec_r, vec_g, vec_b]
def test_gradient(f):
vec_x = np.arange(0, 1, 0.005)
vec_y1, vec_y2, vec_y3 = np.vectorize(f)(vec_x)
plt.plot(vec_x, vec_y1, color="red")
plt.plot(vec_x, vec_y2, color="green")
plt.plot(vec_x, vec_y3, color="blue")
plot_color_gradients([f], ["test"], height=0.5)
sys.exit()
################################################################################
################################################################################
def hsv2rgb(h, s, v):
c = v * s
x = c * (1 - abs((h / 60) % 2 - 1))
m = v - c
r, g, b = {
0: (c, x, 0),
1: (x, c, 0),
2: (0, c, x),
3: (0, x, c),
4: (x, 0, c),
5: (c, 0, x),
}[int(h / 60) % 6]
return ((r + m), (g + m), (b + m))
def gradient_rgb_bw(v):
return (v, v, v)
def gradient_rgb_gbr(v):
pallete = {0: [0, 255, 0], 0.5: [0, 0, 255], 1: [255, 0, 0]}
return isogradient(v, pallete)
def gradient_rgb_gbr_full(v):
pallete = {
0: [0, 255, 0],
1 * (1 / 4): [0, 255, 255],
2 * (1 / 4): [0, 0, 255],
3 * (1 / 4): [255, 0, 255],
1: [255, 0, 0],
}
return isogradient(v, pallete)
def gradient_rgb_wb_custom(v):
pallete = {
0: [255, 255, 255],
1 * (1 / 7): [255, 0, 255],
2 * (1 / 7): [0, 0, 255],
3 * (1 / 7): [0, 255, 255],
4 * (1 / 7): [0, 255, 0],
5 * (1 / 7): [255, 255, 0],
6 * (1 / 7): [255, 0, 0],
1: [0, 0, 0],
}
return isogradient(v, pallete)
def interval(start, stop, value):
return start + (stop - start) * value
def gradient_hsv_bw(v):
return hsv2rgb(0, 0, v)
def gradient_hsv_gbr(v):
return hsv2rgb(interval(120, 360, v), 1, 1)
def gradient_hsv_unknown(v):
return hsv2rgb(120 - 120 * v, 0.5, 1)
def gradient_hsv_custom(v):
return hsv2rgb(360 * (v), n(1 - v**2), 1)
if __name__ == "__main__":
def toname(g):
return g.__name__.replace("gradient_", "").replace("_", "-").upper()
# XXX: test_gradient(gradient_rgb_gbr_full)
gradients = (
gradient_rgb_bw,
gradient_rgb_gbr,
gradient_rgb_gbr_full,
gradient_rgb_wb_custom,
gradient_hsv_bw,
gradient_hsv_gbr,
gradient_hsv_unknown,
gradient_hsv_custom,
)
plot_color_gradients(gradients, [toname(g) for g in gradients])
| [
"matplotlib"
] |
70754f49fdf1c28847c65e47228c4d1966d71f15 | Python | parksunwoo/dl_from_scratch | /ch04/numerical_diff.py | UTF-8 | 1,239 | 3.734375 | 4 | [] | no_license | import numpy as np
import matplotlib.pylab as plt
# def numerical_diff(f, x):
# h = 10e-50
# return (f(x+h)- f(x)) /h
# def numerical_diff(f, x):
# h = 1e-4
# return (f(x+h) - f(x-h)) / (2*h)
def function_1(x):
return 0.01*x**2 + 0.1*x
def function_2(x):
return x[0] **2 + x[1]**2
def function_tmp1(x0):
return x0*x0 + 4.0**2.0
def function_tmp2(x1):
return 3.0**2.0 + x1*x1
x = np.arange(0.0, 20.0, 0.1)
y = function_1(x)
plt.xlabel("x")
plt.ylabel("f(x)")
plt.plot(x, y)
# plt.show()
# print(numerical_diff(function_1, 5))
# print(numerical_diff(function_1, 10))
# print(numerical_diff(function_tmp1, 3.0))
# print(numerical_diff(function_tmp2, 4.0))
def numerical_gradient(f, x):
h = 1e-4
grad = np.zeros_like(x)
for idx in range(x.size):
tmp_val = x[idx]
# f(x+h)
x[idx] = tmp_val + h
fxh1 = f(x)
# f(x-h)
x[idx] = tmp_val - h
fxh2 = f(x)
grad[idx] = (fxh1 - fxh2) / (2*h)
x[idx] = tmp_val
return grad
print(numerical_gradient(function_2, np.array([3.0, 4.0])))
print(numerical_gradient(function_2, np.array([0.0, 2.0])))
print(numerical_gradient(function_2, np.array([3.0, 0.0])))
| [
"matplotlib"
] |
3b3d9298ac79fec9014781806ded3d05c7cb0381 | Python | anshi-7/Fuzzy-Expert-System | /fuzzy.py | UTF-8 | 3,234 | 2.84375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Thu Oct 29 11:27:20 2020
@author: Anshi Srivastav
"""
import numpy as np
import skfuzzy as fuzz
import matplotlib.pyplot as plt
from skfuzzy import control as ctrl
#Now Antecedent/Consequent objects hold universe variables and membership functions
m = ctrl.Antecedent(np.arange(0, 0.75, 0.05), 'Mean Delay')
s = ctrl.Antecedent(np.arange(0, 1.05, 0.05), 'Number of Servers')
p = ctrl.Antecedent(np.arange(0, 1.05, 0.05), 'Repair Utilization Factor')
n = ctrl.Consequent(np.arange(0, 1.05, 0.05), 'Number of Spares') #this is output.
#Generate fuzzy membership functions
m['Very Small'] = fuzz.trapmf(m.universe, [0, 0, 0.1, 0.3]) #numerical ranges
m['Small'] = fuzz.trimf(m.universe, [0.1, 0.3, 0.5])
m['Medium'] = fuzz.trapmf(m.universe, [0.4, 0.6, 0.7, 0.7])
s['Small'] = fuzz.trapmf(s.universe, [0, 0, 0.15, 0.35])
s['Medium'] = fuzz.trimf(s.universe, [0.3, 0.5, 0.7])
s['Large'] = fuzz.trapmf(s.universe, [0.6, 0.8, 1, 1])
p['Low'] = fuzz.trapmf(p.universe, [0, 0, 0.4, 0.6])
p['Medium'] = fuzz.trimf(p.universe, [0.4, 0.6, 0.8])
p['High'] = fuzz.trapmf(p.universe, [0.6, 0.8, 1, 1])
n['Very Small'] = fuzz.trapmf(n.universe, [0, 0, 0.1, 0.3])
n['Small'] = fuzz.trimf(n.universe, [0, 0.2, 0.4])
n['Rarely Small'] = fuzz.trimf(n.universe, [0.25, 0.35, 0.45])
n['Medium'] = fuzz.trimf(n.universe, [0.3, 0.5, 0.7])
n['Rarely Large'] = fuzz.trimf(n.universe, [0.55, 0.65, 0.75])
n['Large'] = fuzz.trimf(n.universe, [0.6, 0.8, 1])
n['Very Large'] = fuzz.trapmf(n.universe, [0.7, 0.9, 1, 1])
m.view()
s.view()
p.view()
n.view()
#Rules
rule1 = ctrl.Rule(p['Low'], n['Small']) # in the form of if-then
rule2 = ctrl.Rule(p['Medium'], n['Medium'])
rule3 = ctrl.Rule(p['High'], n['Large'])
rule4 = ctrl.Rule(m['Very Small'] & s['Small'], n['Very Large'])
rule5 = ctrl.Rule(m['Small'] & s['Small'], n['Large'])
rule6 = ctrl.Rule(m['Medium'] & s['Small'], n['Medium'])
rule7 = ctrl.Rule(m['Very Small'] & s['Medium'], n['Rarely Large'])
rule8 = ctrl.Rule(m['Small'] & s['Medium'], n['Rarely Small'])
rule9 = ctrl.Rule(m['Medium'] & s['Medium'], n['Small'])
rule10 = ctrl.Rule(m['Very Small'] & s['Large'], n['Medium'])
rule11 = ctrl.Rule(m['Small'] & s['Large'], n['Small'])
rule12 = ctrl.Rule(m['Medium'] & s['Large'], n['Very Small'])
rule1.view()
rule2.view()
rule3.view()
rule4.view()
rule5.view()
#Rule Application
#What would be the number of spares in the following circumstances:
##
#Mean Delay was 0.5
#Number of Servers was 0.3
#Repair Utilization Factor was 0.2
#We need the activation of our fuzzy membership function at these values.
ServiceCentre_ctrl = ctrl.ControlSystem([rule1, rule2, rule3, rule4, rule5, rule6, rule7, rule8, rule9, rule10, rule11, rule12])
ServiceCentre = ctrl.ControlSystemSimulation(ServiceCentre_ctrl)
#Pass inputs to the ControlSystem using Antecedent labels with Pythonic API
ServiceCentre.input['Mean Delay'] = 0.5
ServiceCentre.input['Number of Servers'] = 0.3
ServiceCentre.input['Repair Utilization Factor'] = 0.2
#Crunch the numbers
ServiceCentre.compute()
t=(ServiceCentre.output['Number of Spares'])
print("This is available number of spares:",t)
n.view(sim=ServiceCentre)
| [
"matplotlib"
] |
6e8e038ee76418a81b5cfe6701dbba826a83886b | Python | leamotta/algo3-tp1 | /algo3-tp1/src/ejemplo-graficos.py | UTF-8 | 576 | 3.59375 | 4 | [] | no_license | import numpy as np
import matplotlib.pyplot as plt
t = np.arange(1., 9., 1.)
plt.plot(t, t, 'r--', t, t*np.log2(t), 'bs', t, t**2, 'g^')
#'r--' stands for r = red and -- because the line is drawn dashed; '-' is a solid line
#'bs' stands for b = blue and s = square markers
#'g^' stands for g = green and ^ because the markers are triangles
plt.show()
t2 = [8,20,22,25,32,34,42,55]
plt.plot(t,t*10,'b-',t,t2,'g--')
plt.show()
#in this graph we plot the points (1,8), (2,20), (3,22)... up to (8,55)
#together with the curve representing a linear complexity, which passes above that curve
| [
"matplotlib"
] |
19970388b3e772d28b816438d3ec0b0ab1a9941d | Python | vishalbelsare/distance-metrics | /src/standard.py | UTF-8 | 1,828 | 2.65625 | 3 | [
"MIT"
] | permissive | # matplotlib backtest for missing $DISPLAY
import matplotlib
matplotlib.use('TkAgg')
import numpy as np
from reader import fetch_data
from normaliser import normalise
from sklearn.decomposition import PCA
from sklearn.metrics import confusion_matrix
import matplotlib.pyplot as plt
import seaborn as sns
sns_blue, sns_green, sns_red, _, _, _ = sns.color_palette("muted")
sns.set_style("ticks")
plt.rcParams['figure.figsize'] = [6.0, 12.0]
fig, axes = plt.subplots(nrows=4, ncols=2)
tuples = [(axes[0, 0], 'none', 'Raw'),
(axes[0, 1], 'l2', 'L2 Normalised'),
(axes[1, 0], 'l1', 'L1 Normalised'),
(axes[1, 1], 'max', '$L_{\infty}$ Normalised'),
(axes[2, 0], 'standard', 'Standardardised'),
(axes[2, 1], 'maxabs', 'Maximum Absolute Value Scaled'),
(axes[3, 0], 'minmax', 'Minimum to Maximum Values Scaled'),
(axes[3, 1], 'robust', 'IQR and Median Scaled')]
for ax, method, title in tuples:
data = normalise(data=fetch_data(), method=method)
X_train, y_train = data['train']
X_test, y_test = data['test']
pca = PCA(n_components=2)
W_train = pca.fit_transform(X_train)
W_test = pca.transform(X_test)
_drawn = [False, False, False]
col = [sns_blue, sns_green, sns_red]
for w, y in zip(W_train, y_train):
if not _drawn[y - 1]:
ax.scatter(w[0], w[1], c=col[y - 1],
label='%s' % (y + 1))
_drawn[y - 1] = True
else:
ax.scatter(w[0], w[1], c=col[y - 1])
ax.legend(frameon=True)
# ax.set_xlabel('$w_{1}$')
# ax.set_ylabel('$w_{2}$')
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
ax.set_title(title)
plt.savefig('data/out/standard_comparison.pdf',
format='pdf', dpi=300, transparent=True)
| [
"matplotlib",
"seaborn"
] |
8f91af8da3d4107fcfd31550732c3d23bc34f8fa | Python | FernCarrera/Filters | /tools/graphs/liveg_v2.py | UTF-8 | 3,127 | 2.921875 | 3 | [] | no_license |
from Kalman import One_D_Kalman
from tracking_dog.Dog import Dog
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
import matplotlib
import numpy as np
dog = Dog(measurement_var=500.5,process_var=30.0)
kal = One_D_Kalman()
dt = 1 # 30 frames per second
fig = plt.figure()
ax = fig.add_subplot(111)
ax.grid()
line, = ax.plot([],[],'ro')
time_text = ax.text(0.02, 0.95, '', transform=ax.transAxes)
times = []
x_pos = []
sensor_pos = []
kalman_pos = []
prev = 0
patches = x_pos + sensor_pos + kalman_pos
states_to_track = 3 # make this input to class
lines = []
plotcols = ["0.8","orange","black"]
names = ['Sensor Data','Actual Movement','kalman_estimate']
markers = ['o','_',',']
# make lines of states to track
for index in range(states_to_track):
state_set = plt.plot([],[],color=plotcols[index],marker=markers[index],label=names[index])[0]
lines.append(state_set)
def init():
'''init animation'''
for line in lines:
line.set_data([],[])
time_text.set_text('')
return patches
x = (50.0,20.0**2)
var = []
def animate(i):
''' animation epoch'''
global dog,dt,x
dog.move(dt)
sensor = dog.move_and_sense() # makes dog move and return sensor data
prior = kal.predict(x,(0,0.40))
x = kal.update(prior,(sensor[1],1.5))
times.append(i)
x_pos.append(sensor[0])
sensor_pos.append(sensor[1])
kalman_pos.append(x[0])
var.append(x[1])
data = [sensor_pos,x_pos,kalman_pos]
time_text.set_text('Time = %.1f' % i)
for t,line in enumerate(lines):
#line.set_data(alist[i],blist[i],clist[i])
line.set_data(times,data[t])
time_text.set_text('Frame = %.1f' % i)
return lines,time_text
'''used to define interval time '''
from time import time
t0 = time()
animate(0)
t1 = time()
interval = 1000*dt - (t1-t0)
ani = FuncAnimation(fig,animate,frames=201,interval=dt,init_func=init,repeat=False)
def plot_residuals(Ps,sensor_pos,kalman_pos,y_label='none',stds=1):
    ''' Ps: variance of the estimate, used to draw the +/- stds band
        sensor_pos, kalman_pos: sensor measurements and Kalman estimates; their difference is the plotted residual
'''
res = [a-b for a,b in zip(sensor_pos,kalman_pos)] # calc residual
std = np.sqrt(Ps) * stds # standard deviations
siz = [1]*len(res)
neg = [x*-std for x in siz]
pos = [x*std for x in siz]
plt.plot(range(len(res)),neg, color='k', ls=':', lw=2)
plt.plot(range(len(res)),pos, color='k', ls=':', lw=2)
plt.fill_between(range(len(res)),std,-std,alpha=0.3,color='yellow')
plt.plot(res)
plt.xlabel('time s')
plt.ylabel(y_label)
plt.title('Residuals with: {:d} Standard Deviation'.format(stds))
#plot_residual_limits(data.P[:,col,col],stds)
#set_labels(title,'time (sec), y_label')
plt.xlim(0,200)
plt.ylim(0,400)
plt.xlabel('time s')
plt.ylabel('position m')
plt.title('Simulated Sensor Data')
plt.legend()
plt.show()
plot_residuals(500,sensor_pos,kalman_pos)
plt.show()
plt.xlabel('time s')
plt.ylabel(' m^2')
plt.title('Variance')
plt.plot(var)
plt.show()
print('Variance converged to {:.3f}'.format(var[-1]))
| [
"matplotlib"
] |
e5f58d003ea98d1638cd2ffb1a5a0bafef727932 | Python | craiglongnecker/PythonExamples | /FinanceExample.py | UTF-8 | 2,969 | 3.390625 | 3 | [] | no_license | # Importing packages and creating aliases for the packages
import datetime as dt
import matplotlib.pyplot as plt
from matplotlib import style
from mpl_finance import candlestick_ohlc
import matplotlib.dates as mdates
import pandas as pd
import pandas_datareader.data as web
style.use('ggplot') # Style type
#start = dt.datetime(2009, 1, 1) # Start date
#end = dt.datetime(2018, 12, 31) # End date
#df = web.DataReader('TSLA', 'yahoo', start, end) # Set up data frame using Tesla ticker
#print(df.head()) # Print first five rows in data frame
#print(df.tail()) # Print last five rows in data frame
#df.to_csv('tsla.csv') # Data frame to save Telsa historical data to .csv file
#df = pd.read_csv('tsla.csv') # Read .csv file in data frame
df = pd.read_csv('tsla.csv', parse_dates = True, index_col = 0) # Read .csv file in data frame with dates parsed and columns
#print(df.head()) # Print first five rows in tsla.csv file
#print(df[['Open', 'High']].head()) # Print only Open and High of first five rows of tsla.csv file
#df.plot() # Plot graph of Tesla data
#df['Adj Close'].plot() # Plot graph of Tesla Adjusted Close data
#plt.show() # Show graph of Tesla data
#df['100ma'] = df['Adj Close'].rolling(window = 100).mean() # Data frame for 100 day Moving Average based on Adjusted Close
#df.dropna(inplace = True) # Modify data frame in place as True
#print(df.head()) # Print first five rows in data frame including Moving Average
#print(df.tail()) # Print last five rows in data frame including Moving Average
#df['100ma'] = df['Adj Close'].rolling(window = 100, min_periods = 0).mean() # Data frame for 100 day Moving Average based on Adjusted Close with minimum periods
#print(df.head()) # Print first five rows in data frame including Moving Average with minimum periods
#print(df.tail()) # Print last five rows in data frame including Moving Average with minimum periods
df_ohlc = df['Adj Close'].resample('10D').ohlc() # Data frame for Adjusted Close resampled over 10 days for open, high, low, and close
df_volume = df['Volume'].resample('10D').sum() # Data frame for Volume resampled over 10 days for sum
df_ohlc.reset_index(inplace = True)
#print(df_ohlc.head())
df_ohlc['Date'] = df_ohlc['Date'].map(mdates.date2num)
ax1 = plt.subplot2grid((6, 1), (0, 0), rowspan = 5, colspan = 1) # Create subplot ax1
ax2 = plt.subplot2grid((6, 1), (5, 0), rowspan = 1, colspan = 1, sharex = ax1) # Create subplot ax2 and share with ax1
ax1.xaxis_date() # Take end dates and displays as nice dates
candlestick_ohlc(ax1, df_ohlc.values, width = 2, colorup = 'g') # Takes ax1 and creates values
ax2.fill_between(df_volume.index.map(mdates.date2num), df_volume.values, 0) # Fills values x from 0 to y
plt.show()
#ax1.plot(df.index, df['Adj Close']) # Plot Adjusted Close line graph in red
#ax1.plot(df.index, df['100ma']) # Plot 100 Moving Average line graph in blue
#ax2.bar(df.index, df['Volume']) # Plot Volume bar graph
#plt.show() # Plot line and bar graphs
| [
"matplotlib"
] |
ec07939712cd9a0b08865a3ec60a1cf8faf421ff | Python | matanmula172/Intelligent_traffic_lights | /traffic_management_algorithm.py | UTF-8 | 10,199 | 2.96875 | 3 | [] | no_license | import networkx as nx
import matplotlib.pyplot as plt
import random
from car import *
from consts import *
import numpy as np
# returns true of edged is entering a traffic light node
def entering_traffic_light(edge):
return edge[1][0] == 't'
def insert_cars_into_queue(queue, cars):
for i in range(cars):
global LAST_CAR
global CURRENT_TIME
LAST_CAR += 1
car = Car(CURRENT_TIME, LAST_CAR)
queue.append(car)
return queue
def remove_cars_from_queue(queue, cars):
for i in range(cars):
queue.pop(0)
return queue
def transfer_cars_q1_to_q2(q1, q2, cars):
for i in range(cars):
if len(q1) == 0:
break
car = q1.pop(0)
q2.append(car)
return q1, q2
# returns traffic light id of an edge
def get_traffic_light_id(edge):
if entering_traffic_light(edge):
return edge[1]
return edge[0]
# initializes current flow dict to zeroes
def init_current_flow():
for edge in edges_lst:
current_flow[edge] = list()
def init_distribution_dict():
i = 0
global lambda_lst
for edge in edges_lst:
if not entering_traffic_light(edge):
distribution_dict[edge] = lambda_lst[i]
e = get_succ_edge(edge)
distribution_dict[e] = lambda_lst[i]
i += 1
# initializes loss dict to zeroes
def init_loss_dict():
loss_dict["t1"] = 0
loss_dict["t2"] = 0
# returns a successor edge of edge
def get_succ_edge(edge):
if entering_traffic_light(edge):
return None
for e in edges_lst:
if entering_traffic_light(e) and e[1] == edge[0] and e[0] != edge[1]:
return e
return None
# adds random number of cars (flow - by allowed capacity) to the roads entering the intersection
def add_rand_flow():
# print("add random flow:")
for edge in edges_lst:
if entering_traffic_light(edge):
capacity = edge[2]
added_cars = random.randint(0, capacity - len(current_flow[edge]))
current_flow[edge] = insert_cars_into_queue(current_flow[edge], added_cars)
# print(str(edge) + " add = " + str(added_cars), end=',')
# print()
# redacts random number of cars (flow - by current flow) from the roads exiting
# the intersection
def redact_rand_flow():
for edge in edges_lst:
if not entering_traffic_light(edge):
cars_num = len(current_flow[edge])
redacted_cars = random.randint(0, cars_num)
current_flow[edge] = remove_cars_from_queue(current_flow[edge], redacted_cars)
# adds number of cars (flow - by allowed capacity) to the roads entering the intersection by poisson distribution
def add_poisson_flow():
global IS_LIMITED_CAPACITY
for edge in edges_lst:
if entering_traffic_light(edge):
added_flow = np.random.poisson(distribution_dict[edge], 1)[0]
edge_flow = len(current_flow[edge])
capacity = edge[2]
if IS_LIMITED_CAPACITY and edge_flow + added_flow <= capacity:
added_flow = capacity - edge_flow
current_flow[edge] = insert_cars_into_queue(current_flow[edge], added_flow)
else:
current_flow[edge] = insert_cars_into_queue(current_flow[edge], added_flow)
# redacts number of cars (flow - by current flow) from the roads exiting
# the intersection by poisson distribution
def redact_poisson_flow():
for edge in edges_lst:
if not entering_traffic_light(edge):
cars_num = len(current_flow[edge])
redacted_cars = np.random.poisson(distribution_dict[edge], 1)[0]
if redacted_cars > cars_num:
redacted_cars = cars_num
current_flow[edge] = remove_cars_from_queue(current_flow[edge], redacted_cars)
# given to edges that theirs light has been turned green, updates the number of cars (flow)
# under the assumption all cars have crossed
def update_green_light_flow(succ_edge, edge, quantum):
global IS_LIMITED_CAPACITY
is_limited_capacity = IS_LIMITED_CAPACITY
added_flow = min(quantum, len(current_flow[succ_edge]))
capacity = edge[2]
edge_flow = len(current_flow[edge])
if not is_limited_capacity:
current_flow[succ_edge], current_flow[edge] = transfer_cars_q1_to_q2(current_flow[succ_edge],
current_flow[edge], added_flow)
return
if edge_flow + added_flow <= capacity:
current_flow[succ_edge], current_flow[edge] = transfer_cars_q1_to_q2(current_flow[succ_edge],
current_flow[edge], added_flow)
else:
real_flow = capacity - len(current_flow[edge])
current_flow[succ_edge], current_flow[edge] = transfer_cars_q1_to_q2(current_flow[succ_edge],
current_flow[edge], real_flow)
# given a light that has been switched to green, updates all relevant edge's flows
def green_light(light, quantum):
for edge in edges_lst:
if not entering_traffic_light(edge) and edge[0] == light:
succ_edge = get_succ_edge(edge)
if succ_edge is not None:
update_green_light_flow(succ_edge, edge, quantum)
# init a graph
def create_graph(edges_weighted_lst):
G = nx.DiGraph()
G.add_weighted_edges_from(edges_weighted_lst)
return G
def calculate_queue_weight(queue):
loss = 0
global CURRENT_TIME
for car in queue:
loss += car.get_car_weight(CURRENT_TIME)
return loss
# calculate a loss to each edge
def calculate_edge_loss(edge):
# this edge is not going into the intersection
if edge[0] == "t1" or edge[0] == "t2":
return 0
loss = 0
for e in edges_lst:
if entering_traffic_light(e) and e[1] != edge[1]:
loss += calculate_queue_weight(current_flow[e])
return loss
# switches the lights according to the loss
def switch_lights(quantum):
for edge in edges_lst:
loss = calculate_edge_loss(edge)
loss_dict[get_traffic_light_id(edge)] += loss
if loss_dict["t1"] < loss_dict["t2"]:
green_light("t1", quantum)
else:
green_light("t2", quantum)
# plots the graph
def plot_graph(graph):
nx.draw_networkx(graph, node_color='green')
plt.show()
def get_avg_waiting_time(edge):
global CURRENT_TIME
queue = current_flow[edge]
total_waiting_time = 0
if len(queue) == 0:
return 0
for car in queue:
total_waiting_time += (CURRENT_TIME - car.get_arrival_time())
avg = float(total_waiting_time) / float(len(queue))
return avg
def get_max_waiting_time(edge):
global CURRENT_TIME
queue = current_flow[edge]
if len(queue) == 0:
return 0
return CURRENT_TIME - queue[0].get_arrival_time()
def plot_all(edge, avg_time_quantum, min_q, max_q, stat_type, is_poisson=False):
    my_time = list(range(avg_time_quantum.shape[1]))
    for q in range(min_q, max_q):
        y_axis = avg_time_quantum[q]
        plt.plot(my_time, y_axis, label=str("line " + str(q)))
    if is_poisson:
        plt.title("lambda = " + str(distribution_dict[edge]))
    # build the legend only after all lines have been plotted
    plt.legend()
    plt.savefig(str(edge[0]) + ' to ' + str(edge[1]) + '_' + stat_type + '.png')
    plt.clf()
    # plt.show()
def stat_avg_quantum():
global CURRENT_TIME
global IS_POISSON
is_poisson = IS_POISSON
max_q = 20
min_q = 10
time_limit = 100
for e in edges_lst:
avg_time_quantum = np.zeros((max_q, time_limit))
for q in range(min_q, max_q):
for i in range(100):
CURRENT_TIME = 0
init_distribution_dict()
init_current_flow()
init_loss_dict()
for time in range(time_limit):
CURRENT_TIME += 1
if IS_LIMITED_CAPACITY:
add_rand_flow()
redact_rand_flow()
else:
add_poisson_flow()
redact_poisson_flow()
switch_lights(q)
avg_time_quantum[q][time] += get_avg_waiting_time(e)
avg_time_quantum = avg_time_quantum / 100
plot_all(e, avg_time_quantum, min_q, max_q, "avg", is_poisson)
def stat_max_quantum():
global CURRENT_TIME
global IS_POISSON
is_poisson = IS_POISSON
max_q = 20
min_q = 10
time_limit = 100
for e in edges_lst:
avg_time_quantum = np.zeros((max_q, time_limit))
for q in range(min_q, max_q):
for i in range(100):
CURRENT_TIME = 0
init_distribution_dict()
init_current_flow()
init_loss_dict()
for time in range(time_limit):
CURRENT_TIME += 1
if IS_LIMITED_CAPACITY:
add_rand_flow()
redact_rand_flow()
else:
add_poisson_flow()
redact_poisson_flow()
switch_lights(q)
avg_time_quantum[q][time] += get_max_waiting_time(e)
avg_time_quantum = avg_time_quantum / 100
plot_all(e, avg_time_quantum, min_q, max_q, "max", is_poisson)
def print_stage(stage):
print(stage)
print(CURRENT_TIME)
for l in current_flow.keys():
print(str(l) + " " + str(len(current_flow[l])) + " " + str(calculate_edge_loss(l)))
print()
if __name__ == "__main__":
stat_avg_quantum()
# stat_max_quantum()
init_distribution_dict()
init_current_flow()
init_loss_dict()
for i in range(100):
CURRENT_TIME += 1
if IS_LIMITED_CAPACITY:
add_rand_flow()
print_stage("add")
redact_rand_flow()
print_stage("redact")
else:
add_poisson_flow()
print_stage("add")
redact_poisson_flow()
print_stage("redact")
switch_lights(QUANTUM)
print_stage("switch")
| [
"matplotlib"
] |
1945231e8c10a6614a27d14ee2474e0959758452 | Python | jgoppert/casadi_f16 | /trim.py | UTF-8 | 1,756 | 2.5625 | 3 | [
"BSD-3-Clause"
] | permissive | # pylint: disable=invalid-name, too-many-locals, missing-docstring, too-many-arguments, redefined-outer-name
import time
import matplotlib.pyplot as plt
import casadi as ca
import numpy as np
import f16
# %%
start = time.time()
p = f16.Parameters()
x0, u0 = f16.trim(
s0=[0, 0, 0, 0, 0, 0],
x=f16.State(VT=500, alt=5000),
p=p,
phi_dot=0, theta_dot=0, psi_dot=0.2, gam=0)
print('trim computation time', time.time() - start)
# %%
start = time.time()
f_control = lambda t, x: u0
data = f16.simulate(x0, f_control, p, 0, 10, 0.01)
print('sim computation time', time.time() - start)
state_index = f16.State().name_to_index
plt.figure()
plt.plot(data['x'][:, state_index('p_E')], data['x'][:, state_index('p_N')])
plt.xlabel('E, ft')
plt.ylabel('N, ft')
plt.show()
plt.figure()
plt.plot(data['t'], data['x'][:, state_index('alpha')], label='alpha')
plt.plot(data['t'], data['x'][:, state_index('beta')], label='beta')
plt.plot(data['t'], data['x'][:, state_index('theta')], label='theta')
plt.legend()
plt.show()
plt.figure()
plt.plot(data['t'], data['x'][:, state_index('VT')], label='VT')
plt.legend()
plt.show()
plt.figure()
plt.plot(data['t'], data['x'][:, state_index('phi')], label='phi')
plt.plot(data['t'], data['x'][:, state_index('theta')], label='theta')
plt.plot(data['t'], data['x'][:, state_index('psi')], label='psi')
plt.legend()
plt.show()
# plt.figure()
#plt.plot(data['t'], data['x'][:, state_index('p_E')])
p = f16.Parameters()
u_list = []
VT_list = np.arange(100, 800, 50)
for VT in VT_list:
    x0, u0 = f16.trim(np.zeros(6), f16.State(VT=VT), p, 0, 0, 0, 0)
u_list.append(np.array(u0.to_casadi()))
u_list = np.hstack(u_list)
plt.plot(VT_list, 100*u_list[0, :])
plt.xlabel('VT, ft/s')
plt.ylabel('power, %')
# %%
| [
"matplotlib"
] |
63867fe319a2cdf880a86b2629aaa69cdedc6356 | Python | smakethunter/Poisson | /models/web_parser.py | UTF-8 | 5,024 | 2.796875 | 3 | [] | no_license | # import pandas as pd
# import numpy as np
# import re
# import matplotlib.pyplot as plt
# from scipy.optimize import curve_fit
# from scipy.special import factorial
# #funkcje pomocnicze
# def take_n_split(string,delimiter,pos):
# s=string.split(delimiter)
# return int(s[pos])
# def select_minute(data,minute):
# return data.loc[data['Minutes'] == minute]
#
# data=pd.read_csv('weblog.csv')
# data=data.head(50)
#
# data['Minutes']=data['Time'].apply(lambda x: take_n_split(x,':',2))
# data['Seconds']=data['Time'].apply(lambda x: take_n_split(x,':',3))
# data=select_minute(data,38)
# nr_events=len(data['Seconds'])
#
# logs_per_second=[]
# for i in range (60):
# n_logs=len(data.loc[data['Seconds']==i])
# logs_per_second.append([i,n_logs])
#
# ax=plt.figure()
# plt.scatter(np.array(logs_per_second)[:,0],np.array(logs_per_second)[:,1])
# plt.plot(np.array(logs_per_second)[:,0],np.array(logs_per_second)[:,1])
# plt.show()
# lps=np.array(logs_per_second)
# prob_lps=[i/len(lps) for i in lps[:,1]]
#
# fig=plt.figure()
# plt.scatter(lps[:,0],prob_lps)
#
# plt.show()
#
# # dystrybuanta
# distribution=[]
# dist2=[0]
# distribution.append(logs_per_second[0])
# for i in range(1,len(logs_per_second)):
#
# distribution.append([i,distribution[i-1][1]+logs_per_second[i][1]/nr_events])
# dist2.append(distribution[i-1][1])
# ax2=plt.figure()
# dist=np.array(distribution)
# #plt.plot(dist[:,0],dist[:,1])
# plt.scatter(dist[:,0],dist[:,1])
# d=np.array(dist2)
#
# plt.plot(dist[:,0],d,'bo',mfc='none')
#
# plt.show()
#
# def poisson(k, lamb):
# return (lamb**k/factorial(k)) * np.exp(-lamb)
#
# # fit with curve_fit
#
#
# parameters, cov_matrix = curve_fit(poisson, lps[:,0],prob_lps)
#
# figure2=plt.figure()
# x=np.linspace(0,60,1000)
#
# plt.plot(x, poisson(x, parameters), 'r-', lw=2)
# plt.scatter(lps[:,0], poisson(lps[:,0], parameters))
# plt.show()
#
# %% md
## Statistics and Stochastic Processes, project classes, group 4
# %%
import pandas as pd
import numpy as np
import re
import matplotlib.pyplot as plt
from IPython.core.display import display
from scipy.optimize import curve_fit
from scipy.special import factorial
import sys
if not sys.warnoptions:
import warnings
warnings.simplefilter("ignore")
# helper functions
def take_n_split(string, delimiter, pos):
s = string.split(delimiter)
if s[pos][0] == '0':
return int(s[pos][1])
else:
return int(s[pos])
def select_minute(data, minute):
return data.loc[data['Minutes'] == minute]
def select_hour(data, hour):
return data.loc[data['Hours'] == hour]
def select_time_data(data, n_samples, hour, minute):
new_data = data.head(n_samples)
new_data['Minutes'] = new_data['Time'].apply(lambda x: take_n_split(x, ':', 2))
new_data['Seconds'] = new_data['Time'].apply(lambda x: take_n_split(x, ':', 3))
new_data['Hours'] = new_data['Time'].apply(lambda x: take_n_split(x, ':', 1))
new_data['in_minute'] = new_data['Minutes'].apply(lambda x: x == minute)
new_data['in_hour'] = new_data['Hours'].apply(lambda x: x == hour)
data_out = new_data.loc[(new_data['in_minute'] == True) & (new_data['in_hour'] == True)]
display(data_out)
return data_out, len(data_out)
# %%
data = pd.read_csv('/home/smaket/PycharmProjects/Logi i poisson/weblog.csv')
selected_minute_data, nr_events = select_time_data(data, 50, 13, 38)
logs_per_second = []
for i in range(60):
in_minute = selected_minute_data.apply(lambda x: x['Seconds'] == i, axis=1)
n_logs = len(in_minute[in_minute == True].index)
logs_per_second.append([i, n_logs])
ax = plt.figure()
plt.scatter(np.array(logs_per_second)[:, 0], np.array(logs_per_second)[:, 1])
plt.plot(np.array(logs_per_second)[:, 0], np.array(logs_per_second)[:, 1])
plt.title("Ilość zgłoszeń na sekundę w ciągu minuty")
plt.show()
# %%
lps = np.array(logs_per_second)
prob_lps = [i / len(lps) for i in lps[:, 1]]
fig = plt.figure()
plt.scatter(lps[:, 0], prob_lps)
plt.title("Prawdopodobieństwo zgłoszenia w danej sekundzie")
plt.show()
# %%
# empirical distribution function (CDF)
distribution = []
dist2 = [0]
distribution.append(logs_per_second[0])
for i in range(1, len(logs_per_second)):
distribution.append([i, distribution[i - 1][1] + logs_per_second[i][1] / nr_events])
dist2.append(distribution[i - 1][1])
ax2 = plt.figure()
dist = np.array(distribution)
# plt.plot(dist[:,0],dist[:,1])
plt.scatter(dist[:, 0], dist[:, 1])
d = np.array(dist2)
plt.plot(dist[:, 0], d, 'bo', mfc='none')
plt.title("Dystrybuanta empiryczna")
plt.show()
# %%
def poisson(k, lamb):
return (lamb ** k / factorial(k)) * np.exp(-lamb)
# fit with curve_fit
parameters, cov_matrix = curve_fit(poisson, lps[:, 0], prob_lps)
ax = plt.gca(title='Poisson distribution - fitted curve')
x = np.linspace(0, 60, 1000)
ax.plot(x, poisson(x, parameters),'r',label=f"$\lambda = {parameters}$")
ax.scatter(lps[:, 0], poisson(lps[:, 0], parameters), label='empirically determined points')
ax.legend()
plt.show()
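# Added note (illustrative): the textbook estimate of a Poisson rate is the
# sample mean of the per-second counts; printing it next to the curve_fit
# result gives a quick cross-check of the fit above.
lambda_sample_mean = np.mean(lps[:, 1])
print("curve_fit lambda:", parameters, "| sample mean of counts:", lambda_sample_mean)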
# %% md
| [
"matplotlib"
] |
8a874efb0ef6df1e9763e866f30f78fa9f1f5ab2 | Python | ASU-CompMethodsPhysics-PHY494/final-stern-gerlach-simulation | /work/wavepacket.py | UTF-8 | 2,755 | 3.5 | 4 | [
"CC0-1.0"
] | permissive | ## Here we want to solve Schrodinger eqn for an electron
## in an inhomogenous magnetic field.
## We first analyze the indepent solution first where
## the exponent that depends on time is set to 1.
## We use units where hbar = 1, so that p = k and m = 1
import numpy as np
import matplotlib.pyplot as plt
## we first define the gaussian function that depends on momentum from the literature eqn 10.10
def gaussian(px,py,pz,kx,beta,n):
## the wave is strongly peaked around k, beta is the Gaussian RMS width, and n
## is the dimensions of the gaussian function
g = ((2*np.pi) / beta**(2))**(n/2) *np.exp(-(2*beta**(2))**(-1)*((px-kx)**(2)+py**(2)+pz**(2)))
return g
def energy(px,py,pz):
## stating the definition of kinetic energy
return .5*(px**(2) + py**(2) + pz**(2))
def time_dependence(t, energy):
return np.exp(-t*1j*energy)
def spinor_in(gaussian, time_dependence):
## Here we are defining the spinor function of the electron
## before it enters the space containing the inhomogeneous magnetic field
## the spinor_in is the inverse fourier transform of the gaussian function
## as the width of the gaussian function's width increases the spinor function's
## width decreases and becomes sharply peaked at the center.
A = gaussian*time_dependence
return np.fft.ifft(A)
def psi_in(spinor_in):
## define the spin up spin down coefficients
c_u = 1/2
c_d = -1/2
## place the spinor function in the the spin up sin dwon basis
## to define psi in which is a complex function.
return np.array([c_u*spinor_in, c_d*spinor_in])
def plot_spinor_in(spinor_in,N, figname="plot_of_spinor_in.pdf" ):
"""Plot of spinor_in."""
x = np.fft.fftfreq(N) # defines the sample positions for the inverse fourier transform
xs = np.fft.ifftshift(x) # shifts the zero position to the center
y=(1/N)*(np.abs(np.fft.ifftshift(spinor_in)))**2 # the spionor function is complex
fig = plt.figure() # so we take the absolute value squared and divide by 1/N
ax = fig.add_subplot(111)
ax.plot(xs,y)
ax.set_xlabel('position')
ax.set_ylabel(r'$|\chi_i|^2$')
if figname:
fig.savefig(figname)
print("Wrote figure to {}".format(figname))
def plot_gaussian(p,gaussian, figname="plot_of_gaussian.pdf"):
"""Plot of the gaussian."""
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(p,gaussian)
ax.set_xlabel('momentum')
ax.set_ylabel('gaussian')
if figname:
fig.savefig(figname)
print("Wrote figure to {}".format(figname))
| [
"matplotlib"
] |
a5075c9041ba7a12504d47cb4093f63449d4db0f | Python | AndreyZhunenko/Computer_vision_character_dictionary | /frequency_character_dictionary.py | UTF-8 | 2,516 | 2.78125 | 3 | [] | no_license | import numpy as np
import matplotlib.pyplot as plt
from skimage.measure import label, regionprops
from skimage import measure
from scipy.ndimage import binary_dilation
from skimage.filters import threshold_otsu
def count_holes(symbol):
if hasattr(symbol, "image"):
zero = symbol.image
else:
zero = symbol
zero = ~zero
zones = np.ones((zero.shape[0]+2,
zero.shape[1]+2))
zones[1:-1, 1:-1] = zero
z1 = label(zones)
return np.max(z1)-1
def has_vline(symbol):
image = symbol.image
lines = np.sum(image, 0) // image.shape[0]
return 1 in lines
def is_A(symbol):
image = symbol.image
zones = image[:].copy()
zones[-1,:] = 1
holes = count_holes(zones)
return holes == 2
def count_bays(symbol):
zero=symbol.image
holes = ~zero.copy()
lb = label(holes)
return np.max(lb)
def recognize(symbol):
holes = count_holes(symbol)
if holes == 2:
if has_vline(symbol):
return "B"
else:
return "8"
elif holes == 1:
if is_A(symbol):
return "A"
elif has_vline(symbol):
lomme=symbol.convex_area/(symbol.image.shape[0]*symbol.image.shape[1])
if lomme > 0.85:
return "D"
else:
return "P"
else:
return "0"
elif holes == 0:
if np.all(symbol.image):
return "-"
elif has_vline(symbol):
return "1"
else:
if count_bays(symbol) == 4:
return "X"
elif count_bays(symbol) == 5:
return "W"
else:
arr = symbol.image
ratio = arr.shape[0] / arr.shape[1]
if 0.8 < ratio < 1.2:
return "*"
elif 1.6 < ratio < 2.2:
return "/"
return ""
def main_function_logic():
my_image = plt.imread("symbols.png")
gray = np.average(my_image, 2)
gray[gray>0] = 1
gray = gray.astype("uint8")
my_label = label(gray)
total = np.max(my_label)
arr=np.zeros_like(my_label)
symbols = regionprops(my_label)
recon={"":0}
for symbol in symbols:
sym=recognize(symbol)
if sym not in recon:
recon[sym] = 1
else:
recon[sym] += 1
print()
print(recon)
print()
print("Recognizing rate: {}".format((total - recon[""]) / total))
main_function_logic()
| [
"matplotlib"
] |
62e0bf82f051cff93d89d82adf8ff7f708727693 | Python | codeaudit/automated-statistician | /src/alt_autostat.py | UTF-8 | 4,942 | 2.5625 | 3 | [] | no_license | from bayesian_optimizer import *
from gaussian_process import *
from kernels import *
import time
from sklearn.datasets import make_blobs
from sklearn.neighbors import RadiusNeighborsClassifier # DO THIS
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.cross_validation import train_test_split
from sklearn.metrics import roc_auc_score
import matplotlib.pyplot as plt
# Set up the modules for bayesian optimizer
kernel = SquaredExponential(n_dim=1,
init_scale_range=(.01,.1),
init_amp=1.)
gp = GaussianProcess(n_epochs=100,
batch_size=10,
n_dim=1,
kernel=kernel,
noise=0.05,
train_noise=False,
optimizer=tf.train.GradientDescentOptimizer(0.001),
verbose=0)
bo = BayesianOptimizer(gp, region=np.array([[-1., 1.]]),
iters=100,
tries=10,
optimizer=tf.train.GradientDescentOptimizer(0.1),
verbose=0)
def train(key, x):
model = models[key]
if key in ['svc', 'l1', 'l2']:
log_hyper = x * 7.
hyper = np.exp(log_hyper)
model.set_params(C=hyper)
elif key in ['knn']: # DO THIS
hyper = 7 + (x + 1)/2*23
model.set_params(radius=hyper)
t = time.time()
y_pred = model.fit(X_train, y_train).predict(X_test)
perf = (y_pred == y_test).mean()
t = time.time() - t
print "...Result: perf={0:.5f}, time={1:.5f}".format(perf, t)
return perf, t
def run(key, x):
y, t = train(key, x)
datas[key][0].append(x)
datas[key][1].append(y)
datas[key][2].append(t)
return t
# Make dataset
np.random.seed(1) # DO THIS
data1 = make_blobs(n_samples=10000,
n_features=2, centers=2,
cluster_std=3.0,
center_box=(-10.0, 10.0),
shuffle=True,
random_state=1)
data2 = make_blobs(n_samples=10000,
n_features=2, centers=2,
cluster_std=3.0,
center_box=(-10.0, 10.0),
shuffle=True,
random_state=2)
X = np.vstack((data1[0], data2[0]))
y = np.concatenate((data1[1], data2[1]))
X_train, X_test, y_train, y_test = train_test_split(X, y,
test_size=0.33,
random_state=42)
# Create dataset holder for SVC, logit1, and logit2.
# Data is tuple that contains x, y, t
datas = {'knn' : ([],[],[]),
'l1' : ([],[],[]),
'l2' : ([],[],[])}
models = {'knn' : RadiusNeighborsClassifier(outlier_label=0), # DO THIS
'l1' : LogisticRegression(penalty='l1'),
'l2' : LogisticRegression(penalty='l2')}
# models = {'knn' : RadiusNeighborsClassifier(outlier_label=0)}
# Train the first time
for key in models:
x = np.random.uniform(-1, 1)
run(key, x)
time_left = 60
discount = 0.9
counter = 0
while time_left > 0:
# Figure out the best point of each model:
r_s = -np.inf
key_s = None
x_s = None
for key in models:
x = np.array(datas[key][0]).reshape(-1,1)
y = np.array(datas[key][1]).reshape(-1,1)
bo.fit(x, y)
x_n, _, _ = bo.select()
        if counter % 5 == -1:  # effectively disables the debug plotting below (never true for counter >= 0)
# Plot stuff
x_plot = np.linspace(-1, 1, 100).reshape(-1,1)
y_plot, var_plot = gp.np_predict(x_plot)
ci = var_plot ** .5
plt.plot(x_plot, y_plot)
plt.plot(x_plot, y_plot+ci, 'g--')
plt.plot(x_plot, y_plot-ci, 'g--')
plt.scatter(x, y)
plt.show()
y, y_var = gp.np_predict(x_n)
t, t_var = np.mean(datas[key][2]), np.std(datas[key][2]) + 1e-3
# Thompson sampling
y_n = np.random.normal(y, np.sqrt(y_var))
t_n = np.random.normal(t, np.sqrt(t_var))
r_n = y_n if (time_left - t_n) > 0 else -1
print "{0:4s}: [{1:.3f}, {2:.3f}, {3:.3f}]. Time: {4:.3f}, R: {5:.3f}".format(key,
x_n[0][0],
y[0][0],
y_n,
t_n,
r_n)
if r_n > r_s:
r_s = r_n
key_s = key
x_s = x_n
# Run the model
print "Counter:",counter,
print "Training: {0:s}(x={1:.3f}). Time left: {2:.3f}".format(key_s, x_s[0][0], time_left)
counter += 1
t = run(key_s, x_s[0][0])
time_left -= t
| [
"matplotlib"
] |
12a31b6428d312fd0066c804ae51d86e44ef8579 | Python | yuhsiangfu/regression | /logistic_regression2.py | UTF-8 | 4,824 | 3.125 | 3 | [
"MIT"
] | permissive | """
Exercise: Logistic Regression2
@auth: Yu-Hsiang Fu
@date: 2018/09/20
"""
# --------------------------------------------------------------------------------
# 1.Import packages
# --------------------------------------------------------------------------------
import copy
import matplotlib.pyplot as plt
import numpy as np
import pickle
import sys
# --------------------------------------------------------------------------------
# 2.Const variables
# --------------------------------------------------------------------------------
# program variable
# ROUND_DIGITS = 8
# plot variable
PLOT_X_SIZE = 5
PLOT_Y_SIZE = 5
PLOT_DPI = 300
PLOT_FORMAT = "png"
# --------------------------------------------------------------------------------
# 3.Define function
# --------------------------------------------------------------------------------
def p_deepcopy(data, dumps=pickle.dumps, loads=pickle.loads):
return loads(dumps(data, -1))
def normalize_data(d):
for i in range(d.shape[1]):
mean = np.mean(d[:, i])
sigma = np.std(d[:, i])
d[:, i] = (d[:, i] - mean) / sigma
return d
def matricization(x):
x0 = np.ones([x.shape[0], 1])
x3 = x[:, 0, np.newaxis] ** 2
return np.hstack([x0, x, x3])
def f_w(x, w):
return 1 / (1 + np.exp(-np.dot(x, w)))
def classify(x, w):
return (f_w(x, w) >= 0.5).astype(np.int)
def logistic_regression(x, y, w, rate_learning):
max_epoch = 1000
accuracy_list = list()
# DO UPDATE-WEIGHT
for i in range(max_epoch):
p = np.random.permutation(x.shape[0])
for xi, yi in zip(x[p, :], y[p]):
w = w - rate_learning * np.dot((f_w(xi, w) - yi), xi)
classify_result = (classify(x, w) == y)
classify_accuracy = len(classify_result[classify_result == True]) / len(y)
accuracy_list.append(classify_accuracy)
print(i + 1, w, round(classify_accuracy, 2))
return w, accuracy_list
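# Added sketch (illustrative only, not called by main_function): the same fit
# with full-batch gradient descent instead of the per-sample updates above.
def logistic_regression_batch(x, y, w, rate_learning, max_epoch=1000):
    for _ in range(max_epoch):
        # gradient of the negative log-likelihood over the whole design matrix
        w = w - rate_learning * np.dot(x.T, f_w(x, w) - y)
    return w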
def draw_plot(x, y, w, accuracy_list):
# scatter-plot
fig, ax = plt.subplots(figsize=(PLOT_X_SIZE, PLOT_Y_SIZE), facecolor='w')
# draw plot
x1_p = np.linspace(-2.2, 2)
x2_p = -((w[0] + (w[1] * x1_p) + (w[3] * np.power(x1_p, 2))) / w[2])
plt.plot(x[y == 1, 0], x[y == 1, 1], "o", color="b", ms=7, mew=1)
plt.plot(x[y == 0, 0], x[y == 0, 1], "x", color="r", ms=7, mew=2)
plt.plot(x1_p, x2_p, color="r", ls="--")
# plot setting
ax.grid(color="k", linestyle="dotted", linewidth=0.8, alpha=0.8)
ax.set_xlabel(r"$x_1$", fontdict={"fontsize": 12})
ax.set_ylabel(r"$x_2$", fontdict={"fontsize": 12})
ax.set_xlim(-2.2, 2.0)
ax.set_ylim(-2.2, 1.5)
ax.set_title("Logistic Regression", fontdict={"fontsize": 12})
ax.tick_params(axis="both", direction="in", which="major", labelsize=8)
# legend-text
legend_text = ["class1", "class2", "logistic"]
ax.legend(legend_text, loc=4, fontsize="small", prop={"size": 8}, ncol=1, framealpha=1)
# save image
plt.tight_layout()
plt.savefig("logistic-regression2.png", dpi=PLOT_DPI, format=PLOT_FORMAT)
plt.close()
# --------------------------------------------------
# accuracy-line plot
fig, ax = plt.subplots(figsize=(PLOT_X_SIZE, PLOT_Y_SIZE), facecolor='w')
# draw plot
x0_p = np.arange(len(accuracy_list))
plt.plot(x0_p, accuracy_list, color="r", ls="-", lw=2)
# plot setting
ax.grid(color="k", linestyle="dotted", linewidth=0.8, alpha=0.8)
ax.set_xlabel("Iteration", fontdict={"fontsize": 12})
ax.set_ylabel("Accuracy", fontdict={"fontsize": 12})
ax.set_xlim(-5, len(accuracy_list))
ax.set_ylim(0, 1.1)
ax.set_title("Logistic Regression", fontdict={"fontsize": 12})
ax.tick_params(axis="both", direction="in", which="major", labelsize=8)
# legend-text
legend_text = ["classification accuracy"]
ax.legend(legend_text, loc=4, fontsize="small", prop={"size": 8}, ncol=1, framealpha=1)
# save image
plt.tight_layout()
plt.savefig("logistic-regression2_accuracy.png", dpi=PLOT_DPI, format=PLOT_FORMAT)
plt.close()
# --------------------------------------------------------------------------------
# 4.Main function
# --------------------------------------------------------------------------------
def main_function():
# input, output
d = np.loadtxt("data4.csv", delimiter=",")
x = d[:, 0:2]
y = d[:, 2]
x = normalize_data(x)
X = matricization(x)
# --------------------------------------------------
# logistic regression
rate_learning = 0.01
w = np.random.rand(4)
w, accuracy_list = logistic_regression(X, y, w, rate_learning)
# --------------------------------------------------
# draw scatter-line plot and contour-plot
draw_plot(x, y, w, accuracy_list)
if __name__ == '__main__':
main_function()
| [
"matplotlib"
] |
ed6c91f4ff7ece8594a07cfe57edd42ae7fb627c | Python | gerkamspiano/QuantMacro | /Q1.2PS4.py | UTF-8 | 1,802 | 2.84375 | 3 | [
"MIT"
] | permissive | #NOW ADDING LABOR
import numpy as np
from numpy import vectorize
import sympy as sy
import math as mt
import scipy.optimize as sc
from scipy.optimize import fsolve
import matplotlib.pyplot as plt
import scipy.sparse as sp
# Parametrization of the model:
theeta = 0.679 # labor share
beta = 0.988 # discount factor
delta = 0.013 # depreciation rate
css=0.8341
iss=0.1659
hss=0.29999
kss=12.765
kappa=5.24
v=2.0
ki = np.array(np.linspace(0.1, 20, 100))
kj = np.array(np.linspace(0.1, 20, 100))
hi = np.array(np.linspace(0.01, 0.6, 100))
from itertools import product
Inputs = list(product(hi, kj,ki))
Inputs =np.array(Inputs)
hi=Inputs[:,0]
kj=Inputs[:,1]
ki=Inputs[:,2]
@vectorize
def M(ki,kj,hi):
return np.log(pow(ki, 1-theeta)*pow(hi, theeta) - kj + (1-delta)*ki)-kappa*pow(hi,1+1/v)/(1+1/v)
M = M(ki, kj,hi)
M = np.nan_to_num(M)
M[M == 0] = -100000
M=np.split(M,10000)
for i in range(0,10000):
M[i] =np.reshape(M[i],[1,100])
M=np.reshape(M,[10000,100])
M=np.transpose(M)
V=np.zeros([100,10000])
X = M + beta*V
X = np.nan_to_num(X)
X[X == 0.0000] = -100000
Vs1 = np.max(X, axis=1)
V=np.zeros([100,1])
diffVs = Vs1 - V
count = 0
#Loop:
while count <500:
Vs = Vs1
V=np.tile(Vs,100)
V=np.reshape(V,[10000,1])
V=np.tile(V,100)
V=np.transpose(V)
X = M + beta*V
X = np.nan_to_num(X)
Vs1 = np.amax(X, axis=1)
diffVs = Vs1 - Vs
count = count+ 1
#Plot the capital stock today w.r.t. Value function
ki = np.array(np.linspace(0.1, 20, 100))
plt.figure()
plt.plot(ki, Vs1)
plt.title('Value Function Iteration')
plt.ylabel('Value Function')
plt.xlabel('Capital stock of today')
plt.show()
| [
"matplotlib"
] |
46754a7812f06dd4642789f760a4734f1dcbd9db | Python | Zeekk9/Programas | /Cyl.py | UTF-8 | 602 | 3.21875 | 3 | [] | no_license | # This import registers the 3D projection, but is otherwise unused.
from mpl_toolkits.mplot3d import Axes3D # noqa: F401 unused import
import matplotlib.pyplot as plt
import numpy as np
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
# Create the mesh in polar coordinates and compute corresponding Z.
r = np.linspace(0, 1.25, 50)
p = np.linspace(0, 2*np.pi, 50)
R, P = np.meshgrid(r, p)
Z = np.sqrt(np.clip(1 - R**2 - P**2, 0, None))  # clip so the square root stays real-valued
print(Z.shape)
# Express the mesh in the cartesian system.
X, Y = R*np.cos(P), R*np.sin(P)
# Plot the surface.
ax.plot_surface(X, Y, Z, cmap=plt.cm.YlGnBu_r)
plt.show()
| [
"matplotlib"
] |
5b59e25c30ae30a9e045535b80b7a09f636c9a0a | Python | olivierwitteman/Silverwing | /WaTT/Data/quick_plot.py | UTF-8 | 1,602 | 2.71875 | 3 | [] | no_license | import matplotlib.pyplot as plt
import numpy as np
path = '.'
day = 8
def readinput(ns):
with open('./{!s}jan-Table 1.csv'.format(day), 'r') as d:
inputs = d.readlines()
linenumber, id, r_pwr, deflection = [], [], [], []
for i in range(len(inputs)):
try:
linenumber.append(int(inputs[i].replace(';', ',').split(',')[0][:]))
r_pwr.append(int(inputs[i].replace(';', ',').split(',')[4][:].strip()))
except:
linenumber = linenumber[:i-1]
r_pwr = r_pwr[:i-1]
p = []
for j in ns:
p.append(r_pwr[j])
p.append(r_pwr[j])
print p
return p
def readd():
f0s = []
rpm0s = []
p0s = []
ns = []
with open('./WaTT_{!s}jan.log'.format(day), 'r') as logfile:
dfs = logfile.readlines()
for i in range(len(dfs)):
f0s.append(float(dfs[i].split(',')[8]))
f0s.append(float(dfs[i].split(',')[9]))
rpm0s.append(float(dfs[i].split(',')[3]))
rpm0s.append(float(dfs[i].split(',')[4]))
p0s.append(float(dfs[i].split(',')[5])/2)
p0s.append(float(dfs[i].split(',')[5]) / 2)
ns.append(int(dfs[i].split(',')[0]))
colors = np.array(p0s)
return rpm0s, f0s, colors, ns
rpm0s, f0s, colors, ns = readd()
ps = readinput(ns=ns)
ps.append(20)
ps.append(40)
rpm0s.append(3000)
rpm0s.append(3600)
f0s.append(25)
f0s.append(30)
cs = np.array(ps)
plt.scatter(ps, rpm0s, c=f0s)
plt.ylabel('W0 [rpm]')
plt.xlabel('power setting [%pwm]')
plt.title('Colors show F0 [N]')
plt.colorbar()
plt.show() | [
"matplotlib"
] |
97dbe2e4691735e65fbc242bea845eac5760f287 | Python | antnieszka/Random-Peak-Simulation | /mcm/read_n_draw.py | UTF-8 | 1,939 | 2.765625 | 3 | [
"MIT"
] | permissive | from matplotlib import use
use("Qt5Agg")
import matplotlib.pyplot as plt
import numpy as np
import pickle
from os.path import join
class BullszitError(Exception):
pass
# Annealing
data_an = pickle.load(open(join("..", "res", "1464640516.0188088.p"), "rb")) # 10k
# data_an = pickle.load(open(join("..", "res", "1464643304.7194686.p"), "rb")) # 1k
# Monte Carlo
# data_mc = pickle.load(open(join("..", "res", "1464709910.6499007.p"), "rb")) # 1k
# data_mc = pickle.load(open(join("..", "res", "1464710211.0149205.p"), "rb")) # 10k
data_mc = pickle.load(open(join("..", "res", "1464710544.9451396.p"), "rb")) # better 10k
# data_mc = pickle.load(open(join("..", "res", "1464865217.9277039.p"), "rb")) # 100k
xa, ya = np.array(data_an).T
xm, ym = np.array(data_mc).T
fig = plt.figure()
ax = fig.add_subplot(111)
ax.annotate('Start %.f' % ya[0], xy=(0, ya[0]), xytext=(1250, 1000), arrowprops=dict(facecolor='black'))
hands = []
if data_an:
ya2 = []
best = ya[0]
for e in ya:
if e < best:
best = e
ya2.append(best)
a1, = ax.plot(xa, ya, label="SA score")
a2, = ax.plot(xa, ya2, label="SA best")
hands.append(a1)
hands.append(a2)
ax.annotate('End SA %.f' % ya2[-1], xy=(xa[-1], ya2[-1]), xytext=(7500, 440), arrowprops=dict(facecolor='black'))
if data_mc:
ym2 = []
best = ym[0]
for e in ym:
if e < best:
best = e
ym2.append(best)
# m1, = plt.plot(xm, ym, label="MC score")
m2, = ax.plot(xm, ym2, label="MC best")
# hands.append(m1)
hands.append(m2)
ax.annotate('End MC %.f' % ym2[-1], xy=(xm[-1], ym2[-1]), xytext=(8000, 900), arrowprops=dict(facecolor='black'))
from scipy.signal import medfilt
ym = medfilt(ym, kernel_size=21)
m1, = ax.plot(xm, ym, label="MC score")
hands.append(m1)
plt.xlabel("Time")
plt.ylabel("Quality")
ax.legend(handles=hands)
# plt.savefig("../res/10k_result.png")
plt.show()
| [
"matplotlib"
] |
5355681202baed89e882a6548ab239535bff7800 | Python | NikNo280/TVIMS1 | /part3_3.py | UTF-8 | 1,065 | 3.234375 | 3 | [] | no_license | import matplotlib.pyplot as plt
import numpy as np
from creature_Y import get_Y
np.random.seed(666)
n = 50
Y = get_Y(n)
fig, ax = plt.subplots(1, 1, figsize=(16, 9))
Fn = []
F = []
my_squared_deviation = 0
for i in range(1, n+1):
Fn.append((i - 0.5) / n)
F.append(((Y[i-1])**3+1)/2)
my_squared_deviation += ((Fn[i - 1] - F[i - 1]) ** 2)
my_squared_deviation += 1 / (12 * n)
table_my_squared_deviation = 0.461  # critical value for alpha = 0.05
if my_squared_deviation > table_my_squared_deviation:
    print(str(my_squared_deviation) + " > " + str(table_my_squared_deviation) + "\nTherefore:")
    print("The sample does not follow the theoretical distribution according to the Mises criterion")
else:
    print(str(my_squared_deviation) + " < " + str(table_my_squared_deviation) + "\nTherefore:")
    print("The sample follows the theoretical distribution according to the Mises criterion")
"matplotlib"
] |
1280cfb82e428cefd359e7fef4d46e778fce9868 | Python | copperdong/AudioSegment | /tests/filterbank.py | UTF-8 | 1,803 | 2.8125 | 3 | [
"MIT"
] | permissive | """
Tests creation of an FFT and plotting of it.
"""
import sys
sys.path.insert(0, '../')
import audiosegment
import platform
import os
import unittest
if os.environ.get('DISPLAY', False):
import matplotlib
if platform.system() != "Windows":
matplotlib.use('qt5agg')
import matplotlib.pyplot as plt
# If we are running in Travis and are Python 3.4, we can't use this function,
# so let's skip the test
skip = False
try:
import librosa
cannot_import_librosa = False
except ImportError:
cannot_import_librosa = True
if cannot_import_librosa and os.environ.get("TRAVIS", False) and sys.version_info.minor < 5:
print("Skipping filterbank test for this version of python, as we cannot import librosa.")
skip = True
def visualize(spect, frequencies, title=""):
"""Visualize the result of calling seg.filter_bank() for any number of filters"""
i = 0
for freq, (index, row) in zip(frequencies[::-1], enumerate(spect[::-1, :])):
plt.subplot(spect.shape[0], 1, index + 1)
if i == 0:
plt.title(title)
i += 1
plt.ylabel("Amp @ {0:.0f} Hz".format(freq))
plt.plot(row)
plt.show()
@unittest.skipIf(skip, "Can't run this test for this version of Python.")
class TestFilterBank(unittest.TestCase):
def test_visualize(self):
seg = audiosegment.from_file("furelise.wav")[:25000]
spec, freqs = seg.filter_bank(nfilters=5, mode='log')
if os.environ.get('DISPLAY', False):
visualize(spec, freqs)
def test_no_exceptions(self):
seg = audiosegment.from_file("furelise.wav")[:25000]
_spec, _freqs = seg.filter_bank(nfilters=5, mode='mel')
_spec, _freqs = seg.filter_bank(nfilters=5, mode='log')
if __name__ == "__main__":
unittest.main()
| [
"matplotlib"
] |
0e6cd32a7ca187df483f93dbd272383a612919cf | Python | Zebedeusz/MusicClustering | /feature_extraction/Utilities.py | UTF-8 | 3,571 | 2.90625 | 3 | [] | no_license | # returns array of shape (f,t) e.g. (1025,1295)
def preprocessToKeplerUniFeatures(sound):
from scipy.signal import stft
import numpy
f = 22050
# samplerate to 22kHz
# sound = sound.set_frame_rate(f)
# STFT - window size 2048 samples, hop 512 samples, Hanning
f, t, sound_stft = stft(sound, fs=1.0, window="hann", nperseg=2048, noverlap=512)
# magnitude spectrum
sound_stft_sole = abs(sound_stft)
# linear resolution to logarithmic Cent scale
cent_const = 440 * (pow(2, -57 / 12))
sound_cent_scale = 1200 * numpy.log2(sound_stft_sole / cent_const)
sound_cent_scale = numpy.nan_to_num(sound_cent_scale)
# to logarithmic scale again
sound_log_cent_scale = 20 * numpy.log10(sound_cent_scale)
sound_log_cent_scale = numpy.nan_to_num(sound_log_cent_scale)
# normalization
# switched off as better image without it
# row_sums = sound_log_cent_scale.sum(axis=0)
# sound_log_cent_scale = sound_log_cent_scale / row_sums[numpy.newaxis, :]
# import matplotlib.pyplot as plt
#
# plt.pcolormesh(t, f, sound_log_cent_scale)
# plt.title('STFT Magnitude')
# plt.ylabel('Frequency [Hz]')
# plt.xlabel('Time [sec]')
# plt.show()
return sound_log_cent_scale
def reduce_frequency_bands(wavedata, freq_bands):
import numpy
freq_band_size = len(wavedata) // freq_bands
wavedata_with_reduced_freq_bands = numpy.zeros(shape=(freq_bands, wavedata.shape[1]))
    for freq_band_index in range(freq_bands):
wavedata_with_reduced_freq_bands[freq_band_index] = \
numpy.sum(
wavedata[freq_band_index * freq_band_size:(freq_band_index + 1) * freq_band_size, :], axis=0) \
/ freq_band_size
return wavedata_with_reduced_freq_bands
def percentile_element_wise_for_3d_array(array, percentile):
# percentile as 2d-array calculated element-wise
import math
import numpy
perc = numpy.asarray(array[0])
for i in range(perc.shape[0]):
for j in range(perc.shape[1]):
sorted_i_j_array = numpy.sort(array[:, i, j])
perc[i, j] = sorted_i_j_array[math.floor(percentile * len(sorted_i_j_array))]
return perc
def sort_freq_bands_in_blocks(array, block_size, hop_size):
import numpy
blocks_with_sorted_freq_bands = numpy.zeros(shape=(array[1].size // hop_size + 1, len(array), block_size))
k = 0
for i in range(0, array[1].size, hop_size):
block = array[:, i:(i + block_size)]
sorted_sblock = numpy.zeros(shape=(len(array), block_size))
for j in range(len(block)):
sorted_sblock[j, 0:len(block[j])] = sorted(block[j])
blocks_with_sorted_freq_bands[k] = sorted_sblock
k += 1
return blocks_with_sorted_freq_bands
def varianceblock(data):
import numpy
mean_block = meanblock(data)
time_blocks = len(data)
variance_init = 0
for time_block_index in range(time_blocks):
variance_init += numpy.power(data[time_block_index] - mean_block, 2)
return variance_init / time_blocks
def meanblock(data):
import numpy
time_blocks = len(data)
summed_block = numpy.zeros((len(data[0]), len(data[0][0])))
    for time_block in range(time_blocks):
summed_block = sum_2d_arrays(summed_block, data[time_block])
mean_block = summed_block / time_blocks
return mean_block
def sum_2d_arrays(arr1, arr2):
for i in range(len(arr2)):
for j in range(len(arr2[0])):
arr1[i][j] = arr1[i][j] + arr2[i][j]
return arr1
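# ------------------------------------------------------------------
# Added usage sketch (illustrative only): run a synthetic 22.05 kHz tone
# through the pipeline above just to show the expected array shapes.
# ------------------------------------------------------------------
if __name__ == "__main__":
    import numpy
    fs = 22050
    t = numpy.arange(0, 2.0, 1.0 / fs)
    tone = numpy.sin(2 * numpy.pi * 440 * t)
    spec = preprocessToKeplerUniFeatures(tone)        # (freq_bins, frames)
    bands = reduce_frequency_bands(spec, 24)          # (24, frames)
    blocks = sort_freq_bands_in_blocks(bands, block_size=8, hop_size=4)
    print(spec.shape, bands.shape, blocks.shape)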
| [
"matplotlib"
] |
5a51935562bf5d6d760786e2f6c7ce97e66c7ec5 | Python | priyabask11/ConnectMe | /friendlinkAlgorithmImplementation.py | UTF-8 | 4,590 | 2.75 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Thu Sep 20 13:36:16 2018
@author: RAJKUMAR
"""
from collections import defaultdict
#import matplotlib.pyplot as plt
import csv
import networkx as nx
G = nx.Graph()
import time
start_time = time.time()
def read():
    rows = []
    filename = r"C:\Users\PSK\Documents\Priya Docs\python_pack\trust_sample.csv"
    with open(filename) as csvfile:
        #print("hii")
        csvreader = csv.reader(csvfile, delimiter=',')
        fields = next(csvreader)
        for row in csvreader:
            rows.append(row)
    # the first two header fields name the endpoints of each trust edge
    u = fields[0]
    v = fields[1]
    print('Field names are:' + ', '.join(field for field in fields))
    print(u)
    print(v)
def printsim(sim):
    s = ""
    for i in range(1, len(sim)):
        min1 = 100
        for j in range(1, len(sim)):
            if sim[i][j] < 0.01:
                sim[i][j] = 0.0
            s = s + str(round(sim[i][j], 4)) + " "
            if sim[i][j] < min1 and sim[i][j] != 0.0:
                min1 = round(sim[i][j], 4)
        for j in range(1, len(sim)):
            if round(sim[i][j], 4) > min1:
                # keep only the links whose similarity exceeds the row minimum
                G.add_edge(i, j)
        s = s + "\n"
    # lay out and draw the pruned similarity graph once, after all edges are added
    pos = nx.spring_layout(G, k=0.50, iterations=20)
    nx.draw(G, pos, with_labels=True)
    print(s)
class Graph:
def __init__(self,vertices):
self.V= vertices
self.graph = defaultdict(list)
def addEdge(self,u,v):
self.graph[u].append(v)
def printAllPathsUtil(self, u, v, visited, path):
global updateflag
global lengths
global unow,vnow
global paths
path.append(u)
visited[u]= True
if(u == v):
lengths.append(len(path)-1)
#print str(unow)+","+str(vnow)+": "+"Pathlength: "+str((len(path)-1))
#print(path)
if(updateflag == 1):
#print "yesupdate"
paths[unow][vnow][len(path)-1] = paths[unow][vnow][len(path)-1] + 1
#print paths[unow][vnow][len(path)-1]
else:
for i in self.graph[u]:
if visited[i] == False:
self.printAllPathsUtil(i, v, visited, path)
# Remove current vertex from path[] and mark it as unvisited
path.pop()
visited[u]= False
def printAllPaths(self,u,v):
visited = [False]*(self.V)
path = []
self.printAllPathsUtil(u,v,visited,path)
return path
def computeL(self,n):
global lengths, unow, vnow
for i in range(n):
for j in range(n):
unow = i
vnow = j
self.printAllPaths(i,j)
return max(lengths)
def updateMatrix (self,n):
global updateflag, unow,vnow
updateflag = 1
for i in range(n):
for j in range(n):
unow = i
vnow = j
self.printAllPaths(i,j)
updateflag = 0
def computeSimilarity(self,m,n,paths):
global sim
for i in range(n):
for j in range(n):
deno = 1
for k in range(2,(m+1)):
deno = deno * (n - k)
sim[i][j] = sim[i][j] + (1/((m-1)*1.0)) * ((paths[i][j][m])/(deno*1.0))
def main(self,n,l,paths):
for m in range(2,(l+1)):
self.computeSimilarity(m,n,paths)
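# (Added reference note) computeSimilarity/main above implement the FriendLink
# similarity measure: for nodes i, j in an n-node graph, with l the longest
# path length found by computeL,
#     sim[i][j] = sum over m = 2..l of  1/(m-1) * |paths of length m from i to j| / prod_{k=2..m}(n - k)
# which is exactly what the nested loops evaluate.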
n = int(input("Enter the no of nodes"))
g = Graph(n)
sim = [[0] * n for i in range (n)]
lengths = []
updateflag = 0
filename = r"C:\Users\PSK\Documents\Priya Docs\python_pack\trust_sample.csv"
with open(filename) as csvfile:
csv_reader = csv.reader(csvfile, delimiter=',')
cnt = 0
for row in csv_reader:
if(cnt == 0):
cnt = 1
continue
u = int(row[0])
v = int(row[1])
if(u < n and v < n):
g.addEdge(u,v)
unow = 0
vnow = 0
l = g.computeL(n)
paths = [[[0 for m in range(l+1)] for i in range(n)] for j in range(n)]
g.updateMatrix(n)
g.main(n,l,paths)
printsim(sim)
print("--- %s seconds ---" % (time.time() - start_time))
| [
"matplotlib"
] |
26c4dcb99f68b79d4ffe01af24c72f1fe209f85c | Python | bernardocarvalho/sad-codes | /fftSignal.py | UTF-8 | 1,562 | 2.84375 | 3 | [
"CC0-1.0"
] | permissive | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Mar 10 15:09:38 2018
@author: bernardo
"""
import matplotlib.pyplot as plt
import numpy as np
#%matplotlib auto
#%pylab
font = {'family': 'serif',
'color': 'darkred',
'weight': 'normal',
'size': 10,
}
dt=np.genfromtxt('data_files/90us_sinal2.txt',dtype=np.int16)
Fs=1.0/90e-6
#dt=np.genfromtxt('data_files/173hz_sinal2.txt',dtype=np.int16)
# 2*173 - 420 = -74
# 3*173 - 460 = 59
# 3*173 - 440 = 79
#Fs=173.0
#dt=np.genfromtxt('data_files/173hz_sinalcomplexo.txt',dtype=np.int16)
#Fs=173.0
Ts=1/Fs
dt = dt - 512
signal=dt[:,1]
n = len(signal) # length of the signal
#plt.plot(x, signal )
# matplotlib.org/examples/pylab_examples/subplots_demo.html
# Two subplots, the axes array is 1-d
plt.close('all')
f, (ax1, ax2) = plt.subplots(2, sharex=False)
Y = np.fft.fft(signal)/n # fft computing and normalization
Y = Y[range(int(n/2))] # n//2 perform integer division
#fig, ax = plt.subplots(2, 1)
k= np.arange(n)
T= n/Fs
frq= k/T # two sides frequency range
frq=frq[range(int(n/2))] # one side frequency range
#plt.clf()
ax1.plot(signal,'b') # plotting the spectrum
ax2.plot(frq,abs(Y),'r') # plotting the spectrum
ax2.set_xlabel('Freq / Hz')
#plt.text(40, 20, r'$\cos(2 \pi t) \exp(-t)$', fontdict=font)
#ax2.text(40, 20, r'3*173-460=59', fontdict=font)
#ax2.text(52, 25, r'2*173-420=-74', fontdict=font)
#ax2.text(55, 60, r'3*173-440=79', fontdict=font)
#plt.title('complex signal ')
#plt.title(r'AM Signal, 440 $\pm$ 20 Hz')
#plt.grid()
plt.show()
| [
"matplotlib"
] |
784f16060b4b37c439e7352b64b30f2406d65d27 | Python | KevinJ-Huang/Stactic_learning_homework | /4_ridge_regression.py | UTF-8 | 1,574 | 3.15625 | 3 | [] | no_license | import numpy as np
import math
import matplotlib.pyplot as plt
from sklearn.linear_model import Ridge
def generate():
y = []
x = []
base= 0.041
e = np.random.normal(0, 0.3, 25)
for i in range(25):
x.append(i*base)
y.append(math.sin(2*math.pi*x[i]))
Y = y
y = y+e
return x,y,Y
def coffecient(lamda):
x,y,Y = generate()
X = np.zeros(shape=[25,8])
for i in range(25):
for j in range(8):
X[i,j] = x[i]**j
XT = X.T
I = np.eye(8)
coef = np.dot(np.dot((np.matrix(np.dot(XT,X)+lamda*I)).I,XT),y)
# clf = Ridge(lamda,fit_intercept=False)
# clf.fit(X,y)
# coef = clf.coef_
return coef,X,x
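# (Added note) the matrix expression above is the ridge closed-form solution
#     w_hat = (X^T X + lamda * I)^(-1) X^T y
# on an 8-column polynomial design matrix (degrees 0..7); the commented-out
# sklearn Ridge(fit_intercept=False) call should give essentially the same coefficients.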
# plt.figure()
# for i in range(100):
# x,y,Y = generate()
# plt.plot(x,y)
# plt.show()
coef,X,x = coffecient(0.01)
print(coef)
def select(lamda):
# plt.figure()
y_sum = np.zeros(shape=[25])
for i in range(100):
y_data = []
coef,X,x = coffecient(lamda)
for i in range(25):
y = 0
for j in range(8):
y = y+coef[0,j]*(X[i,j])
y_data.append(y)
y_sum = y_sum+y_data
y_mean = y_sum / 100
# plt.plot(x,y_data,color = 'r')
return x,y_mean,y_data
plt.figure()
# lamda = 0.0001
# x, y_mean = select(lamda)
# plt.plot(x, y_mean, label=lamda)
for lamda in 0.001,0.01,0.1,1:
x,y_mean,y_data = select(lamda)
plt.plot(x,y_mean,label = lamda)
plt.xlabel('x')
plt.ylabel('y')
x,y,Y = generate()
plt.plot(x,Y,label = 'groundtruth',color='black')
plt.legend()
plt.show()
| [
"matplotlib"
] |
23fbcdd8cb6e1381b7352b14db45aa8e23f87265 | Python | PrismaH/tetrisRL | /dqn_agent.py | UTF-8 | 14,711 | 2.625 | 3 | [
"MIT"
] | permissive | # -*- coding: utf-8 -*-
import math
import random
import sys
import os
import shutil
import numpy as np
#import matplotlib
#import matplotlib.pyplot as plt
from collections import namedtuple
from itertools import count
from copy import deepcopy
import warnings
with warnings.catch_warnings():
warnings.filterwarnings("ignore",category=Warning)
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.autograd import Variable
from torch.optim.lr_scheduler import CyclicLR
from ranger import Ranger
from engine import TetrisEngine
width, height = 10, 20 # standard tetris friends rules
engine = TetrisEngine(width, height)
# set up matplotlib
#is_ipython = 'inline' in matplotlib.get_backend()
#if is_ipython:
#from IPython import display
#plt.ion()
# if gpu is to be used
use_cuda = torch.cuda.is_available()
if use_cuda:print("....Using Gpu...")
FloatTensor = torch.cuda.FloatTensor if use_cuda else torch.FloatTensor
LongTensor = torch.cuda.LongTensor if use_cuda else torch.LongTensor
ByteTensor = torch.cuda.ByteTensor if use_cuda else torch.ByteTensor
#Tensor = FloatTensor
######################################################################
# Replay Memory
# -------------
# - ``Transition`` - a named tuple representing a single transition in
# our environment
# - ``ReplayMemory`` - a cyclic buffer of bounded size that holds the
# transitions observed recently. It also implements a ``.sample()``
# method for selecting a random batch of transitions for training.
#
Transition = namedtuple('Transition',
('state', 'action', 'next_state', 'reward'))
class ReplayMemory(object):
def __init__(self, capacity):
self.capacity = capacity
self.memory = []
self.position = 0
def push(self, *args):
"""Saves a transition."""
if len(self.memory) < self.capacity:
self.memory.append(None)
self.memory[self.position] = Transition(*args)
self.position = (self.position + 1) % self.capacity
def sample(self, batch_size):
return random.sample(self.memory, batch_size)
def __len__(self):
return len(self.memory)
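# Quick usage sketch (added; illustrative only):
#   memory = ReplayMemory(10000)
#   memory.push(state, action, next_state, reward)   # store one transition
#   transitions = memory.sample(BATCH_SIZE)          # uniform random minibatch
# Once capacity is reached, the oldest entries are overwritten in FIFO order.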
class Mish(nn.Module):
def __init__(self):
super(Mish, self).__init__()
def forward(self, x):
x = x * (torch.tanh(F.softplus(x)))
return x
class MBConv(nn.Module):
def __init__(self, channels_in, channels_out, kernel_size, expand_ratio, stride=1, padding=1):
super(MBConv, self).__init__()
self.channels_in = channels_in
self.channels_out = channels_out
self.stride = stride
self.conv1 = nn.Conv2d(channels_in, channels_in*expand_ratio, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(channels_in*expand_ratio)
self.conv2 = nn.Conv2d(channels_in*expand_ratio, channels_in*expand_ratio, groups=channels_in*expand_ratio, kernel_size=kernel_size, stride=stride, padding=padding, bias=False)
self.bn2 = nn.BatchNorm2d(channels_in*expand_ratio)
self.conv3 = nn.Conv2d(channels_in*expand_ratio, channels_out, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(channels_out)
self.mish = Mish()
def forward(self, x):
shortcut = x
x = self.conv1(x)
x = self.bn1(x)
x = self.mish(x)
x = self.conv2(x)
x = self.bn2(x)
x = self.mish(x)
x = self.conv3(x)
x = self.bn3(x)
if self.channels_in == self.channels_out and self.stride == 1:
x = x + shortcut
return x
class DQN(nn.Module):
def __init__(self):
super(DQN, self).__init__()
#activ func
self.mish = Mish()
#First Conv
channels_in = 1
self.conv1 = nn.Conv2d(channels_in, int(round(32 * 1.4)), kernel_size=3, stride=2, bias=False, padding=1)
self.bn1 = nn.BatchNorm2d(int(round(32 * 1.4)))
self.layer1 = self.build_block(channels_in=32, channels_out=16, kernel_size=3, depth=1, stride=1, expand_ratio=1, padding=1)
self.layer2 = self.build_block(channels_in=16, channels_out=24, kernel_size=3, depth=2, stride=2, padding=1)
self.layer3 = self.build_block(channels_in=24, channels_out=40, kernel_size=5, depth=2, stride=1, padding=2)
self.layer4 = self.build_block(channels_in=40, channels_out=80, kernel_size=3, depth=3, stride=1, padding=1)
self.layer5 = self.build_block(channels_in=80, channels_out=112, kernel_size=5, depth=3, stride=1, padding=2)
self.layer6 = self.build_block(channels_in=112, channels_out=192, kernel_size=5, depth=4, stride=2, padding=2)
self.layer7 = self.build_block(channels_in=192, channels_out=320, kernel_size=3, depth=1, stride=1, padding=1)
self.conv2 = nn.Conv2d(int(round(320 * 1.4)), int(round(1280 * 1.4)), kernel_size=1, bias=False)
self.bn2 = nn.BatchNorm2d(int(round(1280 * 1.4)))
self.fc1 = nn.Linear(int(round(1280 * 1.4)), engine.nb_actions)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out',
nonlinearity='relu')
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def build_block(self, channels_in, channels_out, kernel_size, depth, stride, expand_ratio=6, padding=1):
block_list = []
for _ in range(int(round(depth * 1.8))):
block_list.append(MBConv(int(round(channels_in * 1.4)), int(round(channels_out * 1.4)), kernel_size=kernel_size, expand_ratio=expand_ratio, stride=stride, padding=padding))
channels_in = channels_out
stride = 1
return nn.Sequential(*block_list)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.layer5(x)
x = self.layer6(x)
x = self.layer7(x)
x = self.conv2(x)
x = self.bn2(x)
x = F.adaptive_avg_pool2d(x, 1)
x = self.fc1(x.view(x.size(0), -1))
return x
######################################################################
# Training
# --------
#
# Hyperparameters and utilities
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# This cell instantiates our model and its optimizer, and defines some
# utilities:
#
# - ``Variable`` - this is a simple wrapper around
# ``torch.autograd.Variable`` that will automatically send the data to
# the GPU every time we construct a Variable.
# - ``select_action`` - will select an action accordingly to an epsilon
# greedy policy. Simply put, we'll sometimes use our model for choosing
# the action, and sometimes we'll just sample one uniformly. The
# probability of choosing a random action will start at ``EPS_START``
# and will decay exponentially towards ``EPS_END``. ``EPS_DECAY``
# controls the rate of the decay.
#
BATCH_SIZE = 128
GAMMA = 0.999
EPS_START = 0.9
EPS_END = 0.05
EPS_DECAY = 200
CHECKPOINT_FILE = 'checkpoint.pth.tar'
steps_done = 0
model = DQN()
print(model)
if use_cuda:
model.cuda()
loss = nn.MSELoss()
optimizer = Ranger(model.parameters(), lr=.001)
scheduler = CyclicLR(optimizer, base_lr=0.01, max_lr=0.06, mode='triangular', cycle_momentum=False)
memory = ReplayMemory(3000)
def select_action(state):
global steps_done
sample = random.random()
eps_threshold = EPS_END + (EPS_START - EPS_END) * \
math.exp(-1. * steps_done / EPS_DECAY)
steps_done += 1
if sample > eps_threshold:
return model(
Variable(state, requires_grad=False).type(FloatTensor)).data.max(1)[1].view(1, 1)
else:
return FloatTensor([[random.randrange(engine.nb_actions)]])
episode_durations = []
'''
def plot_durations():
plt.figure(2)
plt.clf()
durations_t = torch.FloatTensor(episode_durations)
plt.title('Training...')
plt.xlabel('Episode')
plt.ylabel('Duration')
plt.plot(durations_t.numpy())
# Take 100 episode averages and plot them too
if len(durations_t) >= 100:
means = durations_t.unfold(0, 100, 1).mean(1).view(-1)
means = torch.cat((torch.zeros(99), means))
plt.plot(means.numpy())
plt.pause(0.001) # pause a bit so that plots are updated
if is_ipython:
display.clear_output(wait=True)
display.display(plt.gcf())
'''
######################################################################
# Training loop
# ^^^^^^^^^^^^^
#
# Finally, the code for training our model.
#
# Here, you can find an ``optimize_model`` function that performs a
# single step of the optimization. It first samples a batch, concatenates
# all the tensors into a single one, computes :math:`Q(s_t, a_t)` and
# :math:`V(s_{t+1}) = \max_a Q(s_{t+1}, a)`, and combines them into our
# loss. By defition we set :math:`V(s) = 0` if :math:`s` is a terminal
# state.
last_sync = 0
def optimize_model():
global last_sync
if len(memory) < BATCH_SIZE:
return
transitions = memory.sample(BATCH_SIZE)
# Transpose the batch (see http://stackoverflow.com/a/19343/3343043 for
# detailed explanation).
batch = Transition(*zip(*transitions))
# Compute a mask of non-final states and concatenate the batch elements
non_final_mask = ByteTensor(tuple(map(lambda s: s is not None,
batch.next_state)))
# We don't want to backprop through the expected action values and volatile
# will save us on temporarily changing the model parameters'
# requires_grad to False!
non_final_next_states = Variable(torch.cat([s for s in batch.next_state
if s is not None]))
state_batch = Variable(torch.cat(batch.state))
action_batch = Variable(torch.cat(batch.action))
reward_batch = Variable(torch.cat(batch.reward))
# Compute Q(s_t, a) - the model computes Q(s_t), then we select the
# columns of actions taken
state_action_values = model(state_batch).gather(1, action_batch)
# Compute V(s_{t+1}) for all next states.
next_state_values = Variable(torch.zeros(BATCH_SIZE).type(FloatTensor))
with torch.no_grad():
next_state_values[non_final_mask] = model(non_final_next_states).max(1)[0]
# Now, we don't want to mess up the loss with a volatile flag, so let's
# clear it. After this, we'll just end up with a Variable that has
# requires_grad=False
# Compute the expected Q values
expected_state_action_values = (next_state_values * GAMMA) + reward_batch
# Compute Huber loss
loss = F.smooth_l1_loss(state_action_values.squeeze(1), expected_state_action_values)
# Optimize the model
optimizer.zero_grad()
loss.backward()
for param in model.parameters():
param.grad.data.clamp_(-1, 1)
optimizer.step()
if len(loss.data.size())>0 : return loss.data[0]
else : return loss
def optimize_supervised(pred, targ):
optimizer.zero_grad()
diff = loss(pred, targ)
diff.backward()
optimizer.step()
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, 'model_best.pth.tar')
def load_checkpoint(filename):
checkpoint = torch.load(filename)
model.load_state_dict(checkpoint['state_dict'])
try: # If these fail, its loading a supervised model
optimizer.load_state_dict(checkpoint['optimizer'])
memory = checkpoint['memory']
except Exception as e:
pass
# Low chance of random action
#steps_done = 10 * EPS_DECAY
return checkpoint['epoch'], checkpoint['best_score']
if __name__ == '__main__':
# Check if user specified to resume from a checkpoint
start_epoch = 0
best_score = 0
if len(sys.argv) > 1 and sys.argv[1] == 'resume':
if len(sys.argv) > 2:
CHECKPOINT_FILE = sys.argv[2]
if os.path.isfile(CHECKPOINT_FILE):
print("=> loading checkpoint '{}'".format(CHECKPOINT_FILE))
start_epoch, best_score = load_checkpoint(CHECKPOINT_FILE)
print("=> loaded checkpoint '{}' (epoch {})"
.format(CHECKPOINT_FILE, start_epoch))
else:
print("=> no checkpoint found at '{}'".format(CHECKPOINT_FILE))
######################################################################
#
# Below, you can find the main training loop. At the beginning we reset
# the environment and initialize the ``state`` variable. Then, we sample
# an action, execute it, observe the next screen and the reward (always
# 1), and optimize our model once. When the episode ends (our model
# fails), we restart the loop.
f = open('log.out', 'w+')
for i_episode in count(start_epoch):
# Initialize the environment and state
state = FloatTensor(engine.clear()[None,None,:,:])
score = 0
for t in count():
# Select and perform an action
action = select_action(state).type(LongTensor)
# Observations
last_state = state
state, reward, done = engine.step(action[0,0])
state = FloatTensor(state[None,None,:,:])
# Accumulate reward
score += int(reward)
reward = FloatTensor([float(reward)])
# Store the transition in memory
memory.push(last_state, action, state, reward)
# Perform one step of the optimization (on the target network)
if done:
# Train model
if i_episode % 10 == 0:
log = 'epoch {0} score {1}'.format(i_episode, score)
print(log)
f.write(log + '\n')
loss = optimize_model()
print('loss: {}'.format(loss))
# Checkpoint
if i_episode % 100 == 0:
is_best = True if score > best_score else False
save_checkpoint({
'epoch' : i_episode,
'state_dict' : model.state_dict(),
'best_score' : best_score,
'optimizer' : optimizer.state_dict(),
'memory' : memory
}, is_best)
break
f.close()
print('Complete')
#env.render(close=True)
#env.close()
#plt.ioff()
#plt.show()
| [
"matplotlib"
] |
e7d97efb8494e1ae293226976a274bbec7ba5079 | Python | bongkokwei/quantum-loops | /fockStateGen.py | UTF-8 | 4,799 | 3.03125 | 3 | [] | no_license | # Simulation based on Engelkemeier et. al PHYSICAL REVIEW A 102, 023712 (2020)
# Density matrix and measurement operator can be represented as a vector with
# with the coeff in the first element and variable in the second element
# (Pk, xk) = sum(Pk * E(xk)) = rho (Eqn 15)
# We can do the same for Measurement operator = sum(Q_l * E(omega_l))
# This version produces results which fit the paper
import numpy as np
from numpy import random
import math
import matplotlib.pyplot as plt
import scipy.special as sp
from matplotlib.widgets import Slider, Button, RadioButtons
random.seed(654)
def IsNPArray(arr):
# Check if array is np.ndarray
# If not true, then cast arr to np array.
if isinstance(arr, (int,float)):
return np.array([arr])
else:
return np.array(arr)
def initDensityMatrix(numTerms):
# Generating random coeffs such that they sum to unity
P = np.sort(random.dirichlet(np.ones(numTerms)))[::-1]
x = random.uniform(size = numTerms)
return np.stack((P,x), axis=1)
def gainFactor(squeezeParam):
return np.cosh(squeezeParam)**2
def loopConfigOutput(inputRho, heraldRho, squeezeParam):
# From Eqn 31
# squeezeParam is an arrray
# The first two index of the output matrix represents the resulting density
# matrix, the third index represents the squeezeParam
squeezeParam = IsNPArray(squeezeParam)
inputSize = inputRho.shape # (NUMSAMP, k, 2)
heraldSize = heraldRho.shape
numsamp = len(squeezeParam)
gamma = gainFactor(squeezeParam)
pre = np.einsum('ij,ik,i->ijk', inputRho[:,:,0], heraldRho[:,:,0], 1/gamma)
# init variable
v = lambda xk, zl, gamma: (xk + (gamma-1)*zl)/gamma
var = np.array([[[v(x, z, gamma[i])
for z in heraldRho[i,:,1]]
for x in inputRho[i,:,1]]
for i in np.arange(numsamp)])
prefactor = pre.reshape(numsamp, pre.shape[1]*pre.shape[2])
variable = var.reshape(numsamp, var.shape[1]*var.shape[2])
return np.stack((prefactor, variable), axis = 2)
def outputAfterTRoundTrip(inputRho, heraldRho, squeezeParam, T):
# instead of calling loopConfigOutput in main script, user just have to call
# this function once and the output will be in the form of dictionary.
squeezeParam = IsNPArray(squeezeParam)
rho1R = loopConfigOutput(inputRho, heraldRho, squeezeParam)
rhoOut = {'rho1R':rho1R}
hash = 'rho{:0.0f}R'
if T == 1:
return rhoOut
for i in np.arange(2,T+1):
rhoOut[hash.format(i)] = \
loopConfigOutput(rhoOut[hash.format(i-1)], heraldRho, squeezeParam)
return rhoOut
def expectationValue(rho, M):
# The trace of an operator acting on a density matrix can be evaluated with Eqn 20,
# sum_(k,l)(Pk*Ql/(1-xk*wl))
PQ = np.einsum('ij,k->ijk',rho[:,:,0], M[:,0])
xw = np.einsum('ij,k->ijk',rho[:,:,1], M[:,1])
expVal = np.einsum('ijk,ijk->i', PQ, 1/(1-xw))
return expVal
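# Example: the trace tr(rho) corresponds to the identity operator M = np.array([[1, 1]]),
# which is exactly how successProb below normalises the output state.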
def successProb(inputRho, outputRho):
# Success probability = tr(rho_out)/tr(rho_in) =
# (rho_out, identity)/(rho_in, identity)
# (rho, operator) defines the inner-product type functional
id = np.array([[1,1]])
successProb = expectationValue(outputRho, id)/expectationValue(inputRho, id)
return successProb
def fidelity(rho, n):
# Eqn 17 sum(Pk*xk^n)
return np.einsum('ij,ij->i',rho[:,:,0], rho[:,:,1]**n)
def lossChannel(rho, quantumEff):
# Eqn 27
prefactor = rho[:,:,0]/((1-(1-quantumEff)*rho[:,:,1]))
variable = quantumEff*rho[:,:,1]/((1-(1-quantumEff)*rho[:,:,1]))
return np.stack((prefactor, variable), axis=1)
def psuedoPNR(numDetector, kClicks, detectorEff):
# Generate psuedo PNR measurement operator given by eqn 21
# kClicks = num of clicks corresponding to photon-number state
J = np.arange(1,kClicks+1)
prefactor = sp.binom(numDetector, kClicks)*\
sp.binom(kClicks, J)*(-1)**(kClicks-J)
variable = J/numDetector
detectorPOVM = np.stack((prefactor, variable), axis=1)
return detectorPOVM
def getSqueezeParam(pumpPower, beamRad, xEff, refractiveIdx, crystalLen, \
pumpWavelength):
epsilon = 8.8541878128E-12
C = 299792458
pumpIntensity = pumpPower/(np.pi*beamRad**2)
fieldAmp = np.sqrt(pumpIntensity/(2*refractiveIdx*epsilon*C))
angFreq = (2*np.pi)*(C/pumpWavelength)
return ((xEff*angFreq)/(refractiveIdx*C))*fieldAmp*crystalLen
###############################################################################
# pump = np.linspace(50, 2000, 20)*1E-3 #W
# plt.plot(pump*1E3, getSqueezeParam(pump, 150E-6, 14E-12, 1.8, 10E-3, 775E-9),
# '.')
# plt.xlabel('Pump Power (mW)')
# plt.ylabel('$|\zeta|$', fontsize = 20)
# plt.savefig('.\data\zetaVsPump.eps', format='eps', transparent=True)
# plt.show()
| [
"matplotlib"
] |
6ed08fd0be004e7c5c81b7b0780683ad8d283cf0 | Python | yankaics/Tianchi-2016-music | /src/concatenate.py | UTF-8 | 1,174 | 2.765625 | 3 | [] | no_license | import cPickle as cp
import matplotlib.pyplot as plt
prediction_l = open('p2_ets_result_2.csv', 'r').readlines()
# use cPickle.load(<file>) to load object from a file
artists = cp.load(open('cp_artists.txt','r'))
daily_play = cp.load(open('cp_daily_play.txt'))
daily_down = cp.load(open('cp_daily_down.txt'))
daily_col = cp.load(open('cp_daily_col.txt'))
# 20150212
datestr = cp.load(open('cp_datestr.txt'))
future_dates = cp.load(open('cp_future_dates.txt'))
prediction = {}
for artist in artists:
prediction[artist] = []
for line in prediction_l:
prediction[line.split(',')[0]].append(float(line.split(',')[1]))
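# prediction maps each artist to the predicted daily play counts read from the CSV
# (presumably 60 values, matching the 60-day range used in the plot below).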
print "Enter :q to quit, enter a number from 1 - 100 to view the prediction of the artist."
while True:
q = raw_input()
if q == ":q":
print "bye"
break
else:
plt.cla()
artist = artists[int(q)-1]
plt.plot(range(len(daily_play[artist])),daily_play[artist])
i = 0
for date in datestr:
print date,daily_play[artist][i]
i += 1
plt.plot(range(len(daily_play[artist]),len(daily_play[artist]) + 60),prediction[artist], color='red')
plt.show()
| [
"matplotlib"
] |
1214a07119d061104d2d2c2c9efc07b4315db129 | Python | tavolivos/Binding-Energy-Frecuency | /e-frecuency.py | UTF-8 | 1,722 | 2.890625 | 3 | [] | no_license | ###################################
# By: Gustavo E. Olivos-Ramirez #
# [email protected] #
# Lima-Peru #
###################################
import subprocess
import os
import matplotlib.pyplot as pl
import pandas as pd
import numpy as np
font = {'family' : 'DejaVu Sans',
'weight' : 'normal',
'size' : 22}
pl.rc('font', **font)
labels = ['0', '', '1K', '', '2K', '', '3K', '', '4K']
df = pd.read_csv('energies.csv')
bins = [-9, -8, -7, -6, -5, -4, -3,]
names = ['[-8,-9]', '[-7,-8]', '[-6,-7]', '[-5,-6]', '[-4,-5]', '[-3,-4]']
df['Frecuency'] = pd.cut(df['BindingEnergy'], bins, labels = names)
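# pd.cut buckets every binding energy into one of the 1 kcal/mol-wide intervals
# defined above, labelled from [-8,-9] up to [-3,-4].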
fig = pl.figure(figsize=(13,6))
ax = fig.add_subplot()
fig.subplots_adjust(bottom=0.15, left=0.15)
df.Frecuency.value_counts(sort=False, normalize=False).plot(kind='barh', color='salmon')
barplot = df.Frecuency.value_counts(sort=False, normalize=False)
for i, v in enumerate(barplot):
ax.text(v, i, str(v), color='black', fontweight='bold', size='14', va='center')
ax.set_xlabel("Number of ligands (per thousand)")
ax.set_ylabel("Binding Energy\n(kcal/mol)")
ax.set_xlim(0, 4000)
ax.set_xticklabels(labels)
h, l = ax.get_legend_handles_labels()
ax.legend(l)
#ax.tick_params(direction='out', length=6, width=2, colors='r', grid_color='r', grid_alpha=0.5)
subprocess.call ('mkdir IMG-FREC', shell = True)
fig.savefig("IMG-FREC/e-frec.png", format='png', dpi=800, transparent=True)
fig.savefig("IMG-FREC/e-frec.svg", format='svg', dpi=800, transparent=True)
fig.savefig("IMG-FREC/e-frec.pdf", format='pdf', dpi=800, transparent=True)
fig.savefig("IMG-FREC/e-frec.tif", format='tif', dpi=800, transparent=True)
print("Enjoy your plots")
print("Great power brings great responsibility")
| [
"matplotlib"
] |
c014c36f27b6b9cdd993eb03b25924c68d0bb76b | Python | JulV94/ELECH309 | /speedProfile.py | UTF-8 | 1,081 | 3.515625 | 4 | [] | no_license | #!/usr/bin/python
import matplotlib.pyplot as plt
from math import sqrt
def genPosReference(a, d, speed, dt):
if (speed*speed/a < d):
# Trapèze
if (dt < speed/a):
# Acceleration part
return a*dt*dt/2
elif (dt < d/speed):
# Flat speed part
return speed*dt - speed*speed/(2*a)
else:
# Deceleration part
new_dt = dt - d/speed
return d - speed*speed/(2*a) + speed*new_dt - a*new_dt*new_dt/2
#return speed*dt - speed*speed/(2*a)
else:
# Triangular profile
if (dt*dt < d/a):
# Acceleration part
return a*dt*dt/2
else:
# Deceleration part
new_dt = dt - sqrt(d/a)
return d/2 + a*sqrt(d/a)*new_dt - a*new_dt*new_dt/2
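# Example: genPosReference(0.5, 0.6, 0.4, dt) uses the trapezoidal branch because
# 0.4**2 / 0.5 = 0.32 < 0.6; with a distance of d = 0.1 the same call would fall
# through to the triangular branch instead.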
x = []
y = []
d = 0.6
i = 0
pos = genPosReference(0.5, d, 0.4, i)
while (pos < d and i < 3):
x.append(i)
y.append(pos)
i += 0.01
pos = genPosReference(0.5, d, 0.4, i)
plt.plot(x, y)
plt.xlabel('t (s)')
plt.ylabel('Distance (m)')
plt.show()
| [
"matplotlib"
] |
215495d0baa54ac8cbdceed25424914117d4ff88 | Python | angelo6792/PHSX815_Week2 | /python/Distribution_plot.py | UTF-8 | 646 | 3.203125 | 3 | [] | no_license | #! /usr/bin/env python
# imports of external packages to use in our code
import sys
import numpy as np
import matplotlib.pyplot as plt
import math
#import random
from Random2 import Random
random = Random()
#loop Rayleigh through an empty array to create distribution
n=100000
a = []
for x in range(0,n):
a.append(random.Rayleigh())
#plot array
n, bins, patches = plt.hist(a, 50, density=True, facecolor='g', alpha=0.75)
plt.xlabel('x', fontsize = 16, color = "blue")
plt.ylabel('Probability', fontsize = 16, color = "red")
plt.title('Rayleigh distribution')
#plt.legend(['cool green bars'], loc = 'upper left')
plt.grid(True)
plt.show()
| [
"matplotlib"
] |
1053d6f106df26660415149a0830eb09118f330f | Python | tristanfcraig/AM-115-Project-1 | /2016_Dem_Primary.py | UTF-8 | 1,249 | 2.921875 | 3 | [] | no_license | import sys
import csv
import numpy as np
import pandas as pd
import random
import matplotlib.pyplot as plt
import matplotlib.style as style
style.use('fivethirtyeight')
if len(sys.argv) > 1:
if len(sys.argv[1]) == 2:
states = [sys.argv[1]]
else:
states = ['AK','AR','AZ','CA','CO','CT','FL','GA','IA','IL','KY','LA','MD','MI','MN','MO','NC','NH','NJ','NV','NY','OH','OK','PA','TN','TX','UT','VA','WI']
# start_date =
# end_date =
for state in states:
in_file = 'Data/' + state + '.csv'
try:
df = pd.read_csv(in_file, sep=',')
except IOError:
exit("No Data :(")
df = df[df['Sanders'].astype(str).str.contains('-')==False]
df = df[df['Clinton'].astype(str).str.contains('-')==False]
clinton = df['Clinton'].tolist()
sanders = df['Sanders'].tolist()
t = []
spread = []
for i in range(0,len(clinton)):
spread.append(abs(float(clinton[i]) - float(sanders[i])))
t.append(i)
spread = spread[::-1]
plt.figure(figsize=(15, 10))
title = 'Clinton vs. Sanders Polling Results (' + state + ')'
plt.title(title)
plt.autoscale()
plt.ylabel('% Spread')
plt.xlabel('Time')
plt.tick_params(labelbottom=False)
plt.ylim(0,100)
plt.plot(t,spread)
out_file = 'Output/' + state + '.png'
plt.savefig(out_file)
| [
"matplotlib"
] |
d2dc0c27a0d773c92a9160db52a479c6589e55f1 | Python | guilhermepaiva/Evolutionary-Computing | /project_evolutive_strategy.py | UTF-8 | 6,181 | 2.78125 | 3 | [] | no_license | import random
import math
import numpy as np
import matplotlib.pyplot as plt
import os
clear = lambda: os.system('cls')
# TODO: random.uniform without timestamp
PROBABILITY_CROSSOVER = 0.3
sum_fitness = []
mean_fitness = []
list_mean_fitness_100_generations = []
param_exploration = False
def makechromosome():
length_chromosome = 31
chromosome = [random.uniform(-15.0,15.0) for gene in range(length_chromosome-1)]
chromosome.append(np.random.normal(0,0.5))
return chromosome
def makepopulation(population_size):
return [makechromosome() for individual in range(population_size)]
def calculate_ackley(chromosome):
a, b, c = 20.0, 0.2, (math.pi)*2.0
first_sum = 0.0
for i in range(len(chromosome)-1):
first_sum += chromosome[i]**2.0
first_sum = math.sqrt(first_sum * (1.0/(len(chromosome)-1)))
first_sum *= -b
second_sum = 0.0
for j in range(len(chromosome)-1):
second_sum += math.cos(c*chromosome[j])
second_sum *= (1.0/(len(chromosome)-1))
result = (-a * math.exp(first_sum)) - math.exp(second_sum) + a + math.exp(1)
return result
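# The Ackley function has its global minimum of 0 at the origin, so
# calculate_fitness below approaches its maximum value of 1 as a chromosome's
# first 30 genes approach 0 (the last gene is the strategy parameter sigma).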
def calculate_fitness(chromosome):
return 1.0/(calculate_ackley(chromosome) + 1.0)
def check_fitness(population):
ideal_fitness = 0.9
sum_fitness_current_population = 0.0
for individual in population:
fit = calculate_fitness(individual)
sum_fitness_current_population += fit
if fit >= ideal_fitness:
return True
sum_fitness.append(sum_fitness_current_population)
return False
def check_overfitting(current_generation):
mean_of_mean_fitness = 0.0
if current_generation % 100 == 0:
for i in range(current_generation-1, current_generation-100, -1):
mean_of_mean_fitness += mean_fitness[i]
mean_of_mean_fitness = mean_of_mean_fitness / 100
list_mean_fitness_100_generations.append(mean_of_mean_fitness)
if len(list_mean_fitness_100_generations) >= 2:
difference = math.fabs(list_mean_fitness_100_generations[-1] - list_mean_fitness_100_generations[-2])
if difference <= 0.004:
return True
else:
return False
else:
return False
def get_mean_fitness(population):
sum_fitness_current_population = 0.0
for individual in population:
fit = calculate_fitness(individual)
sum_fitness_current_population += fit
mean_fitness_current_population = sum_fitness_current_population/len(population)
mean_fitness.append(mean_fitness_current_population)
return mean_fitness_current_population
def mutate(chromosome):
threshold = 0.1
learning_tax = 1.0/math.sqrt(len(chromosome)-1)
# if param_exploration:
# normal_random = np.random.uniform(0, 10)
# else:
# normal_random = np.random.normal(0,1)
normal_random = np.random.normal(0,1)
next_sigma = chromosome[-1]*math.exp(learning_tax*normal_random)
if next_sigma < threshold:
next_sigma = threshold
new_individual = [chromosome[i] + next_sigma*np.random.normal(0,1) for i in range(len(chromosome)-1)]
new_individual.append(next_sigma)
return new_individual
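# mutate implements self-adaptive mutation: the strategy parameter sigma (the last
# gene) is rescaled by exp(tau * N(0,1)) with a lower bound of 0.1, and each object
# gene is then perturbed by sigma * N(0,1).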
def pick_pivots():
left = random.randrange(1, 29)
right = random.randrange(left, 30)
return left, right
def check_bounds(chromosome):
flag_return = True
chromosome_without_sigma = chromosome[:-1]
for nucleotide in chromosome_without_sigma:
if -15 <= nucleotide <= 15:
pass
else:
return False
return flag_return
def crossover_intermadiate(mate1, mate2):
chromosome = [((mate1[i] + mate2[i]) / 2.0) for i in range(len(mate1))]
# if param_exploration:
# #chromosome[-1] = np.random.uniform(0,100)
# chromosome[-1] = 10000.0
if check_bounds(chromosome) == True:
return chromosome
else:
return makechromosome()
def crossover_random_point(mate1, mate2):
left, right = pick_pivots()
child1 = mate1[:left] + mate2[left:]
child2 = mate2[:left] + mate1[left:]
# if param_exploration:
# # child1[-1] = np.random.uniform(0,100)
# # child2[-1] = np.random.uniform(0,100)
# child1[-1] = 10000.0
# child2[-1] = 10000.0
if calculate_fitness(child1) > calculate_fitness(child2):
return child1
else:
return child2
def crossover_scrotum(mate1, mate2):
length_chromosome = len(mate1)
return [random.uniform(-15.0,15.0) for gene in range(length_chromosome-1)]
def crossover(mate1, mate2):
probability = 0.5
if np.random.uniform(0.0, 1.0) < probability:
return crossover_intermadiate(mate1, mate2)
else:
return crossover_random_point(mate1, mate2)
#return crossover_scrotum(mate1, mate2)
#return crossover_scrotum(mate1, mate2)
def next_generation(population):
var_mu = len(population)-1
lambda_factor = 7
mates = []
for i in range(var_mu):
index = random.randint(0, len(population)-1)
individual = population[index]
mates.append(individual)
population.remove(individual)
offspring = []
for i in range(var_mu):
individual = mates[i]
for j in range(lambda_factor):
individual = mutate(individual)
offspring.append(individual)
if random.uniform(0.0, 1.0) <= PROBABILITY_CROSSOVER:
individual = crossover(individual,population[random.randint(0,len(population)-1)])
if param_exploration:
individual[-1] = np.random.uniform(2,3)
offspring.append(individual)
offspring = sorted(offspring, key=lambda x:calculate_fitness(x))
population = population + offspring[-var_mu:]
return population
def get_best_individual(population):
population = sorted(population, key=lambda x:calculate_fitness(x))
return population[-1]
if __name__ == "__main__":
my_population = makepopulation(30)
max_generations = 100000
current_generation = 0
while (not check_fitness(my_population) and current_generation < max_generations):
current_generation += 1
my_population = next_generation(my_population)
fitness_medio = get_mean_fitness(my_population)
#decide exploration or not
if check_overfitting(current_generation):
param_exploration = True
else:
param_exploration = False
#end decide exploration
print "Current Generation: ", str(current_generation)
best_individual = get_best_individual(my_population)
print "Best Fitness: ", str(calculate_fitness(best_individual))
print str(len(range(1, max_generations+2)))
#plt.plot(range(1, max_generations+2), sum_fitness)
#plt.show()
plt.plot(range(1, max_generations+1), mean_fitness)
plt.show()
| [
"matplotlib"
] |
c9f72c8dabe8c4d359a6891039a4a5e857012b97 | Python | mbechtel2/EECS731-Project3 | /src/project3.py | UTF-8 | 5,214 | 3.625 | 4 | [] | no_license | ################################################################################
#
# File: project3
# Author: Michael Bechtel
# Date: September 28, 2020
# Class: EECS 731
# Description: Use clustering models to find similar movies in a given
# movie list using features such as the genres and ratings.
#
################################################################################
# Python imports
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.mixture import GaussianMixture
from sklearn.cluster import MeanShift
from sklearn.cluster import DBSCAN
from sklearn.cluster import AgglomerativeClustering
# Create the clustering models
kmeans_model = KMeans(n_clusters=2, random_state=0)
gmm_model = GaussianMixture(n_components=2)
ms_model = MeanShift()
dbscan_model = DBSCAN()
hac_model = AgglomerativeClustering(n_clusters=2)
# Read the raw datasets
movie_list = pd.read_csv("../data/raw/movies.csv")
ratings_list = pd.read_csv("../data/raw/ratings.csv")
# Create a 2D array / matrix for holding the individual genres for each movie
# Each cell represents a movie and genre combination
# If the movie is categorized as a genre, the cell will be set to a 1 value
# Otherwise, the cell will be set to a 0 value
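# For example, a movie tagged "Action|Comedy" gets a 1 in the Action and Comedy
# rows of the matrix and a 0 in every other genre row.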
genre_list = ["Action","Adventure","Animation","Children","Comedy","Crime","Documentary","Drama","Fantasy","Film-Noir","Horror","Musical","Mystery","Romance","Sci-Fi","Thriller","War","Western"]
genre_matrix = []
for genre in genre_list:
genre_matrix.append([])
# Parse through the movies. The following features are obtained:
# -Movie IDs
# -Average movie rating (get all rating values for a movie and compute the mean)
# -Movie genres (fill the genre_matrix based on the criteria given above
movie_ids = []
movie_genres = []
movie_ratings = []
for i in range(len(movie_list)):
movie_ids.append(movie_list["movieId"][i])
movie_ratings.append(ratings_list.query("movieId=={}".format(movie_ids[i]))["rating"].mean())
movie_genres.append(movie_list["genres"][i].split("|"))
for j,genre in enumerate(genre_list):
if genre in movie_genres[i]:
genre_matrix[j].append(1)
else:
genre_matrix[j].append(0)
# Create a new dataset with the obtained movie features
# Save the new dataset to the data/processed/ directory
movies_dataset = pd.DataFrame({"movieId":movie_ids,"rating":movie_ratings,
"Action":genre_matrix[0],"Adventure":genre_matrix[1],
"Animation":genre_matrix[2],"Children":genre_matrix[3],
"Comedy":genre_matrix[4],"Crime":genre_matrix[5],
"Documentary":genre_matrix[6],"Drama":genre_matrix[7],
"Fantasy":genre_matrix[8],"Film-Noir":genre_matrix[9],
"Horror":genre_matrix[10],"Musical":genre_matrix[11],
"Mystery":genre_matrix[12],"Romance":genre_matrix[13],
"Sci-Fi":genre_matrix[14],"Thriller":genre_matrix[15],
"War":genre_matrix[16],"Western":genre_matrix[17]}).dropna()
movies_dataset.to_csv("../data/processed/movies_dataset.csv")
# For each genre
for genre in genre_list:
# Print the current genre
print("Performing clustering on == {}".format(genre))
# Get the rating and genre columns from the overall dataset
genre_dataset = movies_dataset.loc[:,("rating",genre)]
# Create a grid of graphs so all results can be saved to a single image
# Only 5 models are tested, so the sixth graph is removed
_,graphs = plt.subplots(2,3)
graphs[1,2].set_axis_off()
# Perform K-means clustering and plot the results
movies_pred = kmeans_model.fit_predict(genre_dataset)
graphs[0,0].scatter(genre_dataset.iloc[:,0], genre_dataset.iloc[:,1], c=movies_pred)
graphs[0,0].set_title("K-Means")
# Perform GMM clustering and plot the results
movies_pred = gmm_model.fit_predict(genre_dataset)
graphs[0,1].scatter(genre_dataset.iloc[:,0], genre_dataset.iloc[:,1], c=movies_pred)
graphs[0,1].set_title("GMM")
# Perform Mean-Shift clustering and plot the results
movies_pred = ms_model.fit_predict(genre_dataset)
graphs[0,2].scatter(genre_dataset.iloc[:,0], genre_dataset.iloc[:,1], c=movies_pred)
graphs[0,2].set_title("Mean-Shift")
# Perform DBSCAN clustering and plot the results
movies_pred = dbscan_model.fit_predict(genre_dataset)
graphs[1,0].scatter(genre_dataset.iloc[:,0], genre_dataset.iloc[:,1], c=movies_pred)
graphs[1,0].set_title("DBSCAN")
# Perform Hierarchical Agglomerative Clustering (HAC) and plot the results
movies_pred = hac_model.fit_predict(genre_dataset)
graphs[1,1].scatter(genre_dataset.iloc[:,0], genre_dataset.iloc[:,1], c=movies_pred)
graphs[1,1].set_title("HAC")
# Increase the size of the graphs and then save them to the visualiztions/ directory
plt.gcf().set_size_inches((12.80,7.20), forward=False)
plt.savefig("../visualizations/{}.png".format(genre), bbox_inches='tight', dpi=100)
#plt.show()
| [
"matplotlib"
] |
2a8de93ddcba572ea9aea89727b6f8cbd6604613 | Python | IanEisenberg/CBMM | /programming_tutorial/programming_tutorial.py | UTF-8 | 2,103 | 2.84375 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 16 12:10:54 2017
@author: ian
"""
from matplotlib import pyplot as plt
import numpy as np
import seaborn as sns
input_dim = 50
output_dim = 60
input_vec = np.random.rand(input_dim)*2-1
weight_mat = np.random.rand(output_dim, input_dim)
output_vec = weight_mat.dot(input_vec)
# part 2
def GenerateVoltage(p,T,Vreset,Vthresh,V0):
V = [V0]
for i in range(T-1):
if V[-1]>Vthresh:
V.append(Vreset)
else:
if np.random.rand()<p:
vd=1
else:
vd=-1
V.append(V[-1]+vd)
return V
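# GenerateVoltage is a simple integrate-and-fire-style model: the voltage performs
# a biased random walk (+1 with probability p, otherwise -1) and is reset to Vreset
# whenever it exceeds Vthresh.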
V=GenerateVoltage(.7,1000,-70,-45,-65)
plt.plot(V)
# part 3
from scipy.stats import expon
N = 3000
p = 20/1000
spiketrain = (np.random.rand(1,N) < p).flatten()
kernel = expon.pdf(range(-50,50), 5, 10)
output = np.convolve(spiketrain,kernel)
plt.figure(figsize=(12,8))
plt.subplot(3,1,1)
plt.plot(kernel)
plt.subplot(3,1,2)
plt.plot(spiketrain)
plt.subplot(3,1,3)
plt.plot(output)
# part 4
from PIL import Image
from scipy.signal import convolve2d
from skimage.transform import rotate
img = Image.open('octopus.png').convert('L')
k = np.matrix('0 0 0; 0 1.125 0; 0 0 0')-.125
k = k.dot(np.ones([3,3]))
conv_img = convolve2d(img,k)
plt.figure(figsize = (12,14))
plt.subplot(3,1,1)
plt.imshow(img)
plt.subplot(3,1,2)
plt.imshow(k)
plt.subplot(3,1,3)
plt.imshow(abs(conv_img))
def create_gabor():
vals = np.linspace(-np.pi,np.pi,50)
xgrid, ygrid = np.meshgrid(vals,vals)
the_gaussian = np.exp(-(xgrid/2)**2-(ygrid/2)**2)
# Simple sine wave grating : orientation = 0, phase = 0, amplitude = 1, frequency = 10/(2*pi)
the_sine = np.sin(xgrid * 2)
# Elementwise multiplication of Gaussian and sine wave grating
gabor = the_gaussian * the_sine
return gabor
rotation = 30
plt.figure(figsize = (12,14))
for i, r in enumerate([0,45,90]):
k = rotate(create_gabor(), r)
conv_img = convolve2d(img,k)
plt.subplot(3,2,i*2+1)
plt.imshow(k)
plt.subplot(3,2,i*2+2)
plt.imshow(abs(conv_img))
| [
"seaborn"
] |
a639e80eb619b65e62985324d5801ec099705a1c | Python | CYHSM/neuroai_hackathon | /average_rate_maps.py | UTF-8 | 2,457 | 2.765625 | 3 | [] | no_license | import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
basic_data = pd.read_pickle("basic_info_included_data/all_mice_df.pkl")
def average_firing_maps(dataframe, session_id, tetrode):
firing_map_size = 39
average_firing_map = np.zeros((firing_map_size, firing_map_size))
count = 0
for _, row in dataframe.iterrows():
if row["session_id"] == session_id:
if extract_tetrode(row) == tetrode:
count += 1
firing_map_to_add = (
row["firing_maps"][0:firing_map_size, 0:firing_map_size]
/ (row["firing_maps"][0:firing_map_size, 0:firing_map_size]).max()
)
average_firing_map += firing_map_to_add
print("count", count)
if count == 0:
return None
average_firing_map = average_firing_map / count
return average_firing_map, count, tetrode
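# Note that each cell's firing map is normalised by its own maximum before being
# added, so the returned average map contains values in the range [0, 1].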
def plot_average_firing_map(dataframe, session_id, tetrode):
result = average_firing_maps(dataframe, session_id, tetrode)
if result is None:
return
else:
average_firing_map, count, tetrode = result
plt.imshow(average_firing_map)
plt.colorbar()
plt.savefig(
f"normalised/average_firing_map_tetrode_{tetrode}_cells_{count}_session_id_{session_id}.png"
)
plt.close()
def compute_firing_map_bias():
pass
def plot_a_firing_map(session_data):
plt.imshow(session_data["firing_maps"].values[0])
plt.savefig("test_firing_map.png")
plt.close()
def get_session_ids(df):
session_ids = df["session_id"]
unique_session_ids = set(session_ids)
return unique_session_ids
def get_all_tetrodes():
return set(basic_data["tetrode"])
def extract_tetrode(row):
for basic_row in basic_data.iterrows():
if (basic_row[1]["session_id"], basic_row[1]["cluster_id"]) == (
row["session_id"],
row["cluster_id"],
):
return basic_row[1]["tetrode"]
def main():
data_path = "SORTED_CLUSTERS/sorted_clusters.pkl"
df = pd.read_pickle(data_path)
session_ids = get_session_ids(df)
for session_id in session_ids:
tetrodes = get_all_tetrodes()
for tetrode in tetrodes:
plot_average_firing_map(df, session_id, tetrode)
if __name__ == "__main__":
main()
| [
"matplotlib"
] |
d141d056a00eac8515bfbf5f32e1c0cd2fbf0b66 | Python | hsmall/CS224W-WordNet | /branching_factor_stats.py | UTF-8 | 5,046 | 3.078125 | 3 | [] | no_license | from snap import *
from WordNet import WordNet
import matplotlib.pyplot as plt
import pickle
def __main__():
depths = [1, 2, 3] # What depths to run for computing the branching factors
filename_template = "branching_graphs/null_branching_factor_by_decade_{0}.txt" #filenames where data is saved
# The next couple lines lines compute all of the branching factors for the graph and then saves them to a file
# CAN COMMENT THEM OUT to avoid recomputing when just trying to tweak the graphs
#wordnet = LoadWordNet(is_null_model=True)
#SaveBranchingFactorsToFile(wordnet, depths, filename_template)
# This section manipulates the computed branching factors and produces a graph
for max_depth in depths:
branching_factor_by_era = GroupDecades(pickle.load(open(filename_template.format(max_depth), "r")), 10)
average_by_era = []
for era in sorted(branching_factor_by_era.keys()):
if len(branching_factor_by_era[era]) == 0:
average_by_era.append(0)
continue
avg = 0.0
for word, year, branching_factor, out_degree in branching_factor_by_era[era]:
if out_degree == 0: continue
# I made a seperate graph for each of the below branching factor definitions.
avg += branching_factor #This is the standard "branching factor" as we defined at our meeting
#avg += (branching_factor/out_degree**max_depth) #This weights the branching factor by the out degree of the node to remove bias of high-degree nodes
avg = avg / len(branching_factor_by_era[era])
average_by_era.append(avg)
#print "{0} -> {1}".format(decade, branching_factor_by_decade[decade])
#print "{0}\n".format(avg)
plt.plot(sorted(branching_factor_by_era.keys()), normalize(average_by_era), label="Max Depth = {0}".format(max_depth))
plt.title('Normalized Average Branching Factor by Century')
plt.xlabel('Century'); plt.ylabel('Normalized Average Branching Factor')
plt.legend()
plt.show()
def LoadWordNet(is_null_model=False):
print "Loading WordNet Graph..."
wordnet = WordNet(["data/dict/data.noun", "data/dict/data.verb", "data/dict/data.adj", "data/dict/data.adv"], "data/word_to_year_formatted.txt", is_null_model)
print "Finished Loading Graph!"
return wordnet
# Computes the branching factor for each node (or word) at each of the given |depths|, then saves them to a file
# based upon the given |filename_template|
def SaveBranchingFactorsToFile(wordnet, depths, filename_template):
graph = wordnet.time_directed_graph_no_supernodes
for max_depth in depths:
print "Calculating Branching Factors (Depth = {0})".format(max_depth)
node_to_branching_factor = ComputeBranchingFactor(graph, max_depth)
branching_factor_by_decade = {decade: list() for decade in range(600, 2001, 10)}
for node in node_to_branching_factor.keys():
word = wordnet.node_to_word_directed_no_supernodes[node]
year = wordnet.word_to_date[word]
branching_factor = node_to_branching_factor[node]
out_degree = graph.GetNI(node).GetOutDeg()
decade = year - year % 10
branching_factor_by_decade[decade].append((word, year, branching_factor, out_degree))
pickle.dump(branching_factor_by_decade, open(filename_template.format(max_depth), "w"))
# Compute the branching factor for each node using the given |max_depth|
def ComputeBranchingFactor(graph, max_depth):
node_to_branching_factor = {}
nodes = [node.GetId() for node in graph.Nodes()]
for i in range(len(nodes)):
if i % 10000 == 0: print "{0} of {1}".format(i, len(nodes))
node_to_branching_factor[nodes[i]] = ComputeBranchingFactorHelper(graph, nodes[i], max_depth)
return node_to_branching_factor
# Recursive helper function for the above method. The weight multiplier is used so that when you
# traverse an edge with weight != 1, it reduces the weight of all subsequent edges in the traversal
def ComputeBranchingFactorHelper(graph, node, max_depth, weight_multiplier=1.0):
if max_depth == 0: return 0
branching_factor = 0
for neighbor in graph.GetNI(node).GetOutEdges():
edge = graph.GetEI(node, neighbor)
edge_weight = graph.GetFltAttrDatE(edge, "weight")
branching_factor += edge_weight*weight_multiplier
branching_factor += ComputeBranchingFactorHelper(graph, neighbor, max_depth-1, edge_weight*weight_multiplier)
return branching_factor
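# Example: with unit edge weights and max_depth = 2, a node with out-degree k whose
# neighbors each have out-degree m gets a branching factor of k + k*m.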
# This method is used to smooth the data from "by-decade" data to some other time period. For example:
# if num_to_group = 5, the data will now be grouped by half-century (aka 50-year intervals/buckets)
def GroupDecades(branching_factor_by_decade, num_to_group):
grouped_data = {}
decades = sorted(branching_factor_by_decade.keys())
for i in range(0, len(decades), num_to_group):
start_decade = decades[i]
grouped_data[start_decade] = list()
for j in range(i, min(i+num_to_group, len(decades))):
grouped_data[start_decade].extend(branching_factor_by_decade[decades[j]])
return grouped_data
# Returns a normalized version of the input |vector|
def normalize(vector):
total = float(sum(vector))
return [elem / total for elem in vector]
__main__() | [
"matplotlib"
] |
40f00e9035282a2c660ef0cff3296dcb8ed2c8dd | Python | Holly-E/Matplotlib_Practice | /histogram.py | UTF-8 | 1,214 | 2.96875 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 27 17:10:28 2018
@author: hollyerickson
"""
import matplotlib.pyplot as plt
# import from ticker to customize axis tick spacing
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
from Goals_Module import AwayTeamGoals
from Goals_Module import HomeTeamGoals
gameNum = [num for num in range(1, len(HomeTeamGoals) + 1)]
plt.style.use('seaborn-whitegrid')
fig = plt.figure(figsize = (7, 5))
ax1 = fig.add_subplot(1, 1, 1)
# HISTOGRAM
ax1.hist((HomeTeamGoals,AwayTeamGoals),
bins= range(0,8), # default is 10 bins, can be an int or a sequence e.g. [0,1,2,3,4,5,6,7] or a range
align='left', #where to align the bins
# range = () Will remove outliers based on (start, end)
color = ('red', 'blue'),
# cumulative = True Includes the number before it plus itself
)
ax1.set_xlabel('Goals', labelpad=5)
ax1.set_ylabel('Number of Games')
ax1.spines['right'].set_visible(False)
ax1.spines['top'].set_visible(False)
ax1.set_title('Home Team Goals Over Season', weight='600', fontdict = {'fontsize': 15})
#plt.savefig('Images/histogram', orientation='landscape')
plt.show()
| [
"matplotlib"
] |
68cd0b6dc28a32d23121cf70adab73ee263a006c | Python | HotView/PycharmProjects | /Tensorflow/CNN/莫烦python.py | UTF-8 | 1,594 | 2.859375 | 3 | [] | no_license | import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("data",one_hot=True)
def add_layer(inputs,in_size,out_size,activaion_function = None):
Weights = tf.Variable(tf.random_normal([in_size,out_size]))
biases = tf.Variable(tf.zeros([1,out_size])+0.1)
Wx_plus_b = tf.matmul(inputs,Weights)+biases
if activaion_function is None:
outputs = Wx_plus_b
else:
outputs =activaion_function(Wx_plus_b)
return outputs
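# add_layer builds one fully connected layer: tf.matmul(inputs, Weights) + biases,
# passed through the activation function when one is supplied.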
x_data = np.linspace(-1,1,300)[:,np.newaxis]
noise = np.random.normal(0,0.05,x_data.shape)
y_data = np.square(x_data)-0.5+noise
xs = tf.placeholder(tf.float32,[None,1])
ys = tf.placeholder(tf.float32,[None,1])
l1 = add_layer(xs,1,10,activaion_function=tf.nn.relu)
predition = add_layer(l1,10,1)
loss = tf.reduce_mean(tf.reduce_sum(tf.square(ys-predition),reduction_indices = [1]))
train_step = tf.train.GradientDescentOptimizer(learning_rate=0.1).minimize(loss)
init =tf.initialize_all_variables()
sess = tf.Session()
sess.run(init)
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
ax.scatter(x_data,y_data)
plt.ion()
for i in range(1000):
sess.run(train_step,feed_dict={xs:x_data,ys:y_data})
if i%50==0:
print(sess.run(loss,feed_dict={xs:x_data,ys:y_data}))
try:
ax.lines.remove(lines[0])
except Exception:
pass
predition_value = sess.run(predition,feed_dict={xs:x_data})
lines = ax.plot(x_data,predition_value,'r-',lw =5)
plt.pause(0.1)
plt.pause(0)
| [
"matplotlib"
] |
e4e2aa8abd398dee0af6a659f9c8384ac25e937d | Python | sun510001/TitanicSurvivalPredict | /code/model.py | UTF-8 | 4,959 | 2.5625 | 3 | [] | no_license | # -*- coding:utf-8 -*-
import pandas as pd
import os
import matplotlib.pyplot as plt
from sklearn.ensemble import RandomForestClassifier
os.environ["CUDA_VISIBLE_DEVICES"] = "0,1"
import tensorflow as tf
from keras import models, layers
from keras.layers import Embedding, Dense, Flatten, Dropout, Input, LSTM, Bidirectional, CuDNNLSTM
from keras.callbacks import ModelCheckpoint
from sklearn.model_selection import StratifiedKFold
config = tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True))
sess = tf.Session(config=config)
def get_data(df, list_features):
data_x = df[list_features].values
data_y = df['Survived'].values
return data_x, data_y
def build_model_1(len_features):
inputs = Input(shape=(len_features,))
# x = Flatten()(inputs)
x = layers.Dense(24, activation='relu')(inputs)
x = Dropout(0.2)(x)
x = layers.Dense(24, activation='relu')(x)
# x = layers.Dense(24, activation='relu')(x)
x = Dense(1, activation='sigmoid')(x)
model = models.Model(inputs=inputs, outputs=x)
model.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['acc'])
# model.summary()
return model
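# build_model_1 is a small MLP: two hidden layers of 24 ReLU units with dropout in
# between and a single sigmoid output for the binary survival prediction.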
def train_model(model, train_x, train_y):
skf = StratifiedKFold(n_splits=FOLD, shuffle=True, random_state=SEED)
for fold, (ids_train, ids_test) in enumerate(skf.split(train_x, train_y)):
sv = ModelCheckpoint(weight_fn.format(VER, fold), monitor='val_acc', verbose=1, save_best_only=True,
save_weights_only=False, mode='max')
history = model.fit(train_x[ids_train], train_y[ids_train],
validation_data=[train_x[ids_test], train_y[ids_test]], epochs=15, batch_size=1,
callbacks=[sv], verbose=1, shuffle=False)
# hist = history.history
# loss_values = hist['loss']
# val_loss_values = hist['val_loss']
# acc_values = hist['acc']
# val_acc_values = hist['val_acc']
# # draw image
# fig = plt.figure()
# epochs = range(1, len(loss_values) + 1)
# plt.plot(epochs, loss_values, 'bo', label='Training loss', color='b')
# plt.plot(epochs, val_loss_values, 'b', label='Validation loss', color='b')
# plt.plot(epochs, acc_values, 'bo', label='Training acc', color='r')
# plt.plot(epochs, val_acc_values, 'b', label='Validation acc', color='r')
# plt.title('Training and val loss')
# plt.xlabel('Epochs')
# plt.ylabel('Loss')
# plt.legend()
# fig.savefig(fig_name, dpi=fig.dpi)
def train_it(list_features):
df_train = pd.read_csv('../data/train_proc.csv')
fig_name = 'loss.v1.png'
# print(train.shape, test.shape)
len_features = len(list_features)
train_x, train_y = get_data(df_train, list_features)
model = build_model_1(len_features)
train_model(model, train_x, train_y)
def test_it(list_features, num, df_gen):
# df_gen = pd.read_csv('../data/gender_submission.csv')
df_test = pd.read_csv('../data/test_proc.csv')
model = models.load_model(weight_fn.format(VER, num))
test_x = df_test[list_features].values
test_y = model.predict(test_x)
col_n = 'Survived-{0}'.format(num)
df_gen[col_n] = test_y
# df_gen.loc[df_gen[col_n] <= 0.5, col_n] = 0
# df_gen.loc[df_gen[col_n] > 0.5, col_n] = 1
# df_gen[col_n] = df_gen[col_n].astype(int)
return df_gen
# df_gen.to_csv('../data/gender_submission_out_{0}.csv'.format(num), index=False)
def com_answer(df):
# df = pd.read_csv('../data/gender_submission_out.csv')
df_ans = pd.read_csv('../data/100answer.csv')
df_2 = df.drop('PassengerId', axis=1)
# df['Survived'] = df['Survived-{0}'.format(1)]
df['Survived'] = df_2.mean(axis=1)
df.loc[df['Survived'] <= 0.5, 'Survived'] = 0
df.loc[df['Survived'] > 0.5, 'Survived'] = 1
df['Survived'] = df['Survived'].astype(int)
df['ans'] = df_ans['Survived']
count = [0]
def com(input):
if input[0] != input[1]:
count[0] += 1
df[['Survived', 'ans']].apply(com, axis=1)
miss = count[0]
sum = df['ans'].count()
rate = (sum - miss) / sum
print("Miss count:", miss, "\nSum count:", sum, "\nAcc rate:", rate)
print("Acc rate:", rate)
df = df[['PassengerId', 'Survived']]
df.to_csv('../data/gender_submission_out_6_11_2.csv', index=False)
if __name__ == '__main__':
list_features = ['Pclass', 'Sex', 'Fare', 'cabin_p1', 'Embarked', 'Parch', 'age_proc', 'fs',
'is_alone', 'Title', 'is_3class', 'has_cabin', 'name_len', 'Family_Survival']
weight_fn = '../data/{0}-titanic-{1}.h5'
rate = 0
FOLD = 5
SEED = 161
df_gen = pd.read_csv('../data/gender_submission.csv')
df_gen = df_gen.drop('Survived', axis=1)
VER = 2
# train_it(list_features)
for i in range(FOLD):
df_gen = test_it(list_features, i, df_gen)
com_answer(df_gen)
| [
"matplotlib"
] |