# Notebook from stefanpeidli/cellphonedb
Path: scanpy_cellphonedb.ipynb
<code>
from IPython.core.display import display, HTML
display(HTML("<style>.container { width:90% !important; }</style>"))
%matplotlib inline
import numpy as np
import pandas as pd
import matplotlib.pyplot as pl
import scanpy as sc
import cellphonedb as cphdb
# Original API works for python as well, it's just not really nice
import sys
sys.path.insert(0, './cellphonedb/src/api_endpoints/terminal_api/method_terminal_api_endpoints/')
from method_terminal_commands import statistical_analysis_____no_output_____
</code>
# Dev_____no_output_____## Original method_____no_output_____
<code>
# you need to download these from cellphonedb website / github and replace the path accordingly
dat = 'C:/Users/Stefan/Downloads/cellphonedb_example_data/example_data/'
metafile = dat+'test_meta.txt'
countfile = dat+'test_counts.txt'
statistical_analysis(meta_filename=metafile, counts_filename=countfile)[ ][APP][04/11/20-17:12:40][WARNING] Latest local available version is `v2.0.0`, using it
[ ][APP][04/11/20-17:12:40][WARNING] User selected downloaded database `v2.0.0` is available, using it
[ ][CORE][04/11/20-17:12:40][INFO] Initializing SqlAlchemy CellPhoneDB Core
[ ][CORE][04/11/20-17:12:40][INFO] Using custom database at C:\Users\Stefan\.cpdb\releases\v2.0.0\cellphone.db
[ ][APP][04/11/20-17:12:40][INFO] Launching Method cpdb_statistical_analysis_local_method_launcher
[ ][APP][04/11/20-17:12:40][INFO] Launching Method _set_paths
[ ][APP][04/11/20-17:12:40][WARNING] Output directory (C:\Users\Stefan\Documents\Github_Clones\cellphonedb/out) exist and is not empty. Result can overwrite old results
[ ][APP][04/11/20-17:12:40][INFO] Launching Method _load_meta_counts
[ ][CORE][04/11/20-17:12:40][INFO] Launching Method cpdb_statistical_analysis_launcher
[ ][CORE][04/11/20-17:12:40][INFO] Launching Method _counts_validations
[ ][CORE][04/11/20-17:12:40][INFO] [Cluster Statistical Analysis Simple] Threshold:0.1 Iterations:1000 Debug-seed:-1 Threads:4 Precision:3
[ ][CORE][04/11/20-17:12:40][INFO] Running Simple Prefilters
[ ][CORE][04/11/20-17:12:40][INFO] Running Real Simple Analysis
[ ][CORE][04/11/20-17:12:40][INFO] Running Statistical Analysis
[ ][CORE][04/11/20-17:13:27][INFO] Building Pvalues result
[ ][CORE][04/11/20-17:13:29][INFO] Building Simple results
[ ][CORE][04/11/20-17:13:29][INFO] [Cluster Statistical Analysis Complex] Threshold:0.1 Iterations:1000 Debug-seed:-1 Threads:4 Precision:3
[ ][CORE][04/11/20-17:13:29][INFO] Running Complex Prefilters
[ ][CORE][04/11/20-17:13:31][INFO] Running Real Complex Analysis
[ ][CORE][04/11/20-17:13:32][INFO] Running Statistical Analysis
[ ][CORE][04/11/20-17:14:38][INFO] Building Pvalues result
[ ][CORE][04/11/20-17:14:40][INFO] Building Complex results
pd.read_csv('./out/pvalues.csv')_____no_output_____
</code>
## Scanpy API test with the official CellPhoneDB example data_____no_output_____
<code>
# you need to download these from cellphonedb website / github and replace the path accordingly
dat = 'C:/Users/Stefan/Downloads/cellphonedb_example_data/example_data/'
metafile = dat+'test_meta.txt'
countfile = dat+'test_counts.txt'_____no_output_____# build an AnnData object: cells as observations (rows), genes as variables (columns)
counts_df = pd.read_csv(countfile, sep='\t', index_col=0)
bdata = sc.AnnData(counts_df.values.T,
                   obs=pd.read_csv(metafile, sep='\t', index_col=0),
                   var=pd.DataFrame(index=counts_df.index.values))_____no_output_____outs = cphdb.statistical_analysis_scanpy(bdata, bdata.var_names, bdata.obs_names, 'cell_type')[ ][APP][04/11/20-17:14:43][WARNING] Latest local available version is `v2.0.0`, using it
[ ][APP][04/11/20-17:14:43][WARNING] User selected downloaded database `v2.0.0` is available, using it
[ ][CORE][04/11/20-17:14:43][INFO] Initializing SqlAlchemy CellPhoneDB Core
[ ][CORE][04/11/20-17:14:43][INFO] Using custom database at C:\Users\Stefan\.cpdb\releases\v2.0.0\cellphone.db
[ ][APP][04/11/20-17:14:43][INFO] Launching Method cpdb_statistical_analysis_local_method_launcher_scanpy
[ ][APP][04/11/20-17:14:43][INFO] Launching Method _set_paths
[ ][APP][04/11/20-17:14:43][WARNING] Output directory (C:\Users\Stefan\Documents\Github_Clones\cellphonedb/out) exist and is not empty. Result can overwrite old results
[ ][CORE][04/11/20-17:14:43][INFO] Launching Method cpdb_statistical_analysis_launcher
[ ][CORE][04/11/20-17:14:43][INFO] Launching Method _counts_validations
[ ][CORE][04/11/20-17:14:43][INFO] [Cluster Statistical Analysis Simple] Threshold:0.1 Iterations:1000 Debug-seed:-1 Threads:4 Precision:3
[ ][CORE][04/11/20-17:14:43][INFO] Running Simple Prefilters
[ ][CORE][04/11/20-17:14:43][INFO] Running Real Simple Analysis
[ ][CORE][04/11/20-17:14:43][INFO] Running Statistical Analysis
[ ][CORE][04/11/20-17:15:32][INFO] Building Pvalues result
[ ][CORE][04/11/20-17:15:35][INFO] Building Simple results
[ ][CORE][04/11/20-17:15:35][INFO] [Cluster Statistical Analysis Complex] Threshold:0.1 Iterations:1000 Debug-seed:-1 Threads:4 Precision:3
[ ][CORE][04/11/20-17:15:35][INFO] Running Complex Prefilters
[ ][CORE][04/11/20-17:15:37][INFO] Running Real Complex Analysis
[ ][CORE][04/11/20-17:15:38][INFO] Running Statistical Analysis
[ ][CORE][04/11/20-17:16:38][INFO] Building Pvalues result
[ ][CORE][04/11/20-17:16:39][INFO] Building Complex results
outs['pvalues']_____no_output_____# the output is also saved to
bdata.uns['cellphonedb_output']_____no_output_____bdata_____no_output_____
</code>
| {
"repository": "stefanpeidli/cellphonedb",
"path": "scanpy_cellphonedb.ipynb",
"matched_keywords": [
"Scanpy"
],
"stars": null,
"size": 63150,
"hexsha": "d000f1ce0f008b8f64f705810da78b9e62f26064",
"max_line_length": 212,
"avg_line_length": 45.3989935298,
"alphanum_fraction": 0.3578463975
} |
# Notebook from innawendell/European_Comedy
Path: Analyses/The Evolution of The Russian Comedy_Verse_Features.ipynb
## The Analysis of The Evolution of The Russian Comedy. Part 3._____no_output_____In this analysis, we will explore the evolution of the Russian five-act comedy in verse based on the following features:
- The coefficient of dialogue vivacity;
- The percentage of scenes with split verse lines;
- The percentage of scenes with split rhymes;
- The percentage of open scenes;
- The percentage of scenes with split verse lines and rhymes.
We will take the following steps:
1. We will describe the features.
2. We will explore feature correlations.
3. We will check the features for normality using the Shapiro-Wilk normality test. This will help us determine whether parametric or non-parametric statistical tests are more appropriate. If the features are not normally distributed, we will use non-parametric tests.
4. In our previous analysis of Sperantov's data, we discovered that instead of four periods of the Russian five-act tragedy in verse proposed by Sperantov, we can only be confident in the existence of two periods, where 1795 is the cut-off year. Therefore, we propose the following periods for the Russian verse comedy:
- Period One (from 1775 to 1794)
- Period Two (from 1795 to 1849).
5. We will run statistical tests to determine whether these two periods are statistically different.
6. We will create visualizations for each feature.
7. We will run descriptive statistics for each feature._____no_output_____
<code>
import pandas as pd
import numpy as np
import json
from os import listdir
from scipy.stats import shapiro
import matplotlib.pyplot as plt
%matplotlib inline
import seaborn as sns_____no_output_____def make_plot(feature, title):
mean, std, median = summary(feature)
plt.figure(figsize=(10, 7))
plt.title(title, fontsize=17)
sns.distplot(feature, kde=False)
mean_line = plt.axvline(mean,
color='black',
linestyle='solid',
linewidth=2); M1 = 'Mean';
median_line = plt.axvline(median,
color='green',linestyle='dashdot',
linewidth=2); M2='Median'
std_line = plt.axvline(mean + std,
color='black',
linestyle='dashed',
linewidth=2); M3 = 'Standard deviation';
plt.axvline(mean - std,
color='black',
linestyle='dashed',
linewidth=2)
plt.legend([mean_line, median_line, std_line], [M1, M2, M3])
plt.show()_____no_output_____def small_sample_mann_whitney_u_test(series_one, series_two):
values_one = series_one.sort_values().tolist()
values_two = series_two.sort_values().tolist()
# rank the combined values; tied values receive the average of their ranks
# (note: the critical-value tables used later assume there are no, or few, ties)
result_df = pd.DataFrame(values_one + values_two, columns=['combined']).sort_values(by='combined')
result_df['ranks'] = result_df['combined'].rank(method='average')
# make a dictionary where keys are values and values are ranks
val_to_rank = dict(zip(result_df['combined'].values, result_df['ranks'].values))
sum_ranks_one = np.sum([val_to_rank[num] for num in values_one])
sum_ranks_two = np.sum([val_to_rank[num] for num in values_two])
# number in sample one and two
n_one = len(values_one)
n_two = len(values_two)
# calculate the mann whitney u statistic which is the smaller of the u_one and u_two
u_one = ((n_one * n_two) + (n_one * (n_one + 1) / 2)) - sum_ranks_one
u_two = ((n_one * n_two) + (n_two * (n_two + 1) / 2)) - sum_ranks_two
# add a quality check
assert u_one + u_two == n_one * n_two
u_statistic = np.min([u_one, u_two])
return u_statistic_____no_output_____def summary(feature):
mean = feature.mean()
std = feature.std()
median = feature.median()
return mean, std, median_____no_output_____# updated boundaries
def determine_period(row):
if row <= 1794:
period = 1
else:
period = 2
return period_____no_output_____
</code>
## Part 1. Feature Descriptions_____no_output_____For the Russian corpus of five-act comedies, we generated additional features inspired by Iarkho. So far, we have had no understanding of how these features evolved over time or whether they could differentiate literary periods.
The features include the following:
1. **The Coefficient of Dialogue Vivacity**, i.e., the number of utterances in a play / the number of verse lines in a play. Since some of the comedies in our corpus were written in iambic hexameter while others were written in free iambs, it is important to clarify how we made sure the number of verse lines was comparable. Because Aleksandr Griboedov's *Woe From Wit* is the only four-act comedy in verse that had an extensive markup, we used it as the basis for our calculation.
- First, we improved Dracor's markup of the verse lines in *Woe From Wit*.
- Next, we calculated the number of verse lines in *Woe From Wit*, which was 2220.
- Then, we calculated the total number of syllables in *Woe From Wit*, which was 22076.
- We calculated the average number of syllables per verse line: 22076 / 2220 = 9.944144144144143.
- Finally, we divided the average number of syllables in *Woe From Wit* by the average number of syllables in a comedy written in hexameter, i.e., 12.5: 9.944144144144143 / 12.5 = 0.796.
- To convert the number of verse lines in a play written in free iambs and make it comparable with the comedies written in hexameter, we used the following formula: rescaled number of verse lines = the number of verse lines in free iambs * 0.796.
- For example, in *Woe From Wit*, the number of verse lines = 2220, so the rescaled number of verse lines = 2220 * 0.796 = 1767.12, and the coefficient of dialogue vivacity = 702 / 1767.12 = 0.397 (a short code sketch of this calculation follows these feature descriptions).
2. **The Percentage of Scenes with Split Verse Lines**, i.e., the percentage of scenes where the end of a scene does not correspond with the end of a verse line and the verse line extends into the next scene, e.g., "Не бойся. Онъ блажитъ. ЯВЛЕНІЕ 3. Какъ радъ что вижу васъ."
3. **The Percentage of Scenes with Split Rhymes**, i.e., the percentage of scenes that rhyme with other scenes, e.g., "Надѣюсъ на тебя, Вѣтрана, какъ на стѣну. ЯВЛЕНІЕ 4. И въ ней , какъ ни крѣпка, мы видимЪ перемѣну."
4. **The Percentage of Open Scenes**, i.e., the percentage of scenes with either split verse lines or rhymes.
5. **The Percentage of Scenes With Split Verse Lines and Rhymes**, i.e., the percentage of scenes that are connected through both means: by sharing a verse line and a rhyme._____no_output_____
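As promised above, here is a minimal sketch of the verse-line rescaling and the vivacity calculation. The numbers for *Woe From Wit* (22076 syllables, 2220 verse lines, 702 utterances) are taken from the description above; the constant and function names are ours, and small differences from the quoted figures are due to rounding.
<code>
# Minimal sketch of the rescaling described above (names are ours).
WOE_SYLLABLES = 22076       # total syllables in Woe From Wit
WOE_VERSE_LINES = 2220      # verse lines in Woe From Wit (free iambs)
HEXAMETER_SYLLABLES = 12.5  # average syllables per iambic-hexameter line

# scaling factor for comedies written in free iambs (~0.796)
scaling = (WOE_SYLLABLES / WOE_VERSE_LINES) / HEXAMETER_SYLLABLES

def rescaled_verse_lines(n_lines, free_iambs=True):
    """Make free-iamb line counts comparable to hexameter line counts."""
    return n_lines * scaling if free_iambs else n_lines

# dialogue vivacity of Woe From Wit: 702 utterances per rescaled verse line
print(round(702 / rescaled_verse_lines(2220), 3))  # ~0.397
</code>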
<code>
comedies = pd.read_csv('../Russian_Comedies/Data/Comedies_Raw_Data.csv')_____no_output_____# sort by creation date
comedies_sorted = comedies.sort_values(by='creation_date').copy()_____no_output_____# select only original comedies and five act
original_comedies = comedies_sorted[(comedies_sorted['translation/adaptation'] == 0) &
(comedies_sorted['num_acts'] == 5)].copy()_____no_output_____original_comedies.head()_____no_output_____original_comedies.shape_____no_output_____# rename column names for clarity
original_comedies = original_comedies.rename(columns={'num_scenes_iarkho': 'mobility_coefficient'})_____no_output_____comedies_verse_features = original_comedies[['index',
'title',
'first_name',
'last_name',
'creation_date',
'dialogue_vivacity',
'percentage_scene_split_verse',
'percentage_scene_split_rhymes',
'percentage_open_scenes',
'percentage_scenes_rhymes_split_verse']].copy()_____no_output_____comedies_verse_features.head()_____no_output_____
</code>
## Part 2. Feature Correlations_____no_output_____
<code>
comedies_verse_features[['dialogue_vivacity',
'percentage_scene_split_verse',
'percentage_scene_split_rhymes',
'percentage_open_scenes',
'percentage_scenes_rhymes_split_verse']].corr().round(2)_____no_output_____original_comedies[['dialogue_vivacity',
'mobility_coefficient']].corr()_____no_output_____
</code>
Dialogue vivacity is moderately positively correlated with the percentage of scenes with split verse lines (0.53), with the percentage of scenes with split rhymes (0.51), and slightly less correlated with the percentage of open scenes (0.45). However, it is strongly positively correlated with the percentage of scenes with both split rhymes and verse lines (0.73). The scenes with very fast-paced dialogue are more likely to be interconnected through both rhyme and shared verse lines. One unexpected discovery is that dialogue vivacity is only weakly correlated with the mobility coefficient (0.06): more active movement of dramatic characters on stage does not necessarily entail that their utterances are going to be shorter.
The percentage of scenes with split verse lines is moderately positively correlated with the percentage of scenes with split rhymes (0.66): the scenes that are connected by verse are likely but not necessarily always going to be connected through rhyme.
Such features as the percentage of open scenes and the percentage of scenes with split rhymes and verse lines are strongly positively correlated with their constituent features (the correlation of the percentage of open scenes with the percentage of scenes with split verse lines is 0.86, with the percentage of split rhymes is 0.92). From this, we can infer that the bulk of the open scenes are connected through rhymes. The percentage of scenes with split rhymes and verse lines is strongly positively correlated with the percentage of scenes with split verse lines (0.87) and the percentage of scenes with split rhymes._____no_output_____## Part 3. Feature Distributions and Normality_____no_output_____
<code>
make_plot(comedies_verse_features['dialogue_vivacity'],
'Distribution of the Dialogue Vivacity Coefficient')/opt/anaconda3/envs/text_extraction/lib/python3.7/site-packages/seaborn/distributions.py:2551: FutureWarning: `distplot` is a deprecated function and will be removed in a future version. Please adapt your code to use either `displot` (a figure-level function with similar flexibility) or `histplot` (an axes-level function for histograms).
warnings.warn(msg, FutureWarning)
mean, std, median = summary(comedies_verse_features['dialogue_vivacity'])
print('Mean dialogue vivacity coefficient', round(mean, 2))
print('Standard deviation of the dialogue vivacity coefficient:', round(std, 2))
print('Median dialogue vivacity coefficient:', median)Mean dialogue vivacity coefficient 0.46
Standard deviation of the dialogue vivacity coefficient: 0.1
Median dialogue vivacity coefficient: 0.4575
</code>
### Shapiro-Wilk Normality Test_____no_output_____
<code>
print('The p-value of the Shapiro-Wilk normality test:',
shapiro(comedies_verse_features['dialogue_vivacity'])[1])The p-value of the Shapiro-Wilk normality test: 0.2067030817270279
</code>
For the coefficient of dialogue vivacity, the Shapiro-Wilk test produced a p-value of 0.2067030817270279, which was above the 0.05 significance level. We failed to reject the null hypothesis of normal distribution._____no_output_____
<code>
make_plot(comedies_verse_features['percentage_scene_split_verse'],
'Distribution of The Percentage of Scenes with Split Verse Lines')_____no_output_____mean, std, median = summary(comedies_verse_features['percentage_scene_split_verse'])
print('Mean percentage of scenes with split verse lines:', round(mean, 2))
print('Standard deviation of the percentage of scenes with split verse lines:', round(std, 2))
print('Median percentage of scenes with split verse lines:', median)Mean percentage of scenes with split verse lines: 30.39
Standard deviation of the percentage of scenes with split verse lines: 14.39
Median percentage of scenes with split verse lines: 28.854
print('The p-value of the Shapiro-Wilk normality test:',
shapiro(comedies_verse_features['percentage_scene_split_verse'])[1])The p-value of the Shapiro-Wilk normality test: 0.8681985139846802
</code>
For the percentage of scenes with split verse lines, the Shapiro-Wilk test produced a very high p-value (0.8681985139846802). We failed to reject the null hypothesis of normal distribution._____no_output_____
<code>
make_plot(comedies_verse_features['percentage_scene_split_rhymes'],
'Distribution of The Percentage of Scenes with Split Rhymes')_____no_output_____mean, std, median = summary(comedies_verse_features['percentage_scene_split_rhymes'])
print('Mean percentage of scenes with split rhymes:', round(mean, 2))
print('Standard deviation of the percentage of scenes with split rhymes:', round(std, 2))
print('Median percentage of scenes with split rhymes:', median)Mean percentage of scenes with split rhymes: 39.77
Standard deviation of the percentage of scenes with split rhymes: 16.24
Median percentage of scenes with split rhymes: 36.6365
print('The p-value of the Shapiro-Wilk normality test:',
shapiro(comedies_verse_features['percentage_scene_split_rhymes'])[1])The p-value of the Shapiro-Wilk normality test: 0.5752763152122498
</code>
For the percentage of scenes with split rhymes, the Shapiro-Wilk test produced a p-value of 0.5752763152122498, much higher than the 0.05 significance level. Therefore, we failed to reject the null hypothesis of normal distribution._____no_output_____
<code>
make_plot(comedies_verse_features['percentage_open_scenes'],
'Distribution of The Percentage of Open Scenes')_____no_output_____mean, std, median = summary(comedies_verse_features['percentage_open_scenes'])
print('Mean percentage of open scenes:', round(mean, 2))
print('Standard deviation of the percentage of open scenes:', round(std, 2))
print('Median percentage of open scenes:', median)Mean percentage of open scenes: 55.62
Standard deviation of the percentage of open scenes: 19.25
Median percentage of open scenes: 56.6605
print('The p-value of the Shapiro-Wilk normality test:',
shapiro(comedies_verse_features['percentage_open_scenes'])[1])The p-value of the Shapiro-Wilk normality test: 0.3018988370895386
</code>
For the percentage of open scenes, the Shapiro-Wilk test produced a p-value of 0.3018988370895386, well above the significance level of 0.05. Therefore, we failed to reject the null hypothesis that the percentage of open scenes is normally distributed._____no_output_____
<code>
make_plot(comedies_verse_features['percentage_scenes_rhymes_split_verse'],
'Distribution of The Percentage of Scenes with Split Verse Lines and Rhymes')_____no_output_____mean, std, median = summary(comedies_verse_features['percentage_scenes_rhymes_split_verse'])
print('Mean percentage of scenes with split rhymes and verse lines:', round(mean, 2))
print('Standard deviation of the percentage of scenes with split rhymes and verse lines:', round(std, 2))
print('Median percentage of scenes with split rhymes and verse lines:', median)Mean percentage of scenes with split rhymes and verse lines: 14.53
Standard deviation of the percentage of scenes with split rhymes and verse lines: 9.83
Median percentage of scenes with split rhymes and verse lines: 13.0155
print('The p-value of the Shapiro-Wilk normality test:',
shapiro(comedies_verse_features['percentage_scenes_rhymes_split_verse'])[1])The p-value of the Shapiro-Wilk normality test: 0.015218793414533138
</code>
For the percentage of scenes with split verse lines and rhymes, the Shapiro-Wilk test produced a very low p-value (0.015218793414533138). Therefore, we rejected the null hypothesis of normal distribution._____no_output_____### Summary:
1. The majority of the verse features were normally distributed. For them, we could use a parametric statistical test.
2. The only feature that was not normally distributed was the percentage of scenes with split rhymes and verse lines. For this feature, we will use a non-parametric test, the Mann-Whitney U test._____no_output_____## Part 4. Hypothesis Testing_____no_output_____We will run statistical tests to determine whether the two periods that were distinguishable for the Russian five-act verse tragedy are also significantly different for the Russian five-act comedy. The two periods are:
- Period One (from 1747 to 1794)
- Period Two (from 1795 to 1822)
For all features that were normally distributed, we will use the *scipy.stats* Python library to run a **t-test** to check whether there is a difference between Period One and Period Two. The null hypothesis is that there is no difference between the two periods; the alternative hypothesis is that the two periods are different. Our significance level will be set at 0.05. If the p-value produced by the t-test is below 0.05, we will reject the null hypothesis of no difference.
For the percentage of scenes with split rhymes and verse lines, we will run **the Mann-Whitney U-test** to check whether there is a difference between Period One and Period Two. The null hypothesis is that there is no difference between these periods, whereas the alternative hypothesis is that the periods are different.
Since both periods have fewer than 20 comedies, we cannot use scipy's Mann-Whitney U-test, which requires each sample size to be at least 20 because it uses a normal approximation. Instead, we will have to run the Mann-Whitney U-test without a normal approximation, for which we wrote a custom function. The details about the test can be found in the following resource: https://sphweb.bumc.bu.edu/otlt/mph-modules/bs/bs704_nonparametric/bs704_nonparametric4.html.
One limitation that we need to mention is the sample size. The first period has only six comedies and the second period has only ten. However, it is impossible to increase the sample size - we cannot ask the Russian playwrights of the eighteenth and nineteenth centuries to produce more five-act verse comedies. If there are other Russian five-act comedies from these periods, they are either unknown or not available to us._____no_output_____
<code>
comedies_verse_features['period'] = comedies_verse_features.creation_date.apply(determine_period)_____no_output_____period_one = comedies_verse_features[comedies_verse_features['period'] == 1].copy()
period_two = comedies_verse_features[comedies_verse_features['period'] == 2].copy()_____no_output_____period_one.shape_____no_output_____period_two.shape_____no_output_____
</code>
## The T-Test_____no_output_____### The Coefficient of Dialogue Vivacity_____no_output_____
<code>
from scipy.stats import ttest_ind_____no_output_____ttest_ind(period_one['dialogue_vivacity'],
period_two['dialogue_vivacity'], equal_var=False)_____no_output_____
</code>
### The Percentage of Scenes With Split Verse Lines_____no_output_____
<code>
ttest_ind(period_one['percentage_scene_split_verse'],
period_two['percentage_scene_split_verse'], equal_var=False)_____no_output_____
</code>
### The Percentage of Scenes With Split Rhymes_____no_output_____
<code>
ttest_ind(period_one['percentage_scene_split_rhymes'],
period_two['percentage_scene_split_rhymes'], equal_var=False)_____no_output_____
</code>
### The Percentage of Open Scenes_____no_output_____
<code>
ttest_ind(period_one['percentage_open_scenes'],
period_two['percentage_open_scenes'], equal_var=False)_____no_output_____
</code>
### Summary
|Feature |p-value |Result
|---------------------------| ----------------|--------------------------------
| The coefficient of dialogue vivacity |0.92 | Not Significant
|The percentage of scenes with split verse lines|0.009 | Significant
|The percentage of scenes with split rhymes| 0.44| Not significant
|The percentage of open scenes| 0.10| Not significant_____no_output_____## The Mann-Whitney Test_____no_output_____The Process:
- Our null hypothesis is that there is no difference between two periods. Our alternative hypothesis is that the periods are different.
- We will set the significance level (alpha) at 0.05.
- We will run the test and calculate the test statistic.
- We will compare the test statistic with the critical value of U for a two-tailed test at alpha=0.05. Critical values can be found at https://www.real-statistics.com/statistics-tables/mann-whitney-table/.
- If our test statistic is equal to or lower than the critical value of U, we will reject the null hypothesis. Otherwise, we will fail to reject it._____no_output_____### The Percentage of Scenes With Split Verse Lines and Rhymes_____no_output_____
<code>
small_sample_mann_whitney_u_test(period_one['percentage_scenes_rhymes_split_verse'],
period_two['percentage_scenes_rhymes_split_verse'])_____no_output_____
</code>
### Critical Value of U_____no_output_____|Periods |Critical Value of U
|---------------------------| ----------------
| Period One (n=6) and Period Two (n=10) |11
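As a small sketch (ours) of the decision rule from the process list above: the u statistic is the value reported in the summary below, and the critical value comes from the table above.
<code>
# Decision rule for the small-sample Mann-Whitney test
u_statistic = 21        # small_sample_mann_whitney_u_test(...) result from above
critical_value_u = 11   # two-tailed, alpha = 0.05, n1 = 6, n2 = 10
print(u_statistic <= critical_value_u)  # False -> fail to reject the null hypothesis
</code>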
_____no_output_____### Summary
|Feature |u-statistic |Result
|---------------------------| ----------------|--------------------------------
| The percentage of scenes with split verse lines and rhymes|21 | Not Significant_____no_output_____We discovered that the distribution of only one feature, the percentage of scenes with split verse lines, was different in Periods One and Two. Distributions of other features did not prove to be significantly different. _____no_output_____## Part 5. Visualizations_____no_output_____
<code>
def scatter(df, feature, title, xlabel, text_y):
sns.jointplot('creation_date',
feature,
data=df,
color='b',
height=7).plot_joint(
sns.kdeplot,
zorder=0,
n_levels=20)
plt.axvline(1795, color='grey',linestyle='dashed', linewidth=2)
plt.text(1795.5, text_y, '1795')
plt.title(title, fontsize=20, pad=100)
plt.xlabel('Date', fontsize=14)
plt.ylabel(xlabel, fontsize=14)
plt.show()_____no_output_____
</code>
### The Coefficient of Dialogue Vivacity_____no_output_____
<code>
scatter(comedies_verse_features,
'dialogue_vivacity',
'The Coefficient of Dialogue Vivacity by Year',
'The Coefficient of Dialogue Vivacity',
0.85)/opt/anaconda3/envs/text_extraction/lib/python3.7/site-packages/seaborn/_decorators.py:43: FutureWarning: Pass the following variables as keyword args: x, y. From version 0.12, the only valid positional argument will be `data`, and passing other arguments without an explicit keyword will result in an error or misinterpretation.
FutureWarning
</code>
### The Percentage of Scenes With Split Verse Lines_____no_output_____
<code>
scatter(comedies_verse_features,
'percentage_scene_split_verse',
'The Percentage of Scenes With Split Verse Lines by Year',
'Percentage of Scenes With Split Verse Lines',
80)/opt/anaconda3/envs/text_extraction/lib/python3.7/site-packages/seaborn/_decorators.py:43: FutureWarning: Pass the following variables as keyword args: x, y. From version 0.12, the only valid positional argument will be `data`, and passing other arguments without an explicit keyword will result in an error or misinterpretation.
FutureWarning
</code>
### The Percentage of Scenes With Split Rhymes_____no_output_____
<code>
scatter(comedies_verse_features,
'percentage_scene_split_rhymes',
'The Percentage of Scenes With Split Rhymes by Year',
'The Percentage of Scenes With Split Rhymes',
80)/opt/anaconda3/envs/text_extraction/lib/python3.7/site-packages/seaborn/_decorators.py:43: FutureWarning: Pass the following variables as keyword args: x, y. From version 0.12, the only valid positional argument will be `data`, and passing other arguments without an explicit keyword will result in an error or misinterpretation.
FutureWarning
</code>
### The Percentage of Open Scenes_____no_output_____
<code>
scatter(comedies_verse_features,
'percentage_open_scenes',
'The Percentage of Open Scenes by Year',
'The Percentage of Open Scenes',
100)/opt/anaconda3/envs/text_extraction/lib/python3.7/site-packages/seaborn/_decorators.py:43: FutureWarning: Pass the following variables as keyword args: x, y. From version 0.12, the only valid positional argument will be `data`, and passing other arguments without an explicit keyword will result in an error or misinterpretation.
FutureWarning
</code>
### The Percentage of Scenes With Split Verse Lines and Rhymes_____no_output_____
<code>
scatter(comedies_verse_features,
'percentage_scenes_rhymes_split_verse',
' The Percentage of Scenes With Split Verse Lines and Rhymes by Year',
' The Percentage of Scenes With Split Verse Lines and Rhymes',
45)/opt/anaconda3/envs/text_extraction/lib/python3.7/site-packages/seaborn/_decorators.py:43: FutureWarning: Pass the following variables as keyword args: x, y. From version 0.12, the only valid positional argument will be `data`, and passing other arguments without an explicit keyword will result in an error or misinterpretation.
FutureWarning
</code>
## Part 6. Descriptive Statistics For Two Periods and Overall_____no_output_____### The Coefficient of Dialogue Vivacity_____no_output_____#### In Entire Corpus_____no_output_____
<code>
comedies_verse_features.describe().loc[:, 'dialogue_vivacity'][['mean',
'std',
'50%',
'min',
'max']].round(2)_____no_output_____
</code>
#### By Tentative Periods_____no_output_____
<code>
comedies_verse_features.groupby('period').describe().loc[:, 'dialogue_vivacity'][['mean',
'std',
'50%',
'min',
'max']].round(2)_____no_output_____
</code>
### The Percentage of Scenes With Split Verse Lines_____no_output_____#### In Entire Corpus_____no_output_____
<code>
comedies_verse_features.describe().loc[:, 'percentage_scene_split_verse'][['mean',
'std',
'50%',
'min',
'max']].round(2)_____no_output_____
</code>
#### By Periods_____no_output_____
<code>
comedies_verse_features.groupby('period').describe().loc[:, 'percentage_scene_split_verse'][['mean',
'std',
'50%',
'min',
'max']].round(2)_____no_output_____
</code>
### The Percentage of Scenes With Split Rhymes_____no_output_____
<code>
comedies_verse_features.describe().loc[:, 'percentage_scene_split_rhymes'][['mean',
'std',
'50%',
'min',
'max']].round(2)_____no_output_____
</code>
#### By Tentative Periods_____no_output_____
<code>
comedies_verse_features.groupby('period').describe().loc[:, 'percentage_scene_split_rhymes'][['mean',
'std',
'50%',
'min',
'max']].round(2)_____no_output_____
</code>
### The Percentage of Open Scenes_____no_output_____#### In Entire Corpus_____no_output_____
<code>
comedies_verse_features.describe().loc[:, 'percentage_open_scenes'][['mean',
'std',
'50%',
'min',
'max']].round(2)_____no_output_____
</code>
#### By Tentative Periods_____no_output_____
<code>
comedies_verse_features.groupby('period').describe().loc[:, 'percentage_open_scenes'][['mean',
'std',
'50%',
'min',
'max']].round(2)_____no_output_____
</code>
### The Percentage of Scenes With Split Verse Lines and Rhymes_____no_output_____
<code>
comedies_verse_features.describe().loc[:, 'percentage_scenes_rhymes_split_verse'][['mean',
'std',
'50%',
'min',
'max']].round(2)_____no_output_____comedies_verse_features.groupby('period').describe().loc[:, 'percentage_scenes_rhymes_split_verse'][['mean',
'std',
'50%',
'min',
'max']].round(2)_____no_output_____
</code>
### Summary:
1. The mean dialogue vivacity in the corpus of the Russian five-act comedy in verse was 0.46, with a 0.10 standard deviation. In the tentative Period One, the mean dialogue vivacity was 0.46, the same as in the tentative Period Two. The standard deviation increased from 0.05 in the tentative Period One to 0.13 in the tentative Period Two.
2. The mean percentage of scenes with split verse lines in the corpus was 30.39%, with a standard deviation of 14.39. In Period One, the mean percentage of scenes with split verse lines was 19.37%, with a standard deviation of 10.16. In Period Two, the mean percentage of scenes with split verse lines almost doubled to 37%, with a standard deviation of 12.57%.
3. The average percentage of scenes with split rhymes was higher in the entire corpus of the Russian five-act comedies in verse than the average percentage of scenes with split verse lines (39.77% vs. 30.39%), as was the standard deviation (16.24% vs. 14.39%). The percentage of scenes with split rhymes grew from the tentative Period One to the tentative Period Two from 35.55% to 42.30%; the standard deviation slightly increased from 15.73% to 16.82%.
4. In the corpus, the average percentage of open scenes was 55.62%, i.e., more than half of all scenes were connected either through rhyme or verse lines. The standard deviation was 19.25%. In the tentative Period One, the percentage of open scenes was 44.65%, with a standard deviation of 19.76%. In the tentative Period Two, the percentage of open scenes increased to 62.21%, with a standard deviation of 16.50%, i.e., the standard deviation was lower in Period Two.
5. For the corpus, only 14.53% of all scenes were connected through both rhymes and verse lines. The standard deviation of the percentage of scenes with split verse lines and rhymes was 9.83%. In the tentative Period One, the mean percentage of scenes with split verse lines and rhymes was 10.27%, with a standard deviation of 5.22%. In the tentative Period Two, the mean percentage of scenes with split verse lines and rhymes was 17.09%, with a much higher standard deviation of 11.25%._____no_output_____## Conclusions:
1. The majority of the examined features were normally distributed, except for the percentage of scenes with split verse lines and rhymes.
2. The distribution of the percentage of scenes with split verse lines differed significantly between Period One (from 1775 to 1794) and Period Two (from 1795 to 1849).
3. For the other verse features, there was no evidence to suggest that the two periods of the Russian five-act comedy in verse are significantly different.
4. The mean values of all examined features (except for the vivacity coefficient) increased from tentative Period One to Period Two. The mean vivacity coefficient remained the same from the tentative Period One to Period Two. The standard deviation of all examined features (except for the percentage of open scenes) increased from Period One to Period Two.
5. Judging by the natural clustering in the data evident from visualizations, 1805 may be a more appropriate boundary between the two time periods for comedy._____no_output_____
| {
"repository": "innawendell/European_Comedy",
"path": "Analyses/The Evolution of The Russian Comedy_Verse_Features.ipynb",
"matched_keywords": [
"evolution"
],
"stars": null,
"size": 948447,
"hexsha": "d002bc0e0081d73349f836a6e32db713d13f5fa2",
"max_line_length": 157244,
"avg_line_length": 383.0561389338,
"alphanum_fraction": 0.9222391973
} |
# Notebook from quantopian/Probabilistic-Programming-and-Bayesian-Methods-for-Hackers
Path: Chapter4_TheGreatestTheoremNeverTold/Ch4_LawOfLargeNumbers_PyMC3.ipynb
# Chapter 4
`Original content created by Cam Davidson-Pilon`
`Ported to Python 3 and PyMC3 by Max Margenot (@clean_utensils) and Thomas Wiecki (@twiecki) at Quantopian (@quantopian)`
______
## The greatest theorem never told
This chapter focuses on an idea that is always bouncing around our minds, but is rarely made explicit outside books devoted to statistics. In fact, we've been using this simple idea in every example thus far. _____no_output_____### The Law of Large Numbers
Let $Z_i$ be $N$ independent samples from some probability distribution. According to *the Law of Large numbers*, so long as the expected value $E[Z]$ is finite, the following holds,
$$\frac{1}{N} \sum_{i=1}^N Z_i \rightarrow E[ Z ], \;\;\; N \rightarrow \infty.$$
In words:
> The average of a sequence of random variables from the same distribution converges to the expected value of that distribution.
This may seem like a boring result, but it will be the most useful tool you use._____no_output_____### Intuition
If the above Law is somewhat surprising, it can be made clearer by examining a simple example.
Consider a random variable $Z$ that can take only two values, $c_1$ and $c_2$. Suppose we have a large number of samples of $Z$, denoting a specific sample $Z_i$. The Law says that we can approximate the expected value of $Z$ by averaging over all samples. Consider the average:
$$ \frac{1}{N} \sum_{i=1}^N \;Z_i $$
By construction, $Z_i$ can only take on $c_1$ or $c_2$, hence we can partition the sum over these two values:
\begin{align}
\frac{1}{N} \sum_{i=1}^N \;Z_i
& =\frac{1}{N} \big( \sum_{ Z_i = c_1}c_1 + \sum_{Z_i=c_2}c_2 \big) \\\\[5pt]
& = c_1 \sum_{ Z_i = c_1}\frac{1}{N} + c_2 \sum_{ Z_i = c_2}\frac{1}{N} \\\\[5pt]
& = c_1 \times \text{ (approximate frequency of $c_1$) } \\\\
& \;\;\;\;\;\;\;\;\; + c_2 \times \text{ (approximate frequency of $c_2$) } \\\\[5pt]
& \approx c_1 \times P(Z = c_1) + c_2 \times P(Z = c_2 ) \\\\[5pt]
& = E[Z]
\end{align}
Equality holds in the limit, but we can get closer and closer by using more and more samples in the average. This Law holds for almost *any distribution*, minus some important cases we will encounter later.
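A tiny, hedged sketch of this two-value case (the values $c_1$, $c_2$ and the probability below are arbitrary choices of ours):
<code>
import numpy as np

# Z takes value c1 with probability 0.3 and c2 with probability 0.7,
# so E[Z] = 0.3*c1 + 0.7*c2; the sample average should approach it.
c1, c2, p_c1 = 1.0, 5.0, 0.3
Z = np.random.choice([c1, c2], size=100000, p=[p_c1, 1 - p_c1])
print(Z.mean(), c1 * p_c1 + c2 * (1 - p_c1))
</code>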
##### Example
____
Below is a diagram of the Law of Large numbers in action for three different sequences of Poisson random variables.
We sample `sample_size = 100000` Poisson random variables with parameter $\lambda = 4.5$. (Recall the expected value of a Poisson random variable is equal to its parameter.) We calculate the average for the first $n$ samples, for $n=1$ to `sample_size`. _____no_output_____
<code>
%matplotlib inline
import numpy as np
from IPython.core.pylabtools import figsize
import matplotlib.pyplot as plt
figsize( 12.5, 5 )
sample_size = 100000
expected_value = lambda_ = 4.5
poi = np.random.poisson
N_samples = range(1,sample_size,100)
for k in range(3):
samples = poi( lambda_, sample_size )
partial_average = [ samples[:i].mean() for i in N_samples ]
plt.plot( N_samples, partial_average, lw=1.5,label="average \
of $n$ samples; seq. %d"%k)
plt.plot( N_samples, expected_value*np.ones_like( partial_average),
ls = "--", label = "true expected value", c = "k" )
plt.ylim( 4.35, 4.65)
plt.title( "Convergence of the average of \n random variables to its \
expected value" )
plt.ylabel( "average of $n$ samples" )
plt.xlabel( "# of samples, $n$")
plt.legend();_____no_output_____
</code>
Looking at the above plot, it is clear that when the sample size is small, there is greater variation in the average (compare how *jagged and jumpy* the average is initially, then how it *smooths* out). All three paths *approach* the value 4.5, but just flirt with it as $N$ gets large. Mathematicians and statisticians have another name for *flirting*: convergence.
Another very relevant question we can ask is *how quickly am I converging to the expected value?* Let's plot something new. For a specific $N$, let's do the above trials thousands of times and compute how far away we are from the true expected value, on average. But wait — *compute on average*? This is simply the law of large numbers again! For example, we are interested in, for a specific $N$, the quantity:
$$D(N) = \sqrt{ \;E\left[\;\; \left( \frac{1}{N}\sum_{i=1}^NZ_i - 4.5 \;\right)^2 \;\;\right] \;\;}$$
The above formula is interpretable as a distance away from the true value (on average), for some $N$. (We take the square root so the dimensions of the above quantity and our random variables are the same.) As the above is an expected value, it can be approximated using the law of large numbers: instead of averaging $Z_i$, we calculate the following multiple times and average them:
$$ Y_k = \left( \;\frac{1}{N}\sum_{i=1}^NZ_i - 4.5 \; \right)^2 $$
By computing the above many, $N_Y$, times (remember, it is random), and averaging them:
$$ \frac{1}{N_Y} \sum_{k=1}^{N_Y} Y_k \rightarrow E[ Y_k ] = E\;\left[\;\; \left( \frac{1}{N}\sum_{i=1}^NZ_i - 4.5 \;\right)^2 \right]$$
Finally, taking the square root:
$$ \sqrt{\frac{1}{N_Y} \sum_{k=1}^{N_Y} Y_k} \approx D(N) $$ _____no_output_____
<code>
figsize( 12.5, 4)
N_Y = 250 #use this many to approximate D(N)
N_array = np.arange( 1000, 50000, 2500 ) #use this many samples in the approx. to the variance.
D_N_results = np.zeros( len( N_array ) )
lambda_ = 4.5
expected_value = lambda_ #for X ~ Poi(lambda) , E[ X ] = lambda
def D_N( n ):
"""
This function approx. D_n, the average variance of using n samples.
"""
Z = poi( lambda_, (n, N_Y) )
average_Z = Z.mean(axis=0)
return np.sqrt( ( (average_Z - expected_value)**2 ).mean() )
for i,n in enumerate(N_array):
D_N_results[i] = D_N(n)
plt.xlabel( "$N$" )
plt.ylabel( "expected squared-distance from true value" )
plt.plot(N_array, D_N_results, lw = 3,
label="expected distance between\n\
expected value and \naverage of $N$ random variables.")
plt.plot( N_array, np.sqrt(expected_value)/np.sqrt(N_array), lw = 2, ls = "--",
label = r"$\frac{\sqrt{\lambda}}{\sqrt{N}}$" )
plt.legend()
plt.title( "How 'fast' is the sample average converging? " );_____no_output_____
</code>
As expected, the expected distance between our sample average and the actual expected value shrinks as $N$ grows large. But also notice that the *rate* of convergence decreases, that is, we need only 10 000 additional samples to move from 0.020 to 0.015, a difference of 0.005, but *20 000* more samples to again decrease from 0.015 to 0.010, again only a 0.005 decrease.
It turns out we can measure this rate of convergence. Above I have plotted a second line, the function $\sqrt{\lambda}/\sqrt{N}$. This was not chosen arbitrarily. In most cases, given a sequence of random variable distributed like $Z$, the rate of convergence to $E[Z]$ of the Law of Large Numbers is
$$ \frac{ \sqrt{ \; Var(Z) \; } }{\sqrt{N} }$$
This is useful to know: for a given large $N$, we know (on average) how far away we are from the estimate. On the other hand, in a Bayesian setting, this can seem like a useless result: Bayesian analysis is OK with uncertainty so what's the *statistical* point of adding extra precise digits? Though drawing samples can be so computationally cheap that having a *larger* $N$ is fine too.
### How do we compute $Var(Z)$ though?
The variance is simply another expected value that can be approximated! Consider the following: once we have the expected value (estimated using the Law of Large Numbers; denote it $\mu$), we can estimate the variance:
$$ \frac{1}{N}\sum_{i=1}^N \;(Z_i - \mu)^2 \rightarrow E[ \;( Z - \mu)^2 \;] = Var( Z )$$
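A minimal sketch of this estimate (ours), again with Poisson($\lambda = 4.5$) samples, for which the true variance equals $\lambda$:
<code>
import numpy as np

# approximate Var(Z) with the Law of Large Numbers for Poisson(4.5) samples
Z_samples = np.random.poisson(4.5, 100000)
mu_hat = Z_samples.mean()                     # LLN estimate of E[Z]
var_hat = ((Z_samples - mu_hat) ** 2).mean()  # LLN estimate of Var(Z)
print(mu_hat, var_hat)                        # both should be close to 4.5
</code>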
### Expected values and probabilities
There is an even less explicit relationship between expected value and estimating probabilities. Define the *indicator function*
$$\mathbb{1}_A(x) =
\begin{cases} 1 & x \in A \\\\
0 & else
\end{cases}
$$
Then, by the law of large numbers, if we have many samples $X_i$, we can estimate the probability of an event $A$, denoted $P(A)$, by:
$$ \frac{1}{N} \sum_{i=1}^N \mathbb{1}_A(X_i) \rightarrow E[\mathbb{1}_A(X)] = P(A) $$
Again, this is fairly obvious after a moment's thought: the indicator function is only 1 if the event occurs, so we are summing only the times the event occurs and dividing by the total number of trials (consider how we usually approximate probabilities using frequencies). For example, suppose we wish to estimate the probability that a $Z \sim Exp(.5)$ is greater than 5, and we have many samples from an $Exp(.5)$ distribution.
$$ P( Z > 5 ) \approx \frac{1}{N} \sum_{i=1}^N \mathbb{1}_{Z > 5 }(Z_i) $$
_____no_output_____
<code>
N = 10000
print( np.mean( [ np.random.exponential( 0.5 ) > 5 for i in range(N) ] ) )0.0001
</code>
### What does this all have to do with Bayesian statistics?
*Point estimates*, to be introduced in the next chapter, in Bayesian inference are computed using expected values. In more analytical Bayesian inference, we would have been required to evaluate complicated expected values represented as multi-dimensional integrals. No longer. If we can sample from the posterior distribution directly, we simply need to evaluate averages. Much easier. If accuracy is a priority, plots like the ones above show how fast you are converging. And if further accuracy is desired, just take more samples from the posterior.
When is enough enough? When can you stop drawing samples from the posterior? That is the practitioner's decision, and it also depends on the variance of the samples (recall from above that a high variance means the average will converge more slowly).
We also should understand when the Law of Large Numbers fails. As the name implies, and comparing the graphs above for small $N$, the Law is only true for large sample sizes. Without this, the asymptotic result is not reliable. Knowing in what situations the Law fails can give us *confidence in how unconfident we should be*. The next section deals with this issue._____no_output_____## The Disorder of Small Numbers
The Law of Large Numbers is only valid as $N$ gets *infinitely* large: never truly attainable. While the law is a powerful tool, it is foolhardy to apply it liberally. Our next example illustrates this.
##### Example: Aggregated geographic data
Often data comes in aggregated form. For instance, data may be grouped by state, county, or city level. Of course, the population numbers vary per geographic area. If the data is an average of some characteristic of each of the geographic areas, we must be conscious of the Law of Large Numbers and how it can *fail* for areas with small populations.
We will observe this on a toy dataset. Suppose there are five thousand counties in our dataset. Furthermore, the population of each county is uniformly distributed between 100 and 1500. The way the population numbers are generated is irrelevant to the discussion, so we do not justify this. We are interested in measuring the average height of individuals per county. Unbeknownst to us, height does **not** vary across counties, and each individual, regardless of the county he or she is currently living in, has the same distribution of what their height may be:
$$ \text{height} \sim \text{Normal}(150, 15 ) $$
We aggregate the individuals at the county level, so we only have data for the *average in the county*. What might our dataset look like?_____no_output_____
<code>
figsize( 12.5, 4)
std_height = 15
mean_height = 150
n_counties = 5000
pop_generator = np.random.randint
norm = np.random.normal
#generate some artificial population numbers
population = pop_generator(100, 1500, n_counties )
average_across_county = np.zeros( n_counties )
for i in range( n_counties ):
#generate some individuals and take the mean
average_across_county[i] = norm(mean_height, std_height,
population[i] ).mean()
#located the counties with the apparently most extreme average heights.
i_min = np.argmin( average_across_county )
i_max = np.argmax( average_across_county )
#plot population size vs. recorded average
plt.scatter( population, average_across_county, alpha = 0.5, c="#7A68A6")
plt.scatter( [ population[i_min], population[i_max] ],
[average_across_county[i_min], average_across_county[i_max] ],
s = 60, marker = "o", facecolors = "none",
edgecolors = "#A60628", linewidths = 1.5,
label="extreme heights")
plt.xlim( 100, 1500 )
plt.title( "Average height vs. County Population")
plt.xlabel("County Population")
plt.ylabel("Average height in county")
plt.plot( [100, 1500], [150, 150], color = "k", label = "true expected \
height", ls="--" )
plt.legend(scatterpoints = 1);_____no_output_____
</code>
What do we observe? *Without accounting for population sizes* we run the risk of making an enormous inference error: if we ignored population size, we would say that the counties with the shortest and tallest individuals have been correctly circled. But this inference is wrong for the following reason. These two counties do *not* necessarily have the most extreme heights. The error results from the calculated average of smaller populations not being a good reflection of the true expected value of the population (which in truth should be $\mu =150$). The sample size/population size/$N$, whatever you wish to call it, is simply too small to invoke the Law of Large Numbers effectively.
We provide more damning evidence against this inference. Recall the population numbers were uniformly distributed over 100 to 1500. Our intuition should tell us that the counties with the most extreme average heights should also be uniformly spread over 100 to 1500, and certainly be independent of the county's population. Not so. Below are the population sizes of the counties with the most extreme heights._____no_output_____
<code>
print("Population sizes of 10 'shortest' counties: ")
print(population[ np.argsort( average_across_county )[:10] ], '\n')
print("Population sizes of 10 'tallest' counties: ")
print(population[ np.argsort( -average_across_county )[:10] ])Population sizes of 10 'shortest' counties:
[109 135 135 133 109 157 175 120 105 131]
Population sizes of 10 'tallest' counties:
[122 133 313 109 124 280 106 198 326 216]
</code>
Not at all uniform over 100 to 1500. This is an absolute failure of the Law of Large Numbers.
##### Example: Kaggle's *U.S. Census Return Rate Challenge*
Below is data from the 2010 US census, which partitions populations beyond counties to the level of block groups (which are aggregates of city blocks or equivalents). The dataset is from a Kaggle machine learning competition some colleagues and I participated in. The objective was to predict the census letter mail-back rate of a group block, measured between 0 and 100, using census variables (median income, number of females in the block-group, number of trailer parks, average number of children etc.). Below we plot the census mail-back rate versus block group population:_____no_output_____
<code>
figsize( 12.5, 6.5 )
data = np.genfromtxt( "./data/census_data.csv", skip_header=1,
delimiter= ",")
plt.scatter( data[:,1], data[:,0], alpha = 0.5, c="#7A68A6")
plt.title("Census mail-back rate vs Population")
plt.ylabel("Mail-back rate")
plt.xlabel("population of block-group")
plt.xlim(-100, 15e3 )
plt.ylim( -5, 105)
i_min = np.argmin( data[:,0] )
i_max = np.argmax( data[:,0] )
plt.scatter( [ data[i_min,1], data[i_max, 1] ],
[ data[i_min,0], data[i_max,0] ],
s = 60, marker = "o", facecolors = "none",
edgecolors = "#A60628", linewidths = 1.5,
label="most extreme points")
plt.legend(scatterpoints = 1);_____no_output_____
</code>
The above is a classic phenomenon in statistics. I say *classic* referring to the "shape" of the scatter plot above. It follows a classic triangular form that tightens as we increase the sample size (as the Law of Large Numbers becomes more exact).
I am perhaps overstressing the point and maybe I should have titled the book *"You don't have big data problems!"*, but here again is an example of the trouble with *small datasets*, not big ones. Simply, small datasets cannot be processed using the Law of Large Numbers. Compare with applying the Law without hassle to big datasets (e.g. big data). I mentioned earlier that paradoxically big data prediction problems are solved by relatively simple algorithms. The paradox is partially resolved by understanding that the Law of Large Numbers creates solutions that are *stable*, i.e. adding or subtracting a few data points will not affect the solution much. On the other hand, adding or removing data points from a small dataset can create very different results.
For further reading on the hidden dangers of the Law of Large Numbers, I would highly recommend the excellent manuscript [The Most Dangerous Equation](http://nsm.uh.edu/~dgraur/niv/TheMostDangerousEquation.pdf). _____no_output_____##### Example: How to order Reddit submissions
You may have disagreed with the original statement that the Law of Large numbers is known to everyone, but only implicitly in our subconscious decision making. Consider ratings on online products: how often do you trust an average 5-star rating if there is only 1 reviewer? 2 reviewers? 3 reviewers? We implicitly understand that with so few reviewers, the average rating is **not** a good reflection of the true value of the product.
This has created flaws in how we sort items, and more generally, how we compare items. Many people have realized that sorting online search results by their rating, whether the objects be books, videos, or online comments, returns poor results. Often the seemingly top videos or comments have perfect ratings only from a few enthusiastic fans, and truly higher-quality videos or comments are hidden on later pages with *falsely-substandard* ratings of around 4.8. How can we correct this?
Consider the popular site Reddit (I purposefully did not link to the website as you would never come back). The site hosts links to stories or images, called submissions, for people to comment on. Redditors can vote up or down on each submission (called upvotes and downvotes). Reddit, by default, will sort submissions to a given subreddit by Hot, that is, the submissions that have the most upvotes recently.
<img src="http://i.imgur.com/3v6bz9f.png" />
How would you determine which submissions are the best? There are a number of ways to achieve this:
1. *Popularity*: A submission is considered good if it has many upvotes. A problem with this model is a submission with hundreds of upvotes but thousands of downvotes: while very *popular*, such a submission is likely more controversial than good.
2. *Difference*: Using the *difference* of upvotes and downvotes. This solves the above problem, but fails when we consider the temporal nature of submissions. Depending on when a submission is posted, the website may be experiencing high or low traffic. The difference method will bias the *Top* submissions to be those made during high-traffic periods, which have accumulated more upvotes than submissions that were not so graced, but are not necessarily the best.
3. *Time adjusted*: Consider using the difference divided by the age of the submission. This creates a *rate*, something like *difference per second* or *per minute*. An immediate counter-example is that, if we use per second, a 1-second-old submission with 1 upvote would be better than a 100-second-old submission with 99 upvotes. One can avoid this by only considering submissions that are at least t seconds old. But what is a good t value? Does this mean no submission younger than t is good? We end up comparing unstable quantities with stable quantities (young vs. old submissions).
4. *Ratio*: Rank submissions by the ratio of upvotes to the total number of votes (upvotes plus downvotes). This solves the temporal issue, so that new submissions that score well are just as likely to be considered Top as older submissions, provided they have many upvotes relative to total votes. The problem here is that a submission with a single upvote (ratio = 1.0) will beat a submission with 999 upvotes and 1 downvote (ratio = 0.999), but clearly the latter submission is *more likely* to be better (a small sketch of these schemes follows this list).
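A minimal sketch (ours) of these four naive schemes applied to two made-up submissions, showing how the raw ratio favors the single-vote submission:
<code>
# two toy submissions: (upvotes, downvotes, age in seconds); numbers are made up
submissions = {"A": (1, 0, 10), "B": (999, 1, 86400)}
for name, (ups, downs, age) in submissions.items():
    popularity = ups
    difference = ups - downs
    rate = difference / age            # "time adjusted"
    ratio = ups / (ups + downs)        # raw upvote ratio
    print(name, popularity, difference, round(rate, 4), round(ratio, 4))
# A's ratio (1.0) beats B's (0.999) even though B is almost surely better.
</code>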
I used the phrase *more likely* for good reason. It is possible that the former submission, with a single upvote, is in fact a better submission than the latter with 999 upvotes. The hesitation to agree with this is because we have not seen the other 999 potential votes the former submission might get. Perhaps it will achieve an additional 999 upvotes and 0 downvotes and be considered better than the latter, though not likely.
What we really want is an estimate of the *true upvote ratio*. Note that the true upvote ratio is not the same as the observed upvote ratio: the true upvote ratio is hidden, and we only observe upvotes vs. downvotes (one can think of the true upvote ratio as "what is the underlying probability someone gives this submission a upvote, versus a downvote"). So the 999 upvote/1 downvote submission probably has a true upvote ratio close to 1, which we can assert with confidence thanks to the Law of Large Numbers, but on the other hand we are much less certain about the true upvote ratio of the submission with only a single upvote. Sounds like a Bayesian problem to me.
_____no_output_____One way to determine a prior on the upvote ratio is to look at the historical distribution of upvote ratios. This can be accomplished by scraping Reddit's submissions and determining a distribution. There are a few problems with this technique though:
1. Skewed data: The vast majority of submissions have very few votes, hence there will be many submissions with ratios near the extremes (see the "triangular plot" in the above Kaggle dataset), effectively skewing our distribution to the extremes. One could try to only use submissions with a vote count above some threshold, but problems are encountered again: there is a tradeoff between the number of submissions available to use and the ratio precision that a higher threshold buys.
2. Biased data: Reddit is composed of different subpages, called subreddits. Two examples are *r/aww*, which posts pics of cute animals, and *r/politics*. It is very likely that user behaviour towards submissions of these two subreddits is very different: visitors are likely friendly and affectionate in the former, and would therefore upvote submissions more, compared to the latter, where submissions are likely to be controversial and disagreed upon. Therefore not all submissions are the same.
In light of these, I think it is better to use a `Uniform` prior.
With our prior in place, we can find the posterior of the true upvote ratio. The Python script `top_showerthoughts_submissions.py` will scrape the best posts from the `showerthoughts` community on Reddit. This is a text-only community so the title of each post *is* the post. Below is the top post as well as some other sample posts:_____no_output_____
<code>
#adding a number to the end of the %run call will get the ith top post.
%run top_showerthoughts_submissions.py 2
print("Post contents: \n")
print(top_post)Post contents:
Toilet paper should be free and have advertising printed on it.
"""
contents: an array of the text from the last 100 top submissions to a subreddit
votes: a 2d numpy array of upvotes, downvotes for each submission.
"""
n_submissions = len(votes)
submissions = np.random.randint( n_submissions, size=4)
print("Some Submissions (out of %d total) \n-----------"%n_submissions)
for i in submissions:
print('"' + contents[i] + '"')
print("upvotes/downvotes: ",votes[i,:], "\n")Some Submissions (out of 98 total)
-----------
"Rappers from the 90's used guns when they had beef rappers today use Twitter."
upvotes/downvotes: [32 3]
"All polls are biased towards people who are willing to take polls"
upvotes/downvotes: [1918 101]
"Taco Bell should give customers an extra tortilla so they can make a burrito out of all the stuff that spilled out of the other burritos they ate."
upvotes/downvotes: [79 17]
"There should be an /r/alanismorissette where it's just examples of people using "ironic" incorrectly"
upvotes/downvotes: [33 6]
</code>
For a given true upvote ratio $p$ and $N$ votes, the number of upvotes will look like a Binomial random variable with parameters $p$ and $N$. (This is because of the equivalence between upvote ratio and probability of upvoting versus downvoting, out of $N$ possible votes/trials). We create a function that performs Bayesian inference on $p$, for a particular submission's upvote/downvote pair._____no_output_____
<code>
import pymc3 as pm
def posterior_upvote_ratio( upvotes, downvotes, samples = 20000):
"""
    This function accepts the number of upvotes and downvotes a particular submission received,
and the number of posterior samples to return to the user. Assumes a uniform prior.
"""
N = upvotes + downvotes
with pm.Model() as model:
upvote_ratio = pm.Uniform("upvote_ratio", 0, 1)
observations = pm.Binomial( "obs", N, upvote_ratio, observed=upvotes)
trace = pm.sample(samples, step=pm.Metropolis())
burned_trace = trace[int(samples/4):]
return burned_trace["upvote_ratio"]
_____no_output_____
</code>
Below are the resulting posterior distributions._____no_output_____
<code>
figsize( 11., 8)
posteriors = []
colours = ["#348ABD", "#A60628", "#7A68A6", "#467821", "#CF4457"]
for i in range(len(submissions)):
j = submissions[i]
posteriors.append( posterior_upvote_ratio( votes[j, 0], votes[j,1] ) )
plt.hist( posteriors[i], bins = 10, normed = True, alpha = .9,
histtype="step",color = colours[i%5], lw = 3,
label = '(%d up:%d down)\n%s...'%(votes[j, 0], votes[j,1], contents[j][:50]) )
plt.hist( posteriors[i], bins = 10, normed = True, alpha = .2,
histtype="stepfilled",color = colours[i], lw = 3, )
plt.legend(loc="upper left")
plt.xlim( 0, 1)
plt.title("Posterior distributions of upvote ratios on different submissions");Applied interval-transform to upvote_ratio and added transformed upvote_ratio_interval_ to model.
[-------100%-------] 20000 of 20000 in 1.4 sec. | SPS: 14595.5 | ETA: 0.0Applied interval-transform to upvote_ratio and added transformed upvote_ratio_interval_ to model.
[-------100%-------] 20000 of 20000 in 1.3 sec. | SPS: 15189.5 | ETA: 0.0Applied interval-transform to upvote_ratio and added transformed upvote_ratio_interval_ to model.
[-------100%-------] 20000 of 20000 in 1.3 sec. | SPS: 15429.0 | ETA: 0.0Applied interval-transform to upvote_ratio and added transformed upvote_ratio_interval_ to model.
[-------100%-------] 20000 of 20000 in 1.3 sec. | SPS: 15146.5 | ETA: 0.0
</code>
Some distributions are very tight, others have very long tails (relatively speaking), expressing our uncertainty about what the true upvote ratio might be.
### Sorting!
We have been ignoring the goal of this exercise: how do we sort the submissions from *best to worst*? Of course, we cannot sort distributions; we must sort scalar numbers. There are many ways to distill a distribution down to a scalar: expressing the distribution through its expected value, or mean, is one way. The mean is a bad choice, though, because it does not take into account the uncertainty of the distributions.
I suggest using the *95% least plausible value*, defined as the value such that there is only a 5% chance the true parameter is lower (think of the lower bound on the 95% credible region). Below are the posterior distributions with the 95% least-plausible value plotted:_____no_output_____
<code>
N = posteriors[0].shape[0]
lower_limits = []
for i in range(len(submissions)):
j = submissions[i]
plt.hist( posteriors[i], bins = 20, normed = True, alpha = .9,
histtype="step",color = colours[i], lw = 3,
label = '(%d up:%d down)\n%s...'%(votes[j, 0], votes[j,1], contents[j][:50]) )
plt.hist( posteriors[i], bins = 20, normed = True, alpha = .2,
histtype="stepfilled",color = colours[i], lw = 3, )
v = np.sort( posteriors[i] )[ int(0.05*N) ]
#plt.vlines( v, 0, 15 , color = "k", alpha = 1, linewidths=3 )
plt.vlines( v, 0, 10 , color = colours[i], linestyles = "--", linewidths=3 )
lower_limits.append(v)
plt.legend(loc="upper left")
plt.legend(loc="upper left")
plt.title("Posterior distributions of upvote ratios on different submissions");
order = np.argsort( -np.array( lower_limits ) )
print(order, lower_limits)[1 0 2 3] [0.80034320917496615, 0.94092009444598201, 0.74660503350561902, 0.72190353389632911]
</code>
The best submissions, according to our procedure, are the submissions that are *most-likely* to score a high percentage of upvotes. Visually those are the submissions with the 95% least plausible value close to 1.
Why is sorting based on this quantity a good idea? By ordering by the 95% least plausible value, we are being the most conservative with what we think is best. That is, even in the worst case scenario, when we have severely overestimated the upvote ratio, we can be sure the best submissions are still on top. Under this ordering, we impose the following very natural properties:
1. given two submissions with the same observed upvote ratio, we will assign the submission with more votes as better (since we are more confident it has a higher ratio).
2. given two submissions with the same number of votes, we still assign the submission with more upvotes as *better*.
### But this is too slow for real-time!
I agree, computing the posterior of every submission takes a long time, and by the time you have computed it, likely the data has changed. I delay the mathematics to the appendix, but I suggest using the following formula to compute the lower bound very fast.
$$ \frac{a}{a + b} - 1.65\sqrt{ \frac{ab}{ (a+b)^2(a + b +1 ) } }$$
where
\begin{align}
& a = 1 + u \\\\
& b = 1 + d \\\\
\end{align}
$u$ is the number of upvotes, and $d$ is the number of downvotes. The formula is a shortcut in Bayesian inference, which will be further explained in Chapter 6 when we discuss priors in more detail.
_____no_output_____
<code>
def intervals(u,d):
a = 1. + u
b = 1. + d
mu = a/(a+b)
std_err = 1.65*np.sqrt( (a*b)/( (a+b)**2*(a+b+1.) ) )
return ( mu, std_err )
print("Approximate lower bounds:")
posterior_mean, std_err = intervals(votes[:,0],votes[:,1])
lb = posterior_mean - std_err
print(lb)
print("\n")
print("Top 40 Sorted according to approximate lower bounds:")
print("\n")
order = np.argsort( -lb )
ordered_contents = []
for i in order[:40]:
ordered_contents.append( contents[i] )
print(votes[i,0], votes[i,1], contents[i])
print("-------------")Approximate lower bounds:
[ 0.93349005 0.9532194 0.94149718 0.90859764 0.88705356 0.8558795
0.85644927 0.93752679 0.95767101 0.91131012 0.910073 0.915999
0.9140058 0.83276025 0.87593961 0.87436674 0.92830849 0.90642832
0.89187973 0.89950891 0.91295322 0.78607629 0.90250203 0.79950031
0.85219422 0.83703439 0.7619808 0.81301134 0.7313114 0.79137561
0.82701445 0.85542404 0.82309334 0.75211374 0.82934814 0.82674958
0.80933194 0.87448152 0.85350205 0.75460106 0.82934814 0.74417233
0.79924258 0.8189683 0.75460106 0.90744016 0.83838023 0.78802791
0.78400654 0.64638659 0.62047936 0.76137738 0.81365241 0.83838023
0.78457533 0.84980627 0.79249393 0.69020315 0.69593922 0.70758151
0.70268831 0.91620627 0.73346864 0.86382644 0.80877728 0.72708753
0.79822085 0.68333632 0.81699014 0.65100453 0.79809005 0.74702492
0.77318569 0.83221179 0.66500492 0.68134548 0.7249286 0.59412132
0.58191312 0.73142963 0.73142963 0.66251028 0.87152685 0.74107856
0.60935684 0.87152685 0.77484517 0.88783675 0.81814153 0.54569789
0.6122496 0.75613569 0.53511973 0.74556767 0.81814153 0.85773646
0.6122496 0.64814153]
Top 40 Sorted according to approximate lower bounds:
596 18 Someone should develop an AI specifically for reading Terms & Conditions and flagging dubious parts.
-------------
2360 98 Porn is the only industry where it is not only acceptable but standard to separate people based on race, sex and sexual preference.
-------------
1918 101 All polls are biased towards people who are willing to take polls
-------------
948 50 They should charge less for drinks in the drive-thru because you can't refill them.
-------------
3740 239 When I was in elementary school and going through the DARE program, I was positive a gang of older kids was going to corner me and force me to smoke pot. Then I became an adult and realized nobody is giving free drugs to somebody that doesn't want them.
-------------
166 7 "Noted" is the professional way of saying "K".
-------------
29 0 Rewatching Mr. Bean, I've realised that the character is an eccentric genius and not a blithering idiot.
-------------
289 18 You've been doing weird cameos in your friends' dreams since kindergarten.
-------------
269 17 At some point every parent has stopped wiping their child's butt and hoped for the best.
-------------
121 6 Is it really fair to say a person over 85 has heart failure? Technically, that heart has done exceptionally well.
-------------
535 40 It's surreal to think that the sun and moon and stars we gaze up at are the same objects that have been observed for millenia, by everyone in the history of humanity from cavemen to Aristotle to Jesus to George Washington.
-------------
527 40 I wonder if America's internet is censored in a similar way that North Korea's is, but we have no idea of it happening.
-------------
1510 131 Kenny's family is poor because they're always paying for his funeral.
-------------
43 1 If I was as careful with my whole paycheck as I am with my last $20 I'd be a whole lot better off
-------------
162 10 Black hair ties are probably the most popular bracelets in the world.
-------------
107 6 The best answer to the interview question "What is your greatest weakness?" is "interviews".
-------------
127 8 Surfing the internet without ads feels like a summer evening without mosquitoes
-------------
159 12 I wonder if Superman ever put a pair of glasses on Lois Lane's dog, and she was like "what's this Clark? Did you get me a new dog?"
-------------
21 0 Sitting on a cold toilet seat or a warm toilet seat both suck for different reasons.
-------------
1414 157 My life is really like Rihanna's song, "just work work work work work" and the rest of it I can't really understand.
-------------
222 22 I'm honestly slightly concerned how often Reddit commenters make me laugh compared to my real life friends.
-------------
52 3 The world must have been a spookier place altogether when candles and gas lamps were the only sources of light at night besides the moon and the stars.
-------------
194 19 I have not been thankful enough in the last few years that the Black Eyed Peas are no longer ever on the radio
-------------
18 0 Living on the coast is having the window seat of the land you live on.
-------------
18 0 Binoculars are like walkie talkies for the deaf.
-------------
28 1 Now that I am a parent of multiple children I have realized that my parents were lying through their teeth when they said they didn't have a favorite.
-------------
16 0 I sneer at people who read tabloids, but every time I look someone up on Wikipedia the first thing I look for is what controversies they've been involved in.
-------------
1559 233 Kid's menus at restaurants should be smaller portions of the same adult dishes at lower prices and not the junk food that they usually offer.
-------------
1426 213 Eventually once all phones are waterproof we'll be able to push people into pools again
-------------
61 5 Myspace is so outdated that jokes about it being outdated has become outdated
-------------
52 4 As a kid, seeing someone step on a banana peel and not slip was a disappointment.
-------------
90 9 Yahoo!® is the RadioShack® of the Internet.
-------------
34 2 People who "tell it like it is" rarely do so to say something nice
-------------
39 3 Closing your eyes after turning off your alarm is a very dangerous game.
-------------
39 3 Your known 'first word' is the first word your parents heard you speak. In reality, it may have been a completely different word you said when you were alone.
-------------
87 10 "Smells Like Teen Spirit" is as old to listeners of today as "Yellow Submarine" was to listeners of 1991.
-------------
239 36 if an ocean didnt stop immigrants from coming to America what makes us think a wall will?
-------------
22 1 The phonebook was the biggest invasion of privacy that everyone was oddly ok with.
-------------
57 6 I'm actually the most productive when I procrastinate because I'm doing everything I possibly can to avoid the main task at hand.
-------------
57 6 You will never feel how long time is until you have allergies and snot slowly dripping out of your nostrils, while sitting in a classroom with no tissues.
-------------
</code>
We can view the ordering visually by plotting the posterior mean and bounds, and sorting by the lower bound. In the plot below, notice that the left error-bar is sorted (as we suggested this is the best way to determine an ordering), so the means, indicated by dots, do not follow any strong pattern. _____no_output_____
<code>
r_order = order[::-1][-40:]
plt.errorbar( posterior_mean[r_order], np.arange( len(r_order) ),
xerr=std_err[r_order], capsize=0, fmt="o",
color = "#7A68A6")
plt.xlim( 0.3, 1)
plt.yticks( np.arange( len(r_order)-1,-1,-1 ), map( lambda x: x[:30].replace("\n",""), ordered_contents) );_____no_output_____
</code>
In the graphic above, you can see why sorting by mean would be sub-optimal._____no_output_____### Extension to Starred rating systems
The above procedure works well for upvote-downvote schemes, but what about systems that use star ratings, e.g. 5-star rating systems? Similar problems arise if we simply take the average rating: an item with two perfect ratings would beat an item with thousands of perfect ratings but a single sub-perfect rating.
We can consider the upvote-downvote problem above as binary: 0 is a downvote, 1 is an upvote. An $N$-star rating system can be seen as a more continuous version of this, where a rating of $n$ stars is treated as a reward of $\frac{n}{N}$. For example, in a 5-star system, a 2-star rating corresponds to 0.4, and a perfect rating is a 1. We can use the same formula as before, but with $a,b$ defined differently:
$$ \frac{a}{a + b} - 1.65\sqrt{ \frac{ab}{ (a+b)^2(a + b +1 ) } }$$
where
\begin{align}
& a = 1 + S \\\\
& b = 1 + N - S \\\\
\end{align}
where $N$ is the number of users who rated, and $S$ is the sum of all the ratings, under the equivalence scheme mentioned above. _____no_output_____##### Example: Counting Github stars
What is the average number of stars a Github repository has? How would you calculate this? There are over 6 million repositories, so there is more than enough data to invoke the Law of Large Numbers. Let's start pulling some data. TODO_____no_output_____### Conclusion
While the Law of Large Numbers is cool, it is only true so much as its name implies: with large sample sizes only. We have seen how our inference can be affected by not considering *how the data is shaped*.
1. By (cheaply) drawing many samples from the posterior distributions, we can ensure that the Law of Large Number applies as we approximate expected values (which we will do in the next chapter).
2. Bayesian inference understands that with small sample sizes, we can observe wild randomness. Our posterior distribution will reflect this by being more spread rather than tightly concentrated. Thus, our inference should be correctable.
3. There are major implications of not considering the sample size, and trying to sort objects that are unstable leads to pathological orderings. The method provided above solves this problem.
_____no_output_____### Appendix
##### Derivation of sorting submissions formula
Basically what we are doing is using a Beta prior (with parameters $a=1, b=1$, which is a uniform distribution), and using a Binomial likelihood with observations $u, N = u+d$. This means our posterior is a Beta distribution with parameters $a' = 1 + u, b' = 1 + (N - u) = 1+d$. We then need to find the value $x$ such that the posterior probability of being below $x$ is 0.05. This is usually done by inverting the CDF ([Cumulative Distribution Function](http://en.wikipedia.org/wiki/Cumulative_Distribution_Function)); the CDF of the Beta, for integer parameters, is known but is a large sum [3].
We instead use a Normal approximation. The mean of the Beta is $\mu = a'/(a'+b')$ and the variance is
$$\sigma^2 = \frac{a'b'}{ (a' + b')^2(a'+b'+1) }$$
Hence we solve the following equation for $x$ and have an approximate lower bound.
$$ 0.05 = \Phi\left( \frac{(x - \mu)}{\sigma}\right) $$
$\Phi$ being the [cumulative distribution for the normal distribution](http://en.wikipedia.org/wiki/Normal_distribution#Cumulative_distribution)
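As a quick sanity check of this approximation, one can compare it against the exact 5% quantile of the Beta posterior computed with `scipy.stats` (a small sketch with made-up vote counts):
<code>
from scipy.stats import beta
import numpy as np

u, d = 999, 1                 # hypothetical upvote/downvote counts
a, b = 1 + u, 1 + d           # Beta posterior parameters under the uniform prior

exact = beta.ppf(0.05, a, b)  # exact 5% quantile of the Beta posterior
mu = a / (a + b)
sigma = np.sqrt(a * b / ((a + b) ** 2 * (a + b + 1.)))
approx = mu - 1.65 * sigma    # the normal approximation derived above

print("exact lower bound: %.5f, approximate lower bound: %.5f" % (exact, approx))
</code>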
_____no_output_____##### Exercises
1\. How would you estimate the quantity $E\left[ \cos{X} \right]$, where $X \sim \text{Exp}(4)$? What about $E\left[ \cos{X} | X \lt 1\right]$, i.e. the expected value *given* we know $X$ is less than 1? Would you need more samples than the original sample size to be equally accurate?_____no_output_____
<code>
## Enter code here
import scipy.stats as stats
exp = stats.expon( scale=4 )
N = 1e5
X = exp.rvs( int(N) )
## ..._____no_output_____
</code>
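One possible way to attack this exercise is plain Monte Carlo (a sketch, not the official solution); note how few of the samples survive the conditioning on $X < 1$:
<code>
import numpy as np
import scipy.stats as stats

exp = stats.expon(scale=4)        # same distribution object as in the scaffold above
X = exp.rvs(int(1e5))

print("estimate of E[cos X]:", np.cos(X).mean())

mask = X < 1                      # keep only the samples satisfying the condition
print("estimate of E[cos X | X < 1]:", np.cos(X[mask]).mean())
print("samples available for the conditional estimate:", mask.sum())
</code>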
2\. The following table was located in the paper "Going for Three: Predicting the Likelihood of Field Goal Success with Logistic Regression" [2]. The table ranks football field-goal kickers by their percent of non-misses. What mistake have the researchers made?
-----
#### Kicker Careers Ranked by Make Percentage
<table><tbody><tr><th>Rank </th><th>Kicker </th><th>Make % </th><th>Number of Kicks</th></tr><tr><td>1 </td><td>Garrett Hartley </td><td>87.7 </td><td>57</td></tr><tr><td>2</td><td> Matt Stover </td><td>86.8 </td><td>335</td></tr><tr><td>3 </td><td>Robbie Gould </td><td>86.2 </td><td>224</td></tr><tr><td>4 </td><td>Rob Bironas </td><td>86.1 </td><td>223</td></tr><tr><td>5</td><td> Shayne Graham </td><td>85.4 </td><td>254</td></tr><tr><td>… </td><td>… </td><td>…</td><td> </td></tr><tr><td>51</td><td> Dave Rayner </td><td>72.2 </td><td>90</td></tr><tr><td>52</td><td> Nick Novak </td><td>71.9 </td><td>64</td></tr><tr><td>53 </td><td>Tim Seder </td><td>71.0 </td><td>62</td></tr><tr><td>54 </td><td>Jose Cortez </td><td>70.7</td><td> 75</td></tr><tr><td>55 </td><td>Wade Richey </td><td>66.1</td><td> 56</td></tr></tbody></table>_____no_output_____In August 2013, [a popular post](http://bpodgursky.wordpress.com/2013/08/21/average-income-per-programming-language/) on the average income per programmer of different languages was trending. Here's the summary chart: (reproduced without permission, cause when you lie with stats, you gunna get the hammer). What do you notice about the extremes?
------
#### Average household income by programming language
<table >
<tr><td>Language</td><td>Average Household Income ($)</td><td>Data Points</td></tr>
<tr><td>Puppet</td><td>87,589.29</td><td>112</td></tr>
<tr><td>Haskell</td><td>89,973.82</td><td>191</td></tr>
<tr><td>PHP</td><td>94,031.19</td><td>978</td></tr>
<tr><td>CoffeeScript</td><td>94,890.80</td><td>435</td></tr>
<tr><td>VimL</td><td>94,967.11</td><td>532</td></tr>
<tr><td>Shell</td><td>96,930.54</td><td>979</td></tr>
<tr><td>Lua</td><td>96,930.69</td><td>101</td></tr>
<tr><td>Erlang</td><td>97,306.55</td><td>168</td></tr>
<tr><td>Clojure</td><td>97,500.00</td><td>269</td></tr>
<tr><td>Python</td><td>97,578.87</td><td>2314</td></tr>
<tr><td>JavaScript</td><td>97,598.75</td><td>3443</td></tr>
<tr><td>Emacs Lisp</td><td>97,774.65</td><td>355</td></tr>
<tr><td>C#</td><td>97,823.31</td><td>665</td></tr>
<tr><td>Ruby</td><td>98,238.74</td><td>3242</td></tr>
<tr><td>C++</td><td>99,147.93</td><td>845</td></tr>
<tr><td>CSS</td><td>99,881.40</td><td>527</td></tr>
<tr><td>Perl</td><td>100,295.45</td><td>990</td></tr>
<tr><td>C</td><td>100,766.51</td><td>2120</td></tr>
<tr><td>Go</td><td>101,158.01</td><td>231</td></tr>
<tr><td>Scala</td><td>101,460.91</td><td>243</td></tr>
<tr><td>ColdFusion</td><td>101,536.70</td><td>109</td></tr>
<tr><td>Objective-C</td><td>101,801.60</td><td>562</td></tr>
<tr><td>Groovy</td><td>102,650.86</td><td>116</td></tr>
<tr><td>Java</td><td>103,179.39</td><td>1402</td></tr>
<tr><td>XSLT</td><td>106,199.19</td><td>123</td></tr>
<tr><td>ActionScript</td><td>108,119.47</td><td>113</td></tr>
</table>_____no_output_____### References
1. Wainer, Howard. *The Most Dangerous Equation*. American Scientist, Volume 95.
2. Clark, Torin K., Aaron W. Johnson, and Alexander J. Stimpson. "Going for Three: Predicting the Likelihood of Field Goal Success with Logistic Regression." (2013): n. page. [Web](http://www.sloansportsconference.com/wp-content/uploads/2013/Going%20for%20Three%20Predicting%20the%20Likelihood%20of%20Field%20Goal%20Success%20with%20Logistic%20Regression.pdf). 20 Feb. 2013.
3. http://en.wikipedia.org/wiki/Beta_function#Incomplete_beta_function_____no_output_____
<code>
from IPython.core.display import HTML
def css_styling():
styles = open("../styles/custom.css", "r").read()
return HTML(styles)
css_styling()_____no_output_____
</code>
<style>
img{
max-width:800px}
</style>_____no_output_____
| {
"repository": "quantopian/Probabilistic-Programming-and-Bayesian-Methods-for-Hackers",
"path": "Chapter4_TheGreatestTheoremNeverTold/Ch4_LawOfLargeNumbers_PyMC3.ipynb",
"matched_keywords": [
"STAR"
],
"stars": 74,
"size": 633929,
"hexsha": "d002f51b520dfb6f7f7c8f13e0401f22dc925760",
"max_line_length": 117692,
"avg_line_length": 528.2741666667,
"alphanum_fraction": 0.9198332936
} |
# Notebook from ethiry99/HW16_Amazon_Vine_Analysis
Path: Vine_Review_Analysis.ipynb
<code>
# Dependencies and Setup
import pandas as pd_____no_output_____vine_review_df=pd.read_csv("Resources/vine_table.csv")
_____no_output_____vine_review_df.head()
_____no_output_____vine_review_df=vine_review_df.loc[(vine_review_df["total_votes"] >= 20) & (vine_review_df["helpful_votes"]/vine_review_df["total_votes"] >= .5)]_____no_output_____vine_review_df.head()_____no_output_____vine_rv_paid_df=vine_review_df.loc[vine_review_df["vine"]=="Y"]
vine_rv_paid_count=len(vine_rv_paid_df)
#print(f"5 Star paid percent {vine_five_star_paid_percent:.1%}\n"
print(f"Paid vine reviews = {vine_rv_paid_count}")Paid vine reviews = 386
vine_rv_unpaid_df=vine_review_df.loc[vine_review_df["vine"]=="N"]
vine_rv_unpaid_count=len(vine_rv_unpaid_df)
print(f"Paid (vine) reviews = {vine_rv_paid_count}")
print(f"Unpaid (vine) reviews = {vine_rv_unpaid_count}")Paid (vine) reviews = 386
Unpaid (vine) reviews = 48717
vine_rv_paid_five_star_df=vine_rv_paid_df.loc[(vine_rv_paid_df["star_rating"]==5)]
five_star_paid_count=len(vine_rv_paid_five_star_df)
print(f"Five star paid reviews = {five_star_paid_count}")Five star paid reviews = 176
vine_rv_unpaid_five_star_df=vine_rv_unpaid_df.loc[(vine_rv_unpaid_df["star_rating"]==5)]
five_star_unpaid_count=len(vine_rv_unpaid_five_star_df)
print(f"Five star paid reviews = {five_star_paid_count}")
print(f"Five star unpaid reviews = {five_star_unpaid_count}")Five star paid reviews = 176
Five star unpaid reviews = 24026
vine_five_star_paid_percent=five_star_paid_count/vine_rv_paid_count
vine_five_star_paid_percent_____no_output_____vine_five_star_unpaid_percent=five_star_unpaid_count/vine_rv_unpaid_count
vine_five_star_unpaid_percent_____no_output_____print(f"5 Star paid percent {vine_five_star_paid_percent:.1%}\n"
f"5 Star unpaid percent {vine_five_star_unpaid_percent:.1%}")5 Star paid percent 45.6%
5 Star unpaid percent 49.3%
</code>
| {
"repository": "ethiry99/HW16_Amazon_Vine_Analysis",
"path": "Vine_Review_Analysis.ipynb",
"matched_keywords": [
"STAR"
],
"stars": null,
"size": 11488,
"hexsha": "d00352c042e11023e04cd767f979253bf98e6a8d",
"max_line_length": 156,
"avg_line_length": 25.9322799097,
"alphanum_fraction": 0.4093837047
} |
# Notebook from bbglab/adventofcode
Path: 2016/loris/day_1.ipynb
# Advent of Code 2016_____no_output_____
<code>
data = open('data/day_1-1.txt', 'r').readline().strip().split(', ')_____no_output_____class TaxiCab:
def __init__(self, data):
self.data = data
self.double_visit = []
self.position = {'x': 0, 'y': 0}
self.direction = {'x': 0, 'y': 1}
self.grid = {i: {j: 0 for j in range(-500, 501)} for i in range(-500, 501)}
def run(self):
for instruction in self.data:
toward = instruction[0]
length = int(instruction[1:])
self.move(toward, length)
def move(self, toward, length):
if toward == 'R':
if self.direction['x'] == 0:
# from UP
if self.direction['y'] == 1:
self.position['x'] += length
self.direction['x'] = 1
for i in range(self.position['x'] - length, self.position['x']):
self.grid[self.position['y']][i] += 1
if self.grid[self.position['y']][i] > 1:
self.double_visit.append((i, self.position['y']))
# from DOWN
else:
self.position['x'] -= length
self.direction['x'] = -1
for i in range(self.position['x'] + length, self.position['x'], -1):
self.grid[self.position['y']][i] += 1
if self.grid[self.position['y']][i] > 1:
self.double_visit.append((i, self.position['y']))
self.direction['y'] = 0
else:
# FROM RIGHT
if self.direction['x'] == 1:
self.position['y'] -= length
self.direction['y'] = -1
for i in range(self.position['y'] + length, self.position['y'], -1):
self.grid[i][self.position['x']] += 1
if self.grid[i][self.position['x']] > 1:
self.double_visit.append((self.position['x'], i))
# FROM LEFT
else:
self.position['y'] += length
self.direction['y'] = 1
for i in range(self.position['y'] - length, self.position['y']):
self.grid[i][self.position['x']] += 1
if self.grid[i][self.position['x']] > 1:
self.double_visit.append((self.position['x'], i))
self.direction['x'] = 0
else:
if self.direction['x'] == 0:
# from UP
if self.direction['y'] == 1:
self.position['x'] -= length
self.direction['x'] = -1
for i in range(self.position['x'] + length, self.position['x'], -1):
self.grid[self.position['y']][i] += 1
if self.grid[self.position['y']][i] > 1:
self.double_visit.append((i, self.position['y']))
# from DOWN
else:
self.position['x'] += length
self.direction['x'] = 1
for i in range(self.position['x'] - length, self.position['x']):
self.grid[self.position['y']][i] += 1
if self.grid[self.position['y']][i] > 1:
self.double_visit.append((i, self.position['y']))
self.direction['y'] = 0
else:
# FROM RIGHT
if self.direction['x'] == 1:
self.position['y'] += length
self.direction['y'] = 1
for i in range(self.position['y'] - length, self.position['y']):
self.grid[i][self.position['x']] += 1
if self.grid[i][self.position['x']] > 1:
self.double_visit.append((self.position['x'], i))
# FROM LEFT
else:
self.position['y'] -= length
self.direction['y'] = -1
for i in range(self.position['y'] + length, self.position['y'], -1):
self.grid[i][self.position['x']] += 1
if self.grid[i][self.position['x']] > 1:
self.double_visit.append((self.position['x'], i))
self.direction['x'] = 0
def get_distance(self):
return sum([abs(i) for i in self.position.values()])
def get_distance_first_double_visit(self):
return sum(self.double_visit[0]) if len(self.double_visit) > 0 else 0_____no_output_____# Test
def test(data, result):
tc = TaxiCab(data)
tc.run()
assert tc.get_distance() == result_____no_output_____test(data=['R2', 'L3'], result=5)
test(data=['R2', 'R2', 'R2'], result=2)
test(data=['R5', 'L5', 'R5', 'R3'], result=12)_____no_output_____tc = TaxiCab(data)
tc.run()
tc.get_distance()_____no_output_____
</code>
<code>
# Test
def test(data, result):
tc = TaxiCab(data)
tc.run()
assert tc.get_distance_first_double_visit() == result_____no_output_____test(data=['R8', 'R4', 'R4', 'R8'], result=4)_____no_output_____tc.get_distance_first_double_visit()_____no_output_____
</code>
| {
"repository": "bbglab/adventofcode",
"path": "2016/loris/day_1.ipynb",
"matched_keywords": [
"STAR"
],
"stars": null,
"size": 10515,
"hexsha": "d0042eab5854b447de51a429a272d3a09f8991fe",
"max_line_length": 277,
"avg_line_length": 36.5104166667,
"alphanum_fraction": 0.4633380884
} |
# Notebook from rabest265/GunViolence
Path: Code/demographics_Lat_Long.ipynb
<code>
#API calls to Google Maps for Lat & Long_____no_output_____# Dependencies
import requests
import json
from config import gkey
import os
import csv
import pandas as pd
import numpy as np
_____no_output_____# Load CSV file
csv_path = os.path.join('..',"output", "demographics.csv")
# Read Purchasing File and store into Pandas data frame
cities_df = pd.read_csv(csv_path, encoding = "ISO-8859-1")
cities_df.head()_____no_output_____params = {"key": gkey}
# Loop through the cities_df that do not have lat & long and run a lat/long search for each city
for index, row in cities_df.iterrows():
if(pd.isnull(row['Lat'])):
base_url = "https://maps.googleapis.com/maps/api/geocode/json"
city = row['city_name']
state = row['State']
citystate=city + ", "+ state
print (citystate)
# update address key value
params['address'] = f"{city},{state}"
# make request
cities_lat_lng = requests.get(base_url, params=params)
# print the cities_lat_lng url, avoid doing for public github repos in order to avoid exposing key
# print(cities_lat_lng.url)
# convert to json
cities_lat_lng = cities_lat_lng.json()
cities_df.loc[index, "Lat"] = cities_lat_lng["results"][0]["geometry"]["location"]["lat"]
cities_df.loc[index, "Lng"] = cities_lat_lng["results"][0]["geometry"]["location"]["lng"]
# Visualize to confirm lat lng appear
cities_df.head()Quincy, WA
Raft Island, WA
Rainier, WA
Ravensdale, WA
Raymond, WA
Reardan, WA
Redmond, WA
Renton, WA
Republic, WA
Richland, WA
Ridgefield, WA
Ritzville, WA
Riverbend, WA
River Road, WA
Riverside, WA
Rochester, WA
Rockford, WA
Rock Island, WA
Rockport, WA
Rocky Point, WA
Ronald, WA
Roosevelt, WA
Rosalia, WA
Rosburg, WA
Rosedale, WA
Roslyn, WA
Roy, WA
Royal City, WA
Ruston, WA
Ryderwood, WA
St. John, WA
Salmon Creek, WA
Sammamish, WA
Santiago, WA
Satsop, WA
Seabeck, WA
SeaTac, WA
Seattle, WA
Sedro-Woolley, WA
Sekiu, WA
Selah, WA
Sequim, WA
Shadow Lake, WA
Shelton, WA
Shoreline, WA
Silvana, WA
Silverdale, WA
Silver Firs, WA
Sisco Heights, WA
Skamokawa Valley, WA
Skokomish, WA
Skykomish, WA
Snohomish, WA
Snoqualmie, WA
Snoqualmie Pass, WA
Soap Lake, WA
South Bend, WA
South Cle Elum, WA
South Creek, WA
South Hill, WA
South Prairie, WA
South Wenatchee, WA
Southworth, WA
Spanaway, WA
Spangle, WA
Spokane, WA
Spokane Valley, WA
Sprague, WA
Springdale, WA
Stansberry Lake, WA
Stanwood, WA
Starbuck, WA
Startup, WA
Steilacoom, WA
Steptoe, WA
Stevenson, WA
Sudden Valley, WA
Sultan, WA
Sumas, WA
Summit, WA
Summit View, WA
Summitview, WA
Sumner, WA
Sunday Lake, WA
Sunnyside, WA
Sunnyslope, WA
Suquamish, WA
Swede Heaven, WA
Tacoma, WA
Taholah, WA
Tampico, WA
Tanglewilde, WA
Tanner, WA
Tekoa, WA
Tenino, WA
Terrace Heights, WA
Thorp, WA
Three Lakes, WA
Tieton, WA
Tokeland, WA
Toledo, WA
Tonasket, WA
Toppenish, WA
Torboy, WA
Touchet, WA
Town and Country, WA
Tracyton, WA
Trout Lake, WA
Tukwila, WA
Tumwater, WA
Twin Lakes, WA
Twisp, WA
Union, WA
Union Gap, WA
Union Hill-Novelty Hill, WA
Uniontown, WA
University Place, WA
Upper Elochoman, WA
Vader, WA
Valley, WA
Vancouver, WA
Vantage, WA
Vashon, WA
Vaughn, WA
Venersborg, WA
Verlot, WA
Waitsburg, WA
Walla Walla, WA
Walla Walla East, WA
Waller, WA
Wallula, WA
Walnut Grove, WA
Wapato, WA
Warden, WA
Warm Beach, WA
Washougal, WA
Washtucna, WA
Waterville, WA
Wauna, WA
Waverly, WA
Wenatchee, WA
West Clarkston-Highland, WA
West Pasco, WA
Westport, WA
West Richland, WA
West Side Highway, WA
Whidbey Island Station, WA
White Center, WA
White Salmon, WA
White Swan, WA
Wilbur, WA
Wilderness Rim, WA
Wilkeson, WA
Willapa, WA
Wilson Creek, WA
Winlock, WA
Winthrop, WA
Wishram, WA
Wollochet, WA
Woodinville, WA
Woodland, WA
Woods Creek, WA
Woodway, WA
Yacolt, WA
Yakima, WA
Yarrow Point, WA
Yelm, WA
Zillah, WA
Accoville, WV
Addison, WV
Albright, WV
Alderson, WV
Alum Creek, WV
Amherstdale, WV
Anawalt, WV
Anmoore, WV
Ansted, WV
Apple Grove, WV
Arbovale, WV
Athens, WV
Auburn, WV
Aurora, WV
Bancroft, WV
Barboursville, WV
Barrackville, WV
Bartley, WV
Bartow, WV
Bath, WV
Bayard, WV
Beards Fork, WV
Beaver, WV
Beckley, WV
Beech Bottom, WV
Belington, WV
Belle, WV
Belmont, WV
Belva, WV
Benwood, WV
Bergoo, WV
Berwind, WV
Bethany, WV
Bethlehem, WV
Beverly, WV
Big Chimney, WV
Big Creek, WV
Big Sandy, WV
Birch River, WV
Blacksville, WV
Blennerhassett, WV
Bluefield, WV
Bluewell, WV
Boaz, WV
Bolivar, WV
Bolt, WV
Boomer, WV
Bowden, WV
Bradley, WV
Bradshaw, WV
Bramwell, WV
Brandonville, WV
Brandywine, WV
Brenton, WV
Bridgeport, WV
Brookhaven, WV
Bruceton Mills, WV
Bruno, WV
Brush Fork, WV
Buckhannon, WV
Bud, WV
Buffalo, WV
Burlington, WV
Burnsville, WV
Cairo, WV
Camden-on-Gauley, WV
Cameron, WV
Capon Bridge, WV
Carolina, WV
Carpendale, WV
Cass, WV
Cassville, WV
Cedar Grove, WV
Century, WV
Ceredo, WV
Chapmanville, WV
Charleston, WV
Charles Town, WV
Charlton Heights, WV
Chattaroy, WV
Chauncey, WV
Cheat Lake, WV
Chelyan, WV
Chesapeake, WV
Chester, WV
Clarksburg, WV
Clay, WV
Clearview, WV
Clendenin, WV
Coal City, WV
Coal Fork, WV
Comfort, WV
Corinne, WV
Covel, WV
Cowen, WV
Crab Orchard, WV
Craigsville, WV
Cross Lanes, WV
Crum, WV
Crumpler, WV
Cucumber, WV
Culloden, WV
Dailey, WV
Daniels, WV
Danville, WV
Davis, WV
Davy, WV
Deep Water, WV
Delbarton, WV
Despard, WV
Dixie, WV
Dunbar, WV
Durbin, WV
East Bank, WV
East Dailey, WV
Eccles, WV
Eleanor, WV
Elizabeth, WV
Elk Garden, WV
Elkins, WV
Elkview, WV
Ellenboro, WV
Enterprise, WV
Fairlea, WV
Fairmont, WV
Fairview, WV
Falling Spring, WV
Falling Waters, WV
Falls View, WV
Farmington, WV
Fayetteville, WV
Fenwick, WV
Flatwoods, WV
Flemington, WV
Follansbee, WV
Fort Ashby, WV
Fort Gay, WV
Frank, WV
Franklin, WV
Friendly, WV
Gallipolis Ferry, WV
Galloway, WV
Gary, WV
Gassaway, WV
Gauley Bridge, WV
Ghent, WV
Gilbert, WV
Gilbert Creek, WV
Glasgow, WV
Glen Dale, WV
Glen Ferris, WV
Glen Fork, WV
Glen Jean, WV
Glenville, WV
Glen White, WV
Grafton, WV
Grantsville, WV
Grant Town, WV
Granville, WV
Great Cacapon, WV
Green Bank, WV
Green Spring, WV
Greenview, WV
Gypsy, WV
Hambleton, WV
Hamlin, WV
Handley, WV
Harman, WV
Harpers Ferry, WV
Harrisville, WV
Hartford City, WV
Harts, WV
Hedgesville, WV
Helen, WV
Helvetia, WV
Henderson, WV
Hendricks, WV
Henlawson, WV
Hepzibah, WV
Hico, WV
Hillsboro, WV
Hilltop, WV
Hinton, WV
Holden, WV
Hometown, WV
Hooverson Heights, WV
Hundred, WV
Huntersville, WV
Huntington, WV
Hurricane, WV
Huttonsville, WV
Iaeger, WV
Idamay, WV
Inwood, WV
Itmann, WV
Jacksonburg, WV
Jane Lew, WV
Jefferson, WV
Junior, WV
Justice, WV
Kenova, WV
Kermit, WV
Keyser, WV
Keystone, WV
Kimball, WV
Kimberly, WV
Kincaid, WV
Kingwood, WV
Kistler, WV
Kopperston, WV
Lashmeet, WV
Lavalette, WV
Leon, WV
Lesage, WV
Lester, WV
Lewisburg, WV
Littleton, WV
Logan, WV
Lost Creek, WV
Lubeck, WV
Lumberport, WV
Mabscott, WV
MacArthur, WV
McConnell, WV
McMechen, WV
Madison, WV
Mallory, WV
Man, WV
Mannington, WV
Marlinton, WV
Marmet, WV
Martinsburg, WV
Mason, WV
Masontown, WV
Matewan, WV
Matheny, WV
Matoaka, WV
Maybeury, WV
Meadow Bridge, WV
Middlebourne, WV
Middleway, WV
Mill Creek, WV
Milton, WV
Minden, WV
Mineralwells, WV
Mitchell Heights, WV
Monaville, WV
Monongah, WV
Montcalm, WV
Montgomery, WV
Montrose, WV
Moorefield, WV
Morgantown, WV
Moundsville, WV
Mount Carbon, WV
Mount Gay-Shamrock, WV
Mount Hope, WV
Mullens, WV
Neibert, WV
Nettie, WV
Newburg, WV
New Cumberland, WV
Newell, WV
New Haven, WV
New Martinsville, WV
New Richmond, WV
Nitro, WV
Northfork, WV
North Hills, WV
Nutter Fort, WV
Oak Hill, WV
Oakvale, WV
Oceana, WV
Omar, WV
Paden City, WV
Page, WV
Pageton, WV
Parcoal, WV
Parkersburg, WV
Parsons, WV
Paw Paw, WV
Pax, WV
Pea Ridge, WV
Pennsboro, WV
Pentress, WV
Petersburg, WV
Peterstown, WV
Philippi, WV
Pickens, WV
Piedmont, WV
Pinch, WV
Pine Grove, WV
Pineville, WV
Piney View, WV
Pleasant Valley, WV
Poca, WV
Point Pleasant, WV
Powellton, WV
Pratt, WV
Prichard, WV
Prince, WV
Princeton, WV
Prosperity, WV
Pullman, WV
Quinwood, WV
Rachel, WV
Racine, WV
Rainelle, WV
Rand, WV
Ranson, WV
Ravenswood, WV
Raysal, WV
Reader, WV
Red Jacket, WV
Reedsville, WV
Reedy, WV
Rhodell, WV
Richwood, WV
Ridgeley, WV
Ripley, WV
Rivesville, WV
Robinette, WV
Roderfield, WV
Romney, WV
Ronceverte, WV
Rossmore, WV
Rowlesburg, WV
Rupert, WV
St. Albans, WV
St. George, WV
St. Marys, WV
Salem, WV
Salt Rock, WV
Sand Fork, WV
Sarah Ann, WV
Scarbro, WV
Shady Spring, WV
Shannondale, WV
Shenandoah Junction, WV
Shepherdstown, WV
Shinnston, WV
Shrewsbury, WV
Sissonville, WV
Sistersville, WV
Smithers, WV
Smithfield, WV
Sophia, WV
South Charleston, WV
Spelter, WV
Spencer, WV
Springfield, WV
Stanaford, WV
Star City, WV
Stollings, WV
Stonewood, WV
Summersville, WV
Sutton, WV
Switzer, WV
Sylvester, WV
Teays Valley, WV
Terra Alta, WV
Thomas, WV
Thurmond, WV
Tioga, WV
Tornado, WV
Triadelphia, WV
Tunnelton, WV
Twilight, WV
Union, WV
Valley Bend, WV
Valley Grove, WV
Valley Head, WV
Van, WV
Verdunville, WV
Vienna, WV
Vivian, WV
Wallace, WV
War, WV
Wardensville, WV
Washington, WV
Waverly, WV
Wayne, WV
Weirton, WV
Welch, WV
Wellsburg, WV
West Hamlin, WV
West Liberty, WV
West Logan, WV
West Milford, WV
Weston, WV
Westover, WV
West Union, WV
Wheeling, WV
White Hall, WV
White Sulphur Springs, WV
Whitesville, WV
Whitmer, WV
Wiley Ford, WV
Williamson, WV
Williamstown, WV
Windsor Heights, WV
Winfield, WV
Wolf Summit, WV
Womelsdorf, WV
Worthington, WV
Abbotsford, WI
Abrams, WI
Adams, WI
Adell, WI
Albany, WI
Algoma, WI
Allenton, WI
Allouez, WI
Alma, WI
Alma Center, WI
Almena, WI
Almond, WI
Altoona, WI
Amberg, WI
Amery, WI
Amherst, WI
Amherst Junction, WI
Angelica, WI
Aniwa, WI
Antigo, WI
Appleton, WI
Arcadia, WI
Arena, WI
Argonne, WI
Argyle, WI
Arkansaw, WI
Arkdale, WI
Arlington, WI
Arpin, WI
cities_df.head()
cities_df.to_csv("../Output/cities.csv", index=False, header=True)_____no_output_____
</code>
| {
"repository": "rabest265/GunViolence",
"path": "Code/demographics_Lat_Long.ipynb",
"matched_keywords": [
"STAR",
"Salmon"
],
"stars": null,
"size": 51222,
"hexsha": "d0069e2a36204df8606dc23f8e75ef7c3b8b2179",
"max_line_length": 116,
"avg_line_length": 26.0406710727,
"alphanum_fraction": 0.4398305416
} |
# Notebook from debugevent90901/courseArchive
Path: ECE365/genomics/Genomics_Lab4/ECE365-Genomics-Lab4-Spring21.ipynb
# Lab 4: EM Algorithm and Single-Cell RNA-seq Data_____no_output_____### Name: Your Name Here (Your netid here)_____no_output_____### Due April 2, 2021 11:59 PM_____no_output_____#### Preamble (Don't change this)_____no_output_____## Important Instructions -
1. Please implement all the *graded functions* in main.py file. Do not change function names in main.py.
2. Please read the description of every graded function very carefully. The description clearly states what is the expectation of each graded function.
3. After some graded functions, there is a cell which you can run and see if the expected output matches the output you are getting.
4. The expected output provided is just a way for you to assess the correctness of your code. The code will be tested on several other cases as well._____no_output_____
<code>
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns_____no_output_____%run main.py_____no_output_____module = Lab4()_____no_output_____
</code>
## Part 1 : Expectation-Maximization (EM) algorithm for transcript quantification_____no_output_____## Introduction
The EM algorithm is a very helpful tool to compute maximum likelihood estimates of parameters in models that have some latent (hidden) variables.
In the case of the transcript quantification problem, the model parameters we want to estimate are the transcript relative abundances $\rho_1,...,\rho_K$.
The latent variables are the read-to-transcript indicator variables $Z_{ik}$, which indicate whether the $i$th read comes from the $k$th transcript (in which case $Z_{ik}=1$).
In this part of the lab, you will be given the read alignment data.
For each read and transcript pair, it tells you whether the read can be mapped (i.e., aligned) to that transcript.
Using the EM algorithm, you will estimate the relative abundances of the transcripts.
_____no_output_____### Reading read transcript data - We have 30000 reads and 30 transcripts_____no_output_____
<code>
n_reads=30000
n_transcripts=30
read_mapping=[]
with open("read_mapping_data.txt",'r') as file :
lines_reads=file.readlines()
for line in lines_reads :
read_mapping.append([int(x) for x in line.split(",")])_____no_output_____read_mapping[:10]_____no_output_____
</code>
Rather than giving you a giant binary matrix, we encoded the read mapping data in a more concise way. read_mapping is a list of lists. The $i$th list contains the indices of the transcripts that the $i$th read maps to._____no_output_____### Reading true abundances and transcript lengths_____no_output_____
<code>
with open("transcript_true_abundances.txt",'r') as file :
lines_gt=file.readlines()
ground_truth=[float(x) for x in lines_gt[0].split(",")]
with open("transcript_lengths.txt",'r') as file :
lines_gt=file.readlines()
tr_lengths=[float(x) for x in lines_gt[0].split(",")]_____no_output_____ground_truth[:5]_____no_output_____tr_lengths[:5]_____no_output_____
</code>
## Graded Function 1 : expectation_maximization (10 marks)
Purpose : To implement the EM algorithm to obtain abundance estimates for each transcript.
E-step : In this step, we calculate the fraction of each read that is assigned to each transcript (i.e., the estimate of $Z_{ik}$). For read $i$ and transcript $k$, this is calculated by dividing the current abundance estimate of transcript $k$ by the sum of abundance estimates of all transcripts that read $i$ maps to.
M-step : In this step, we update the abundance estimate of each transcript based on the fraction of all reads that is currently assigned to the transcript. First we compute the average fraction of all reads assigned to the transcript. Then, (if transcripts are of different lengths) we divide the result by the transcript length.
Finally, we normalize all abundance estimates so that they add up to 1.
Inputs - read_mapping (a list of lists where each sublist contains the transcripts to which a particular read maps; the length of this list is equal to the number of reads, i.e. 30000); tr_lengths (a list containing the lengths of the 30 transcripts, in order); n_iterations (the number of EM iterations to be performed)
Output - a list of lists where each sublist contains the abundance estimates for a transcript across all iterations. The length of each sublist should be equal to the number of iterations plus one (for the initialization) and the total number of sublists should be equal to the number of transcripts._____no_output_____
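Before the check below, here is a minimal sketch of how such a function could be written: it illustrates the E and M updates described above, assumes a uniform initialization over the transcripts, and is not necessarily identical to the reference implementation in main.py.
<code>
import numpy as np

def expectation_maximization_sketch(read_mapping, tr_lengths, n_iterations):
    n_transcripts = len(tr_lengths)
    rho = np.ones(n_transcripts) / n_transcripts      # uniform initialization (assumption)
    history = [[r] for r in rho]                      # one sublist of estimates per transcript
    for _ in range(n_iterations):
        # E-step: split each read among the transcripts it maps to,
        # proportionally to the current abundance estimates
        assigned = np.zeros(n_transcripts)
        for transcripts in read_mapping:
            total = sum(rho[k] for k in transcripts)
            for k in transcripts:
                assigned[k] += rho[k] / total
        # M-step: average read fraction per transcript, corrected for length, then normalized
        rho = (assigned / len(read_mapping)) / np.array(tr_lengths)
        rho = rho / rho.sum()
        for k in range(n_transcripts):
            history[k].append(rho[k])
    return history
</code>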
<code>
history=module.expectation_maximization(read_mapping,tr_lengths,20)
print(len(history))
print(len(history[0]))
print(history[0][-5:])
print(history[1][-5:])
print(history[2][-5:])30
21
[0.033769639494636614, 0.03381298624783303, 0.03384568373972949, 0.0338703482393148, 0.03388895326082054]
[0.0020082674603036053, 0.0019649207071071456, 0.0019322232152109925, 0.0019075587156241912, 0.0018889536941198502]
[0.0660581789629968, 0.06606927656035864, 0.0660765012689558, 0.06608120466668756, 0.0660842666518177]
</code>
## Expected Output -
30
21
[0.033769639494636614, 0.03381298624783303, 0.03384568373972948, 0.0338703482393148, 0.03388895326082054]
[0.0020082674603036053, 0.0019649207071071456, 0.0019322232152109925, 0.0019075587156241912, 0.0018889536941198502]
[0.0660581789629968, 0.06606927656035864, 0.06607650126895578, 0.06608120466668756, 0.0660842666518177]
_____no_output_____You can use the following function to visualize how the estimated relative abundances are converging with the number of iterations of the algorithm._____no_output_____
<code>
def visualize_em(history,n_iterations) :
#start code here
fig, ax = plt.subplots(figsize=(8,6))
for j in range(n_transcripts):
ax.plot([i for i in range(n_iterations+1)],[history[j][i] - ground_truth[j] for i in range(n_iterations+1)],marker='o')
#end code here_____no_output_____visualize_em(history,20)_____no_output_____
</code>
## Part 2 : Exploring Single-Cell RNA-seq data_____no_output_____In a study published in 2015, Zeisel et al. used single-cell RNA-seq data to explore the cell diversity in the mouse brain.
We will explore the data used for their study.
You can read more about it [here](https://science.sciencemag.org/content/347/6226/1138)._____no_output_____
<code>
#reading single-cell RNA-seq data
lines_genes=[]
with open("Zeisel_expr.txt",'r') as file :
lines_genes=file.readlines()_____no_output_____lines_genes[0][:300]_____no_output_____
</code>
Each line in the file Zeisel_expr.txt corresponds to one gene.
The columns correspond to different cells (notice that this is the opposite of how we looked at this matrix in class).
The entries of this matrix correspond to the number of reads mapping to a given gene in the corresponding cell._____no_output_____
<code>
# reading true labels for each cell
with open("Zeisel_labels.txt",'r') as file :
true_labels = file.read().splitlines()_____no_output_____
</code>
The study also provides us with true labels for each of the cells.
For each of the cells, the vector true_labels contains the name of the cell type.
There are nine different cell types in this dataset._____no_output_____
<code>
set(true_labels)_____no_output_____
</code>
## Graded Function 2 : prepare_data (10 marks) :
Purpose - To create a dataframe where each row corresponds to a specific cell and each column corresponds to the expression levels of a particular gene across all cells.
You should name the columns as "Gene_0", "Gene_1", and so on (matching the expected output below).
We will iterate through all the lines in lines_genes list created above, add 1 to each value and take log.
Each line will correspond to 1 column in the dataframe
Output - gene expression dataframe
### Note - All the values in the output dataframe should be rounded off to 5 digits after the decimal_____no_output_____
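A minimal sketch of one way this could be implemented, assuming the values on each line of Zeisel_expr.txt are whitespace-separated counts (the Gene_0, Gene_1, ... column naming follows the expected output shown below):
<code>
import numpy as np
import pandas as pd

def prepare_data_sketch(lines_genes):
    columns = {}
    for i, line in enumerate(lines_genes):
        counts = np.array(line.split(), dtype=float)          # assumption: whitespace-delimited
        columns["Gene_" + str(i)] = np.round(np.log(counts + 1), 5)
    return pd.DataFrame(columns)
</code>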
<code>
data_df=module.prepare_data(lines_genes)
print(data_df.shape)
print(data_df.iloc[0:3,:5])(3005, 19972)
Gene_0 Gene_1 Gene_2 Gene_3 Gene_4
0 0.0 1.38629 1.38629 0.0 0.69315
1 0.0 0.69315 0.69315 0.0 0.69315
2 0.0 0.00000 1.94591 0.0 0.69315
print(data_df.columns)Index(['Gene_0', 'Gene_1', 'Gene_2', 'Gene_3', 'Gene_4', 'Gene_5', 'Gene_6',
'Gene_7', 'Gene_8', 'Gene_9',
...
'Gene_19962', 'Gene_19963', 'Gene_19964', 'Gene_19965', 'Gene_19966',
'Gene_19967', 'Gene_19968', 'Gene_19969', 'Gene_19970', 'Gene_19971'],
dtype='object', length=19972)
</code>
## Expected Output :
``(3005, 19972)``
`` Gene_0 Gene_1 Gene_2 Gene_3 Gene_4``
``0 0.0 1.38629 1.38629 0.0 0.69315``
``1 0.0 0.69315 0.69315 0.0 0.69315``
``2 0.0 0.00000 1.94591 0.0 0.69315``_____no_output_____## Graded Function 3 : identify_less_expressive_genes (10 marks)
Purpose : To identify genes (columns) that are expressed in less than 25 cells. We will create a list of all gene columns that have values greater than 0 for less than 25 cells.
Input - gene expression dataframe
Output - list of column names which are expressed in less than 25 cells_____no_output_____
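One way this check can be written as a single boolean reduction (a sketch; it returns the filtered column index, which prints like the expected output below):
<code>
def identify_less_expressive_genes_sketch(df):
    # for every gene column, count in how many cells it is expressed (value > 0)
    cells_expressed = (df > 0).sum(axis=0)
    return df.columns[cells_expressed.values < 25]
</code>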
<code>
drop_columns = module.identify_less_expressive_genes(data_df)
print(len(drop_columns))
print(drop_columns[:10])5120
Index(['Gene_28', 'Gene_126', 'Gene_145', 'Gene_146', 'Gene_151', 'Gene_152',
'Gene_167', 'Gene_168', 'Gene_170', 'Gene_173'],
dtype='object')
</code>
## Expected Output :
``5120``
``['Gene_28', 'Gene_126', 'Gene_145', 'Gene_146', 'Gene_151', 'Gene_152', 'Gene_167', 'Gene_168', 'Gene_170', 'Gene_173']``_____no_output_____### Filtering less expressive genes
We will now create a new dataframe in which genes which are expressed in less than 25 cells will not be present_____no_output_____
<code>
df_new = data_df.drop(drop_columns, axis=1)_____no_output_____df_new.head()_____no_output_____
</code>
## Graded Function 4 : perform_pca (10 marks)
Purpose - Perform Principal Component Analysis on the new dataframe and take the top 50 principal components
Input - df_new
Output - numpy array containing the top 50 principal components of the data.
### Note - All the values in the output should be rounded off to 5 digits after the decimal
### Note - Please use random_state=365 for the PCA object you will create_____no_output_____
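A minimal sketch of the corresponding scikit-learn call, using the rounding and random_state required above:
<code>
import numpy as np
from sklearn.decomposition import PCA

def perform_pca_sketch(df):
    pca = PCA(n_components=50, random_state=365)
    return np.round(pca.fit_transform(df), 5)
</code>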
<code>
pca_data=module.perform_pca(df_new)
print(pca_data.shape)
print(type(pca_data))
print(pca_data[0:3,:5])(3005, 50)
<class 'numpy.ndarray'>
[[26.97148 -2.7244 0.62163 25.90148 -6.24736]
[26.49135 -1.58774 -4.79315 24.01094 -7.25618]
[47.82664 5.06799 2.15177 30.24367 -3.38878]]
</code>
## Expected Output :
``(3005, 50)``
``<class 'numpy.ndarray'>``
``[[26.97148 -2.7244 0.62163 25.90148 -6.24736]``
`` [26.49135 -1.58774 -4.79315 24.01094 -7.25618]``
`` [47.82664 5.06799 2.15177 30.24367 -3.38878]]``_____no_output_____## (Non-graded) Function 5 : perform_tsne
Purpose - Perform t-SNE on the pca_data and obtain 2 t-SNE components
We will use TSNE class of the sklearn.manifold package. Use random_state=1000 and perplexity=50
Documentation can be found here - https://scikit-learn.org/stable/modules/generated/sklearn.manifold.TSNE.html
Input - pca_data
Output - numpy array containing the top 2 tsne components of the data.
**Note: This function will not be graded because of the random nature of t-SNE.**_____no_output_____
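A minimal sketch of the t-SNE call with the parameters suggested above:
<code>
from sklearn.manifold import TSNE

def perform_tsne_sketch(pca_data):
    tsne = TSNE(n_components=2, perplexity=50, random_state=1000)
    return tsne.fit_transform(pca_data)
</code>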
<code>
tsne_data50 = module.perform_tsne(pca_data)
print(tsne_data50.shape)
print(tsne_data50[:3,:])(3005, 2)
[[ 19.031317 -45.3434 ]
[ 19.188553 -44.945473]
[ 17.369982 -47.997364]]
</code>
## Expected Output :
(These numbers can deviate a bit depending on your sklearn)
``(3005, 2)``
``[[ 15.069608 -47.535984]``
`` [ 15.251476 -47.172073]``
`` [ 13.3932 -49.909657]]``_____no_output_____
<code>
fig, ax = plt.subplots(figsize=(12,8))
sns.scatterplot(tsne_data50[:,0], tsne_data50[:,1], hue=true_labels)
plt.show()/usr/local/lib/python3.9/site-packages/seaborn/_decorators.py:36: FutureWarning: Pass the following variables as keyword args: x, y. From version 0.12, the only valid positional argument will be `data`, and passing other arguments without an explicit keyword will result in an error or misinterpretation.
warnings.warn(
</code>
Notice that the different cell types form clusters (which can be easily visualized on the t-SNE space).
Zeisel et al. performed clustering on this data in order to identify and label the different cell types.
You can try using clustering methods (such as k-means and GMM) to cluster the single-cell RNA-seq data of Zeisel et al. and see if your results agree with theirs!_____no_output_____
| {
"repository": "debugevent90901/courseArchive",
"path": "ECE365/genomics/Genomics_Lab4/ECE365-Genomics-Lab4-Spring21.ipynb",
"matched_keywords": [
"RNA-seq",
"single-cell"
],
"stars": null,
"size": 1023042,
"hexsha": "d00741055dc800ea60b86da8dd05cb6e0b604bae",
"max_line_length": 602447,
"avg_line_length": 1344.3390275953,
"alphanum_fraction": 0.7113657113
} |
# Notebook from justinshaffer/Extraction_kit_benchmarking
Path: code/Taxon profile analysis.ipynb
# Set-up notebook environment
## NOTE: Use a QIIME2 kernel_____no_output_____
<code>
import numpy as np
import pandas as pd
import seaborn as sns
import scipy
from scipy import stats
import matplotlib.pyplot as plt
import re
from pandas import *
import matplotlib.pyplot as plt
%matplotlib inline
from qiime2.plugins import feature_table
from qiime2 import Artifact
from qiime2 import Metadata
import biom
from biom.table import Table
from qiime2.plugins import diversity
from scipy.stats import ttest_ind
from scipy.stats.stats import pearsonr
%config InlineBackend.figure_formats = ['svg']
from qiime2.plugins.feature_table.methods import relative_frequency
import biom
import qiime2 as q2
import os
import math
_____no_output_____
</code>
# Import sample metadata_____no_output_____
<code>
meta = q2.Metadata.load('/Users/Justin/Mycelium/UCSD/00_Knight_Lab/03_Extraction_test_12201/round_02/sample_metadata/12201_metadata.txt').to_dataframe()
_____no_output_____
</code>
Separate round 1 and round 2 and exclude round 1 Zymo, Homebrew, and MagMAX Beta_____no_output_____
<code>
meta_r1 = meta[meta['round'] == 1]
meta_clean_r1_1 = meta_r1[meta_r1['extraction_kit'] != 'Zymo MagBead']
meta_clean_r1_2 = meta_clean_r1_1[meta_clean_r1_1['extraction_kit'] != 'Homebrew']
meta_clean_r1 = meta_clean_r1_2[meta_clean_r1_2['extraction_kit'] != 'MagMax Beta']
meta_clean_r2 = meta[meta['round'] == 2]
_____no_output_____
</code>
Remove PowerSoil samples from each round - these samples will be used as the baseline _____no_output_____
<code>
meta_clean_r1_noPS = meta_clean_r1[meta_clean_r1['extraction_kit'] != 'PowerSoil']
meta_clean_r2_noPS = meta_clean_r2[meta_clean_r2['extraction_kit'] != 'PowerSoil']
_____no_output_____
</code>
Create tables including only round 1 or round 2 PowerSoil samples_____no_output_____
<code>
meta_clean_r1_onlyPS = meta_clean_r1[meta_clean_r1['extraction_kit'] == 'PowerSoil']
meta_clean_r2_onlyPS = meta_clean_r2[meta_clean_r2['extraction_kit'] == 'PowerSoil']
_____no_output_____
</code>
Merge PowerSoil samples from round 2 with other samples from round 1, and vice versa - this will allow us to get the correlations between the two rounds of PowerSoil_____no_output_____
<code>
meta_clean_r1_with_r2_PS = pd.concat([meta_clean_r1_noPS, meta_clean_r2_onlyPS])
meta_clean_r2_with_r1_PS = pd.concat([meta_clean_r2_noPS, meta_clean_r1_onlyPS])
_____no_output_____
</code>
## Collapse feature-table to the desired level (e.g., genus)_____no_output_____16S_____no_output_____
<code>
qiime taxa collapse \
--i-table /Users/Justin/Mycelium/UCSD/00_Knight_Lab/03_Extraction_test_12201/round_02/data/16S/10_filtered_data/dna_bothPS_16S_deblur_biom_lod_noChl_noMit_sepp_gg_noNTCs_noMock.qza \
--i-taxonomy /Users/Justin/Mycelium/UCSD/00_Knight_Lab/03_Extraction_test_12201/round_02/data/16S/06_taxonomy/dna_all_16S_deblur_seqs_taxonomy_silva138.qza \
--p-level 6 \
--o-collapsed-table /Users/Justin/Mycelium/UCSD/00_Knight_Lab/03_Extraction_test_12201/round_02/data/16S/10_filtered_data/dna_bothPS_16S_deblur_biom_lod_noChl_noMit_sepp_gg_noNTCs_noMock_taxa_collapse_genus.qza
qiime feature-table summarize \
--i-table /Users/Justin/Mycelium/UCSD/00_Knight_Lab/03_Extraction_test_12201/round_02/data/16S/10_filtered_data/dna_bothPS_16S_deblur_biom_lod_noChl_noMit_sepp_gg_noNTCs_noMock_taxa_collapse_genus.qza \
--o-visualization /Users/Justin/Mycelium/UCSD/00_Knight_Lab/03_Extraction_test_12201/round_02/data/16S/10_filtered_data/dna_bothPS_16S_deblur_biom_lod_noChl_noMit_sepp_gg_noNTCs_noMock_taxa_collapse_genus.qzv
# There are 846 samples and 1660 features
_____no_output_____
</code>
ITS_____no_output_____
<code>
qiime taxa collapse \
--i-table /Users/Justin/Mycelium/UCSD/00_Knight_Lab/03_Extraction_test_12201/round_02/data/ITS/08_filtered_data/dna_bothPS_ITS_deblur_biom_lod_noNTCs_noMock.qza \
--i-taxonomy /Users/Justin/Mycelium/UCSD/00_Knight_Lab/03_Extraction_test_12201/round_02/data/ITS/06_taxonomy/dna_all_ITS_deblur_seqs_taxonomy_unite8.qza \
--p-level 6 \
--o-collapsed-table /Users/Justin/Mycelium/UCSD/00_Knight_Lab/03_Extraction_test_12201/round_02/data/ITS/08_filtered_data/dna_bothPS_ITS_deblur_biom_lod_noNTCs_noMock_taxa_collapse_genus.qza
qiime feature-table summarize \
--i-table /Users/Justin/Mycelium/UCSD/00_Knight_Lab/03_Extraction_test_12201/round_02/data/ITS/08_filtered_data/dna_bothPS_ITS_deblur_biom_lod_noNTCs_noMock_taxa_collapse_genus.qza \
--o-visualization /Users/Justin/Mycelium/UCSD/00_Knight_Lab/03_Extraction_test_12201/round_02/data/ITS/08_filtered_data/dna_bothPS_ITS_deblur_biom_lod_noNTCs_noMock_taxa_collapse_genus.qzv
# There are 978 samples and 791 features
_____no_output_____
</code>
Shotgun_____no_output_____
<code>
qiime taxa collapse \
--i-table /Users/Justin/Mycelium/UCSD/00_Knight_Lab/03_Extraction_test_12201/round_02/data/shotgun/03_filtered_data/dna_bothPS_shotgun_woltka_wol_biom_noNTCs_noMock.qza \
--i-taxonomy /Users/Justin/Mycelium/UCSD/00_Knight_Lab/03_Extraction_test_12201/round_02/data/shotgun/wol_taxonomy.qza \
--p-level 6 \
--o-collapsed-table /Users/Justin/Mycelium/UCSD/00_Knight_Lab/03_Extraction_test_12201/round_02/data/shotgun/03_filtered_data/dna_bothPS_shotgun_woltka_wol_biom_noNTCs_noMock_taxa_collapse_genus.qza
qiime feature-table summarize \
--i-table /Users/Justin/Mycelium/UCSD/00_Knight_Lab/03_Extraction_test_12201/round_02/data/shotgun/03_filtered_data/dna_bothPS_shotgun_woltka_wol_biom_noNTCs_noMock_taxa_collapse_genus.qza \
--o-visualization /Users/Justin/Mycelium/UCSD/00_Knight_Lab/03_Extraction_test_12201/round_02/data/shotgun/03_filtered_data/dna_bothPS_shotgun_woltka_wol_biom_noNTCs_noMock_taxa_collapse_genus.qzv
# There are 1044 samples and 2060 features
_____no_output_____
</code>
# Import feature-tables_____no_output_____
<code>
dna_bothPS_16S_genus_qza = q2.Artifact.load('/Users/Justin/Mycelium/UCSD/00_Knight_Lab/03_Extraction_test_12201/round_02/data/16S/10_filtered_data/dna_bothPS_16S_deblur_biom_lod_noChl_noMit_sepp_gg_noNTCs_noMock_taxa_collapse_genus.qza')
dna_bothPS_ITS_genus_qza = q2.Artifact.load('/Users/Justin/Mycelium/UCSD/00_Knight_Lab/03_Extraction_test_12201/round_02/data/ITS/08_filtered_data/dna_bothPS_ITS_deblur_biom_lod_noNTCs_noMock_taxa_collapse_genus.qza')
dna_bothPS_shotgun_genus_qza = q2.Artifact.load('/Users/Justin/Mycelium/UCSD/00_Knight_Lab/03_Extraction_test_12201/round_02/data/shotgun/03_filtered_data/dna_bothPS_shotgun_woltka_wol_biom_noNTCs_noMock_taxa_collapse_genus.qza')
_____no_output_____
</code>
# Convert QZA to a Pandas DataFrame_____no_output_____
<code>
dna_bothPS_16S_genus_df = dna_bothPS_16S_genus_qza.view(pd.DataFrame)
dna_bothPS_ITS_genus_df = dna_bothPS_ITS_genus_qza.view(pd.DataFrame)
dna_bothPS_shotgun_genus_df = dna_bothPS_shotgun_genus_qza.view(pd.DataFrame)
_____no_output_____
</code>
# Melt dataframes_____no_output_____
<code>
dna_bothPS_16S_genus_df_melt = dna_bothPS_16S_genus_df.unstack()
dna_bothPS_ITS_genus_df_melt = dna_bothPS_ITS_genus_df.unstack()
dna_bothPS_shotgun_genus_df_melt = dna_bothPS_shotgun_genus_df.unstack()
dna_bothPS_16S_genus = pd.DataFrame(dna_bothPS_16S_genus_df_melt)
dna_bothPS_ITS_genus = pd.DataFrame(dna_bothPS_ITS_genus_df_melt)
dna_bothPS_shotgun_genus = pd.DataFrame(dna_bothPS_shotgun_genus_df_melt)
_____no_output_____dna_bothPS_16S_genus.reset_index(inplace=True)
dna_bothPS_16S_genus.rename(columns={'level_0':'taxa','level_1':'sample',0:'counts'}, inplace=True)
dna_bothPS_ITS_genus.reset_index(inplace=True)
dna_bothPS_ITS_genus.rename(columns={'level_0':'taxa','level_1':'sample',0:'counts'}, inplace=True)
dna_bothPS_shotgun_genus.reset_index(inplace=True)
dna_bothPS_shotgun_genus.rename(columns={'level_0':'taxa','level_1':'sample',0:'counts'}, inplace=True)
_____no_output_____
</code>
# Wrangle data into long form for each kit_____no_output_____Wrangle metadata_____no_output_____
<code>
# Create empty list of extraction kit IDs
ext_kit_levels = []
# Create empty list of metadata subsets based on levels of variable of interest
ext_kit = []
# Create empty list of baseline samples for each subset
bl = []
# Populate lists with round 1 data
for ext_kit_level, ext_kit_level_df in meta_clean_r1_with_r2_PS.groupby('extraction_kit_round'):
ext_kit.append(ext_kit_level_df)
powersoil_r1_bl = meta_clean_r1_onlyPS[meta_clean_r1_onlyPS.extraction_kit_round == 'PowerSoil r1']
bl.append(powersoil_r1_bl)
ext_kit_levels.append(ext_kit_level)
print('Gathered data for',ext_kit_level)
# Populate lists with round 2 data
for ext_kit_level, ext_kit_level_df in meta_clean_r2_with_r1_PS.groupby('extraction_kit_round'):
ext_kit.append(ext_kit_level_df)
powersoil_r2_bl = meta_clean_r2_onlyPS[meta_clean_r2_onlyPS['extraction_kit_round'] == 'PowerSoil r2']
bl.append(powersoil_r2_bl)
ext_kit_levels.append(ext_kit_level)
print('Gathered data for',ext_kit_level)
# Create empty list for concatenated subset-baseline datasets
subsets_w_bl = {}
# Populate list with subset-baseline data
for ext_kit_level, ext_kit_df, ext_kit_bl in zip(ext_kit_levels, ext_kit, bl):
new_df = pd.concat([ext_kit_bl,ext_kit_df])
subsets_w_bl[ext_kit_level] = new_df
print('Merged data for',ext_kit_level)
Gathered data for Norgen
Gathered data for PowerSoil Pro
Gathered data for PowerSoil r2
Gathered data for MagMAX Microbiome
Gathered data for NucleoMag Food
Gathered data for PowerSoil r1
Gathered data for Zymo MagBead
Merged data for Norgen
Merged data for PowerSoil Pro
Merged data for PowerSoil r2
Merged data for MagMAX Microbiome
Merged data for NucleoMag Food
Merged data for PowerSoil r1
Merged data for Zymo MagBead
</code>
16S_____no_output_____
<code>
list_of_lists = []
for key, value in subsets_w_bl.items():
string = ''.join(key)
#merge metadata subsets with baseline with taxonomy
meta_16S_genera = pd.merge(value, dna_bothPS_16S_genus, left_index=True, right_on='sample')
#create new column
meta_16S_genera['taxa_subject'] = meta_16S_genera['taxa'] + meta_16S_genera['host_subject_id']
#subtract out duplicates and pivot
meta_16S_genera_clean = meta_16S_genera.drop_duplicates(subset = ['taxa_subject', 'extraction_kit_round'], keep = 'first')
meta_16S_genera_pivot = meta_16S_genera_clean.pivot(index='taxa_subject', columns='extraction_kit_round', values='counts')
meta_16S_genera_pivot_clean = meta_16S_genera_pivot.dropna()
# Export dataframe to file
meta_16S_genera_pivot_clean.to_csv('/Users/Justin/Mycelium/UCSD/00_Knight_Lab/03_Extraction_test_12201/round_02/results/feature_abundance_correlation_images/table_correlation_16S_genera_%s.txt'%string,
sep = '\t',
index = False)
_____no_output_____
</code>
ITS_____no_output_____
<code>
list_of_lists = []
for key, value in subsets_w_bl.items():
string = ''.join(key)
#merge metadata subsets with baseline with taxonomy
meta_ITS_genera = pd.merge(value, dna_bothPS_ITS_genus, left_index=True, right_on='sample')
#create new column
meta_ITS_genera['taxa_subject'] = meta_ITS_genera['taxa'] + meta_ITS_genera['host_subject_id']
#subtract out duplicates and pivot
meta_ITS_genera_clean = meta_ITS_genera.drop_duplicates(subset = ['taxa_subject', 'extraction_kit_round'], keep = 'first')
meta_ITS_genera_pivot = meta_ITS_genera_clean.pivot(index='taxa_subject', columns='extraction_kit_round', values='counts')
meta_ITS_genera_pivot_clean = meta_ITS_genera_pivot.dropna()
# Export dataframe to file
meta_ITS_genera_pivot_clean.to_csv('/Users/Justin/Mycelium/UCSD/00_Knight_Lab/03_Extraction_test_12201/round_02/results/feature_abundance_correlation_images/table_correlation_ITS_genera_%s.txt'%string,
sep = '\t',
index = False)
_____no_output_____
</code>
Shotgun_____no_output_____
<code>
list_of_lists = []
for key, value in subsets_w_bl.items():
string = ''.join(key)
#merge metadata subsets with baseline with taxonomy
meta_shotgun_genera = pd.merge(value, dna_bothPS_shotgun_genus, left_index=True, right_on='sample')
#create new column
meta_shotgun_genera['taxa_subject'] = meta_shotgun_genera['taxa'] + meta_shotgun_genera['host_subject_id']
#subtract out duplicates and pivot
meta_shotgun_genera_clean = meta_shotgun_genera.drop_duplicates(subset = ['taxa_subject', 'extraction_kit_round'], keep = 'first')
meta_shotgun_genera_pivot = meta_shotgun_genera_clean.pivot(index='taxa_subject', columns='extraction_kit_round', values='counts')
meta_shotgun_genera_pivot_clean = meta_shotgun_genera_pivot.dropna()
# Export dataframe to file
meta_shotgun_genera_pivot_clean.to_csv('/Users/Justin/Mycelium/UCSD/00_Knight_Lab/03_Extraction_test_12201/round_02/results/feature_abundance_correlation_images/table_correlation_shotgun_genera_%s.txt'%string,
sep = '\t',
index = False)
_____no_output_____
</code>
# Code below is not used
## NOTE: The first cell was originally appended to the cell above_____no_output_____
<code>
# check pearson correlation
x = meta_16S_genera_pivot_clean.iloc[:,1]
y = meta_16S_genera_pivot_clean[key]
corr = stats.pearsonr(x, y)
int1, int2 = corr
corr_rounded = round(int1, 2)
corr_str = str(corr_rounded)
x_key = key[0]
y_key = key[1]
list1 = []
list1.append(corr_rounded)
list1.append(key)
list_of_lists.append(list1)
_____no_output_____list_of_lists_____no_output_____df = pd.DataFrame(list_of_lists, columns = ['Correlation', 'Extraction kit'])
_____no_output_____df.to_csv('/Users/Justin/Mycelium/UCSD/00_Knight_Lab/03_Extraction_test_12201/round_02/results/feature_abundance_correlation_images/table_correlations_16S_genera.txt',
sep = '\t',
index = False)
_____no_output_____splot = sns.catplot(y="Correlation",
x="Extraction kit",
hue= "Extraction kit",
kind='bar',
data=df,
dodge = False)
splot.set(ylim=(0, 1))
plt.xticks(rotation=45,
horizontalalignment='right')
#new_labels = ['−20C','−20C after 1 week', '4C','Ambient','Freeze-thaw','Heat']
#for t, l in zip(splot._legend.texts, new_labels):
# t.set_text(l)
splot.savefig('correlation_16S_genera.png')
splot.savefig('correlation_16S_genera.svg', format='svg', dpi=1200)
_____no_output_____
</code>
### Individual correlation plots _____no_output_____
<code>
for key, value in subsets_w_bl.items():
string = ''.join(key)
#merge metadata subsets with baseline with taxonomy
meta_16S_genera = pd.merge(value, dna_bothPS_16S_genus, left_index=True, right_on='sample')
#create new column
meta_16S_genera['taxa_subject'] = meta_16S_genera['taxa'] + meta_16S_genera['host_subject_id']
#subtract out duplicates and pivot
meta_16S_genera_clean = meta_16S_genera.drop_duplicates(subset = ['taxa_subject', 'extraction_kit_round'], keep = 'first')
meta_16S_genera_pivot = meta_16S_genera_clean.pivot(index='taxa_subject', columns='extraction_kit_round', values='counts')
meta_16S_genera_pivot_clean = meta_16S_genera_pivot.dropna()
# check pearson correlation
x = meta_16S_genera_pivot_clean.iloc[:,1]
y = meta_16S_genera_pivot_clean[key]
corr = stats.pearsonr(x, y)
int1, int2 = corr
corr_rounded = round(int1, 2)
corr_str = str(corr_rounded)
#make correlation plots
meta_16S_genera_pivot_clean['x1'] = meta_16S_genera_pivot_clean.iloc[:,1]
meta_16S_genera_pivot_clean['y1'] = meta_16S_genera_pivot_clean.iloc[:,0]
ax=sns.lmplot(x='x1',
y='y1',
data=meta_16S_genera_pivot_clean,
height=3.8)
ax.set(yscale='log')
ax.set(xscale='log')
ax.set(xlabel='PowerSoil', ylabel=key)
#plt.xlim(0.00001, 10000000)
#plt.ylim(0.00001, 10000000)
plt.title(string + ' (%s)' %corr_str)
ax.savefig('/Users/Justin/Mycelium/UCSD/00_Knight_Lab/03_Extraction_test_12201/round_02/results/feature_abundance_correlation_images/figure_scatter_correlation_16S_genera_%s.png'%string)
ax.savefig('/Users/Justin/Mycelium/UCSD/00_Knight_Lab/03_Extraction_test_12201/round_02/results/feature_abundance_correlation_images/figure_scatter_correlation_16S_genera_%s.svg'%string, format='svg',dpi=1200)
_____no_output_____
</code>
| {
"repository": "justinshaffer/Extraction_kit_benchmarking",
"path": "code/Taxon profile analysis.ipynb",
"matched_keywords": [
"microbiome",
"QIIME2"
],
"stars": null,
"size": 23131,
"hexsha": "d0096cb02dc68507e2b0cfb172642550ef65c2c8",
"max_line_length": 246,
"avg_line_length": 34.7834586466,
"alphanum_fraction": 0.6314469759
} |
# Notebook from rpatil524/Community-Notebooks
Path: MachineLearning/How_to_build_an_RNAseq_logistic_regression_classifier_with_BigQuery_ML.ipynb
<a href="https://colab.research.google.com/github/isb-cgc/Community-Notebooks/blob/master/MachineLearning/How_to_build_an_RNAseq_logistic_regression_classifier_with_BigQuery_ML.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>_____no_output_____# How to build an RNA-seq logistic regression classifier with BigQuery ML
Check out other notebooks at our [Community Notebooks Repository](https://github.com/isb-cgc/Community-Notebooks)!
- **Title:** How to build an RNA-seq logistic regression classifier with BigQuery ML
- **Author:** John Phan
- **Created:** 2021-07-19
- **Purpose:** Demonstrate use of BigQuery ML to predict a cancer endpoint using gene expression data.
- **URL:** https://github.com/isb-cgc/Community-Notebooks/blob/master/MachineLearning/How_to_build_an_RNAseq_logistic_regression_classifier_with_BigQuery_ML.ipynb
- **Note:** This example is based on the work published by [Bosquet et al.](https://molecular-cancer.biomedcentral.com/articles/10.1186/s12943-016-0548-9)
This notebook builds upon the [scikit-learn notebook](https://github.com/isb-cgc/Community-Notebooks/blob/master/MachineLearning/How_to_build_an_RNAseq_logistic_regression_classifier.ipynb) and demonstrates how to build a machine learning model using BigQuery ML to predict ovarian cancer treatment outcome. BigQuery is used to create a temporary data table that contains both training and testing data. These datasets are then used to fit and evaluate a Logistic Regression classifier. _____no_output_____# Import Dependencies_____no_output_____
<code>
# GCP libraries
from google.cloud import bigquery
from google.colab import auth_____no_output_____
</code>
## Authenticate
Before using BigQuery, we need authorization to access BigQuery and Google Cloud. For more information, see the ['Quick Start Guide to ISB-CGC'](https://isb-cancer-genomics-cloud.readthedocs.io/en/latest/sections/HowToGetStartedonISB-CGC.html). Alternative authentication methods can be found [here](https://googleapis.dev/python/google-api-core/latest/auth.html)._____no_output_____
<code>
# if you're using Google Colab, authenticate to gcloud with the following
auth.authenticate_user()
# alternatively, use the gcloud SDK
#!gcloud auth application-default login_____no_output_____
</code>
## Parameters
Customize the following parameters based on your notebook, execution environment, or project. BigQuery ML must create and store classification models, so be sure that you have write access to the locations stored in the "bq_dataset" and "bq_project" variables. _____no_output_____
<code>
# set the google project that will be billed for this notebook's computations
google_project = 'google-project' ## CHANGE ME
# bq project for storing ML model
bq_project = 'bq-project' ## CHANGE ME
# bq dataset for storing ML model
bq_dataset = 'scratch' ## CHANGE ME
# name of temporary table for data
bq_tmp_table = 'tmp_data'
# name of ML model
bq_ml_model = 'tcga_ov_therapy_ml_lr_model'
# in this example, we'll be using the Ovarian cancer TCGA dataset
cancer_type = 'TCGA-OV'
# genes used for prediction model, taken from Bosquet et al.
genes = "'RHOT1','MYO7A','ZBTB10','MATK','ST18','RPS23','GCNT1','DROSHA','NUAK1','CCPG1',\
'PDGFD','KLRAP1','MTAP','RNF13','THBS1','MLX','FAP','TIMP3','PRSS1','SLC7A11',\
'OLFML3','RPS20','MCM5','POLE','STEAP4','LRRC8D','WBP1L','ENTPD5','SYNE1','DPT',\
'COPZ2','TRIO','PDPR'"
# clinical data table
clinical_table = 'isb-cgc-bq.TCGA_versioned.clinical_gdc_2019_06'
# RNA seq data table
rnaseq_table = 'isb-cgc-bq.TCGA.RNAseq_hg38_gdc_current'
_____no_output_____
</code>
## BigQuery Client
Create the BigQuery client._____no_output_____
<code>
# Create a client to access the data within BigQuery
client = bigquery.Client(google_project)_____no_output_____
</code>
## Create a Table with a Subset of the Gene Expression Data
Pull RNA-seq gene expression data from the TCGA RNA-seq BigQuery table, join it with clinical labels, and pivot the table so that it can be used with BigQuery ML. In this example, we will label the samples based on therapy outcome. "Complete Remission/Response" will be labeled as "1" while all other therapy outcomes will be labeled as "0". This prepares the data for binary classification.
Prediction modeling with RNA-seq data typically requires a feature selection step to reduce the dimensionality of the data before training a classifier. However, to simplify this example, we will use a pre-identified set of 33 genes (Bosquet et al. identified 34 genes, but PRSS2 and its aliases are not available in the hg38 RNA-seq data).
Creation of a BQ table with only the data of interest reduces the size of the data passed to BQ ML and can significantly reduce the cost of running BQ ML queries. This query also randomly splits the dataset into "training" and "testing" sets using the "FARM_FINGERPRINT" hash function in BigQuery. "FARM_FINGERPRINT" generates an integer from the input string. More information can be found [here](https://cloud.google.com/bigquery/docs/reference/standard-sql/hash_functions)._____no_output_____
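To sanity-check the split before building the full table, a small preview query like the one below can be run first (a minimal sketch; the `split_preview` variable name is illustrative). Because "FARM_FINGERPRINT" is deterministic, each case_barcode always hashes to the same bucket, so the training/testing assignment is reproducible across runs._____no_output_____
<code>
# Optional sanity check (sketch): preview the deterministic hash-based split.
# Buckets 0-4 map to 'training' and 5-9 map to 'testing', mirroring the logic
# in the table-creation query below. This only reads the clinical table.
split_preview = client.query(("""
SELECT
  case_barcode,
  MOD(ABS(FARM_FINGERPRINT(case_barcode)), 10) AS hash_bucket,
  CASE
    WHEN MOD(ABS(FARM_FINGERPRINT(case_barcode)), 10) < 5 THEN 'training'
    ELSE 'testing'
  END AS data_partition
FROM `{clinical_table}`
WHERE project_short_name = '{cancer_type}'
LIMIT 10
""").format(
    clinical_table=clinical_table,
    cancer_type=cancer_type
)).result().to_dataframe()
split_preview
</code>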
<code>
tmp_table_query = client.query(("""
BEGIN
CREATE OR REPLACE TABLE `{bq_project}.{bq_dataset}.{bq_tmp_table}` AS
SELECT * FROM (
SELECT
labels.case_barcode as sample,
labels.data_partition as data_partition,
labels.response_label AS label,
ge.gene_name AS gene_name,
-- Multiple samples may exist per case, take the max value
MAX(LOG(ge.HTSeq__FPKM_UQ+1)) AS gene_expression
FROM `{rnaseq_table}` AS ge
INNER JOIN (
SELECT
*
FROM (
SELECT
case_barcode,
primary_therapy_outcome_success,
CASE
-- Complete Response --> label as 1
-- All other responses --> label as 0
WHEN primary_therapy_outcome_success = 'Complete Remission/Response' THEN 1
WHEN (primary_therapy_outcome_success IN (
'Partial Remission/Response','Progressive Disease','Stable Disease'
)) THEN 0
END AS response_label,
CASE
WHEN MOD(ABS(FARM_FINGERPRINT(case_barcode)), 10) < 5 THEN 'training'
WHEN MOD(ABS(FARM_FINGERPRINT(case_barcode)), 10) >= 5 THEN 'testing'
END AS data_partition
FROM `{clinical_table}`
WHERE
project_short_name = '{cancer_type}'
AND primary_therapy_outcome_success IS NOT NULL
)
) labels
ON labels.case_barcode = ge.case_barcode
WHERE gene_name IN ({genes})
GROUP BY sample, label, data_partition, gene_name
)
PIVOT (
MAX(gene_expression) FOR gene_name IN ({genes})
);
END;
""").format(
bq_project=bq_project,
bq_dataset=bq_dataset,
bq_tmp_table=bq_tmp_table,
rnaseq_table=rnaseq_table,
clinical_table=clinical_table,
cancer_type=cancer_type,
genes=genes
)).result()
print(tmp_table_query)<google.cloud.bigquery.table._EmptyRowIterator object at 0x7f3894001250>
</code>
Let's take a look at this subset table. The data has been pivoted such that each of the 33 genes is available as a column that can be "SELECTED" in a query. In addition, the "label" and "data_partition" columns simplify data handling for classifier training and evaluation. _____no_output_____
<code>
tmp_table_data = client.query(("""
SELECT
* --usually not recommended to use *, but in this case, we want to see all of the 33 genes
FROM `{bq_project}.{bq_dataset}.{bq_tmp_table}`
""").format(
bq_project=bq_project,
bq_dataset=bq_dataset,
bq_tmp_table=bq_tmp_table
)).result().to_dataframe()
print(tmp_table_data.info())
tmp_table_data<class 'pandas.core.frame.DataFrame'>
RangeIndex: 264 entries, 0 to 263
Data columns (total 36 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 sample 264 non-null object
1 data_partition 264 non-null object
2 label 264 non-null int64
3 RHOT1 264 non-null float64
4 MYO7A 264 non-null float64
5 ZBTB10 264 non-null float64
6 MATK 264 non-null float64
7 ST18 264 non-null float64
8 RPS23 264 non-null float64
9 GCNT1 264 non-null float64
10 DROSHA 264 non-null float64
11 NUAK1 264 non-null float64
12 CCPG1 264 non-null float64
13 PDGFD 264 non-null float64
14 KLRAP1 264 non-null float64
15 MTAP 264 non-null float64
16 RNF13 264 non-null float64
17 THBS1 264 non-null float64
18 MLX 264 non-null float64
19 FAP 264 non-null float64
20 TIMP3 264 non-null float64
21 PRSS1 264 non-null float64
22 SLC7A11 264 non-null float64
23 OLFML3 264 non-null float64
24 RPS20 264 non-null float64
25 MCM5 264 non-null float64
26 POLE 264 non-null float64
27 STEAP4 264 non-null float64
28 LRRC8D 264 non-null float64
29 WBP1L 264 non-null float64
30 ENTPD5 264 non-null float64
31 SYNE1 264 non-null float64
32 DPT 264 non-null float64
33 COPZ2 264 non-null float64
34 TRIO 264 non-null float64
35 PDPR 264 non-null float64
dtypes: float64(33), int64(1), object(2)
memory usage: 74.4+ KB
None
</code>
# Train the Machine Learning Model
Now we can train a classifier using BigQuery ML with the data stored in the subset table. This model will be stored in the location specified by the "bq_ml_model" variable, and can be reused to predict samples in the future.
We pass three options to the BQ ML model: model_type, auto_class_weights, and input_label_cols. Model_type specifies the classifier model type. In this case, we use "LOGISTIC_REG" to train a logistic regression classifier. Other classifier options are documented [here](https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-create). Auto_class_weights indicates whether samples should be weighted to balance the classes. For example, if the dataset happens to have more samples labeled as "Complete Response", those samples would be less weighted to ensure that the model is not biased towards predicting those samples. Input_label_cols tells BigQuery that the "label" column should be used to determine each sample's label.
**Warning**: BigQuery ML models can be very time-consuming and expensive to train. Please check your data size before running BigQuery ML commands. Information about BigQuery ML costs can be found [here](https://cloud.google.com/bigquery-ml/pricing)._____no_output_____
<code>
# create ML model using BigQuery
ml_model_query = client.query(("""
CREATE OR REPLACE MODEL `{bq_project}.{bq_dataset}.{bq_ml_model}`
OPTIONS
(
model_type='LOGISTIC_REG',
auto_class_weights=TRUE,
input_label_cols=['label']
) AS
SELECT * EXCEPT(sample, data_partition) -- when training, we only need the label and feature columns
FROM `{bq_project}.{bq_dataset}.{bq_tmp_table}`
WHERE data_partition = 'training' -- using training data only
""").format(
bq_project=bq_project,
bq_dataset=bq_dataset,
bq_ml_model=bq_ml_model,
bq_tmp_table=bq_tmp_table
)).result()
print(ml_model_query)
# now get the model metadata
ml_model = client.get_model('{}.{}.{}'.format(bq_project, bq_dataset, bq_ml_model))
print(ml_model)<google.cloud.bigquery.table._EmptyRowIterator object at 0x7f3893663810>
Model(reference=ModelReference(project='isb-project-zero', dataset_id='jhp_scratch', project_id='tcga_ov_therapy_ml_lr_model'))
</code>
# Evaluate the Machine Learning Model
Once the model has been trained and stored, we can evaluate the model's performance using the "testing" dataset from our subset table. Evaluating a BQ ML model is generally less expensive than training.
Use the following query to evaluate the BQ ML model. Note that we're using the "data_partition = 'testing'" clause to ensure that we're only evaluating the model with test samples from the subset table.
BigQuery's ML.EVALUATE function returns several performance metrics: precision, recall, accuracy, f1_score, log_loss, and roc_auc. More details about these performance metrics are available from [Google's ML Crash Course](https://developers.google.com/machine-learning/crash-course/classification/video-lecture). Specific topics can be found at the following URLs: [precision and recall](https://developers.google.com/machine-learning/crash-course/classification/precision-and-recall), [accuracy](https://developers.google.com/machine-learning/crash-course/classification/accuracy), [ROC and AUC](https://developers.google.com/machine-learning/crash-course/classification/roc-and-auc). _____no_output_____
<code>
ml_eval = client.query(("""
SELECT * FROM ML.EVALUATE (MODEL `{bq_project}.{bq_dataset}.{bq_ml_model}`,
(
SELECT * EXCEPT(sample, data_partition)
FROM `{bq_project}.{bq_dataset}.{bq_tmp_table}`
WHERE data_partition = 'testing'
)
)
""").format(
bq_project=bq_project,
bq_dataset=bq_dataset,
bq_ml_model=bq_ml_model,
bq_tmp_table=bq_tmp_table
)).result().to_dataframe()_____no_output_____# Display the table of evaluation results
ml_eval_____no_output_____
</code>
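As a quick check (a sketch, not part of the original evaluation), the f1_score reported by ML.EVALUATE should equal the harmonic mean of the reported precision and recall, assuming the result columns are named after the metrics listed above._____no_output_____
<code>
# Recompute f1 from precision and recall (f1 is their harmonic mean); the value
# should match the f1_score column returned by ML.EVALUATE up to rounding.
precision = ml_eval['precision'][0]
recall = ml_eval['recall'][0]
print('f1 (recomputed): ', 2 * precision * recall / (precision + recall))
print('f1 (ML.EVALUATE):', ml_eval['f1_score'][0])
</code>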
# Predict Outcome for One or More Samples
ML.EVALUATE evaluates a model's performance, but does not produce actual predictions for each sample. In order to do that, we need to use the ML.PREDICT function. The syntax is similar to that of the ML.EVALUATE function and returns "label", "predicted_label", "predicted_label_probs", and all feature columns. Since the feature columns are unchanged from the input dataset, we select only the original label, predicted label, and probabilities for each sample.
Note that the input dataset can include one or more samples, and must include the same set of features as the training dataset. _____no_output_____
<code>
ml_predict = client.query(("""
SELECT
label,
predicted_label,
predicted_label_probs
FROM ML.PREDICT (MODEL `{bq_project}.{bq_dataset}.{bq_ml_model}`,
(
SELECT * EXCEPT(sample, data_partition)
FROM `{bq_project}.{bq_dataset}.{bq_tmp_table}`
WHERE data_partition = 'testing' -- Use the testing dataset
)
)
""").format(
bq_project=bq_project,
bq_dataset=bq_dataset,
bq_ml_model=bq_ml_model,
bq_tmp_table=bq_tmp_table
)).result().to_dataframe()_____no_output_____# Display the table of prediction results
ml_predict_____no_output_____# Calculate the accuracy of prediction, which should match the result of ML.EVALUATE
accuracy = 1-sum(abs(ml_predict['label']-ml_predict['predicted_label']))/len(ml_predict)
print('Accuracy: ', accuracy)Accuracy: 0.6230769230769231
</code>
# Next Steps
The BigQuery ML logistic regression model trained in this notebook is comparable to the scikit-learn model developed in our [companion notebook](https://github.com/isb-cgc/Community-Notebooks/blob/master/MachineLearning/How_to_build_an_RNAseq_logistic_regression_classifier.ipynb). BigQuery ML simplifies the model building and evaluation process by enabling bioinformaticians to use machine learning within the BigQuery ecosystem. However, it is often necessary to optimize performance by evaluating several types of models (i.e., other than logistic regression), and tuning model parameters. Due to the cost of BigQuery ML for training, such iterative model fine-tuning may be cost prohibitive. In such cases, a combination of scikit-learn (or other libraries such as Keras and TensorFlow) and BigQuery ML may be appropriate. E.g., models can be fine-tuned using scikit-learn and published as a BigQuery ML model for production applications. In future notebooks, we will explore methods for model selection, optimization, and publication with BigQuery ML. _____no_output_____
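For example, a rough local iteration loop might look like the sketch below. This is illustrative only (not the companion notebook's code): it reuses the `tmp_table_data` DataFrame loaded earlier and approximates the BQ ML settings with scikit-learn's `class_weight='balanced'`. Once a configuration performs well locally, the corresponding model can be trained and published with BigQuery ML as shown above._____no_output_____
<code>
# Sketch: iterate on model choice and parameters locally with scikit-learn at no
# additional BigQuery cost, using the same training/testing split as BQ ML.
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score

train = tmp_table_data[tmp_table_data['data_partition'] == 'training']
test = tmp_table_data[tmp_table_data['data_partition'] == 'testing']
feature_cols = [c for c in tmp_table_data.columns
                if c not in ('sample', 'data_partition', 'label')]

clf = LogisticRegression(class_weight='balanced', max_iter=1000)
clf.fit(train[feature_cols], train['label'])
print('Local scikit-learn accuracy:',
      accuracy_score(test['label'], clf.predict(test[feature_cols])))
</code>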
| {
"repository": "rpatil524/Community-Notebooks",
"path": "MachineLearning/How_to_build_an_RNAseq_logistic_regression_classifier_with_BigQuery_ML.ipynb",
"matched_keywords": [
"RNA-seq"
],
"stars": 16,
"size": 53907,
"hexsha": "d00c6cf71bdffc5e1414b4ece1a89cb27eb58159",
"max_line_length": 1068,
"avg_line_length": 42.9195859873,
"alphanum_fraction": 0.4078691079
} |
# Notebook from jouterleys/BiomchBERT
Path: classify_papers.ipynb
Uses Fine-Tuned BERT network to classify biomechanics papers from PubMed_____no_output_____
<code>
# Check date
!rm /etc/localtime
!ln -s /usr/share/zoneinfo/America/Los_Angeles /etc/localtime
!date
# might need to restart runtime if timezone didn't changeThu Mar 24 06:59:32 PDT 2022
## Install & load libraries
!pip install tensorflow==2.7.0
try:
from official.nlp import optimization
except:
!pip install -q -U tf-models-official==2.4.0
from official.nlp import optimization
try:
from Bio import Entrez
except:
!pip install -q -U biopython
from Bio import Entrez
try:
import tensorflow_text as text
except:
!pip install -q -U tensorflow_text==2.7.3
import tensorflow_text as text
import pandas as pd
import numpy as np
import nltk
nltk.download('stopwords')
from nltk.corpus import stopwords
import tensorflow as tf # probably have to lock version
import string
import datetime
from bs4 import BeautifulSoup
from sklearn.preprocessing import LabelEncoder
from tensorflow.keras.models import load_model
import tensorflow_hub as hub
from google.colab import drive
import datetime as dt
#Define date range
today = dt.date.today()
yesterday = today - dt.timedelta(days=1)
week_ago = yesterday - dt.timedelta(days=7) # ensure overlap in pubmed search
days_ago_6 = yesterday - dt.timedelta(days=6) # for text output
# Mount Google Drive for model and csv up/download
drive.mount('/content/gdrive')
print(today)Collecting tensorflow==2.7.0
Downloading tensorflow-2.7.0-cp37-cp37m-manylinux2010_x86_64.whl (489.6 MB)
[K |████████████████████████████████| 489.6 MB 24 kB/s
[?25hRequirement already satisfied: grpcio<2.0,>=1.24.3 in /usr/local/lib/python3.7/dist-packages (from tensorflow==2.7.0) (1.44.0)
Requirement already satisfied: keras-preprocessing>=1.1.1 in /usr/local/lib/python3.7/dist-packages (from tensorflow==2.7.0) (1.1.2)
Requirement already satisfied: h5py>=2.9.0 in /usr/local/lib/python3.7/dist-packages (from tensorflow==2.7.0) (3.1.0)
Requirement already satisfied: flatbuffers<3.0,>=1.12 in /usr/local/lib/python3.7/dist-packages (from tensorflow==2.7.0) (2.0)
Requirement already satisfied: six>=1.12.0 in /usr/local/lib/python3.7/dist-packages (from tensorflow==2.7.0) (1.15.0)
Requirement already satisfied: tensorflow-io-gcs-filesystem>=0.21.0 in /usr/local/lib/python3.7/dist-packages (from tensorflow==2.7.0) (0.24.0)
Requirement already satisfied: numpy>=1.14.5 in /usr/local/lib/python3.7/dist-packages (from tensorflow==2.7.0) (1.21.5)
Requirement already satisfied: astunparse>=1.6.0 in /usr/local/lib/python3.7/dist-packages (from tensorflow==2.7.0) (1.6.3)
Collecting gast<0.5.0,>=0.2.1
Downloading gast-0.4.0-py3-none-any.whl (9.8 kB)
Requirement already satisfied: wrapt>=1.11.0 in /usr/local/lib/python3.7/dist-packages (from tensorflow==2.7.0) (1.14.0)
Collecting keras<2.8,>=2.7.0rc0
Downloading keras-2.7.0-py2.py3-none-any.whl (1.3 MB)
[K |████████████████████████████████| 1.3 MB 43.1 MB/s
[?25hRequirement already satisfied: protobuf>=3.9.2 in /usr/local/lib/python3.7/dist-packages (from tensorflow==2.7.0) (3.17.3)
Requirement already satisfied: wheel<1.0,>=0.32.0 in /usr/local/lib/python3.7/dist-packages (from tensorflow==2.7.0) (0.37.1)
Requirement already satisfied: google-pasta>=0.1.1 in /usr/local/lib/python3.7/dist-packages (from tensorflow==2.7.0) (0.2.0)
Requirement already satisfied: tensorboard~=2.6 in /usr/local/lib/python3.7/dist-packages (from tensorflow==2.7.0) (2.8.0)
Requirement already satisfied: libclang>=9.0.1 in /usr/local/lib/python3.7/dist-packages (from tensorflow==2.7.0) (13.0.0)
Requirement already satisfied: typing-extensions>=3.6.6 in /usr/local/lib/python3.7/dist-packages (from tensorflow==2.7.0) (3.10.0.2)
Requirement already satisfied: termcolor>=1.1.0 in /usr/local/lib/python3.7/dist-packages (from tensorflow==2.7.0) (1.1.0)
Requirement already satisfied: absl-py>=0.4.0 in /usr/local/lib/python3.7/dist-packages (from tensorflow==2.7.0) (1.0.0)
Requirement already satisfied: opt-einsum>=2.3.2 in /usr/local/lib/python3.7/dist-packages (from tensorflow==2.7.0) (3.3.0)
Collecting tensorflow-estimator<2.8,~=2.7.0rc0
Downloading tensorflow_estimator-2.7.0-py2.py3-none-any.whl (463 kB)
[K |████████████████████████████████| 463 kB 48.7 MB/s
[?25hRequirement already satisfied: cached-property in /usr/local/lib/python3.7/dist-packages (from h5py>=2.9.0->tensorflow==2.7.0) (1.5.2)
Requirement already satisfied: setuptools>=41.0.0 in /usr/local/lib/python3.7/dist-packages (from tensorboard~=2.6->tensorflow==2.7.0) (57.4.0)
Requirement already satisfied: werkzeug>=0.11.15 in /usr/local/lib/python3.7/dist-packages (from tensorboard~=2.6->tensorflow==2.7.0) (1.0.1)
Requirement already satisfied: google-auth<3,>=1.6.3 in /usr/local/lib/python3.7/dist-packages (from tensorboard~=2.6->tensorflow==2.7.0) (1.35.0)
Requirement already satisfied: markdown>=2.6.8 in /usr/local/lib/python3.7/dist-packages (from tensorboard~=2.6->tensorflow==2.7.0) (3.3.6)
Requirement already satisfied: tensorboard-data-server<0.7.0,>=0.6.0 in /usr/local/lib/python3.7/dist-packages (from tensorboard~=2.6->tensorflow==2.7.0) (0.6.1)
Requirement already satisfied: requests<3,>=2.21.0 in /usr/local/lib/python3.7/dist-packages (from tensorboard~=2.6->tensorflow==2.7.0) (2.23.0)
Requirement already satisfied: tensorboard-plugin-wit>=1.6.0 in /usr/local/lib/python3.7/dist-packages (from tensorboard~=2.6->tensorflow==2.7.0) (1.8.1)
Requirement already satisfied: google-auth-oauthlib<0.5,>=0.4.1 in /usr/local/lib/python3.7/dist-packages (from tensorboard~=2.6->tensorflow==2.7.0) (0.4.6)
Requirement already satisfied: cachetools<5.0,>=2.0.0 in /usr/local/lib/python3.7/dist-packages (from google-auth<3,>=1.6.3->tensorboard~=2.6->tensorflow==2.7.0) (4.2.4)
Requirement already satisfied: rsa<5,>=3.1.4 in /usr/local/lib/python3.7/dist-packages (from google-auth<3,>=1.6.3->tensorboard~=2.6->tensorflow==2.7.0) (4.8)
Requirement already satisfied: pyasn1-modules>=0.2.1 in /usr/local/lib/python3.7/dist-packages (from google-auth<3,>=1.6.3->tensorboard~=2.6->tensorflow==2.7.0) (0.2.8)
Requirement already satisfied: requests-oauthlib>=0.7.0 in /usr/local/lib/python3.7/dist-packages (from google-auth-oauthlib<0.5,>=0.4.1->tensorboard~=2.6->tensorflow==2.7.0) (1.3.1)
Requirement already satisfied: importlib-metadata>=4.4 in /usr/local/lib/python3.7/dist-packages (from markdown>=2.6.8->tensorboard~=2.6->tensorflow==2.7.0) (4.11.3)
Requirement already satisfied: zipp>=0.5 in /usr/local/lib/python3.7/dist-packages (from importlib-metadata>=4.4->markdown>=2.6.8->tensorboard~=2.6->tensorflow==2.7.0) (3.7.0)
Requirement already satisfied: pyasn1<0.5.0,>=0.4.6 in /usr/local/lib/python3.7/dist-packages (from pyasn1-modules>=0.2.1->google-auth<3,>=1.6.3->tensorboard~=2.6->tensorflow==2.7.0) (0.4.8)
Requirement already satisfied: chardet<4,>=3.0.2 in /usr/local/lib/python3.7/dist-packages (from requests<3,>=2.21.0->tensorboard~=2.6->tensorflow==2.7.0) (3.0.4)
Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.7/dist-packages (from requests<3,>=2.21.0->tensorboard~=2.6->tensorflow==2.7.0) (2021.10.8)
Requirement already satisfied: idna<3,>=2.5 in /usr/local/lib/python3.7/dist-packages (from requests<3,>=2.21.0->tensorboard~=2.6->tensorflow==2.7.0) (2.10)
Requirement already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in /usr/local/lib/python3.7/dist-packages (from requests<3,>=2.21.0->tensorboard~=2.6->tensorflow==2.7.0) (1.24.3)
Requirement already satisfied: oauthlib>=3.0.0 in /usr/local/lib/python3.7/dist-packages (from requests-oauthlib>=0.7.0->google-auth-oauthlib<0.5,>=0.4.1->tensorboard~=2.6->tensorflow==2.7.0) (3.2.0)
Installing collected packages: tensorflow-estimator, keras, gast, tensorflow
Attempting uninstall: tensorflow-estimator
Found existing installation: tensorflow-estimator 2.8.0
Uninstalling tensorflow-estimator-2.8.0:
Successfully uninstalled tensorflow-estimator-2.8.0
Attempting uninstall: keras
Found existing installation: keras 2.8.0
Uninstalling keras-2.8.0:
Successfully uninstalled keras-2.8.0
Attempting uninstall: gast
Found existing installation: gast 0.5.3
Uninstalling gast-0.5.3:
Successfully uninstalled gast-0.5.3
Attempting uninstall: tensorflow
Found existing installation: tensorflow 2.8.0
Uninstalling tensorflow-2.8.0:
Successfully uninstalled tensorflow-2.8.0
Successfully installed gast-0.4.0 keras-2.7.0 tensorflow-2.7.0 tensorflow-estimator-2.7.0
[K |████████████████████████████████| 1.1 MB 5.3 MB/s
[K |████████████████████████████████| 99 kB 7.8 MB/s
[K |████████████████████████████████| 596 kB 41.1 MB/s
[K |████████████████████████████████| 352 kB 49.1 MB/s
[K |████████████████████████████████| 1.1 MB 37.0 MB/s
[K |████████████████████████████████| 47.8 MB 57 kB/s
[K |████████████████████████████████| 1.2 MB 43.6 MB/s
[K |████████████████████████████████| 43 kB 1.8 MB/s
[K |████████████████████████████████| 237 kB 45.6 MB/s
[?25h Building wheel for py-cpuinfo (setup.py) ... [?25l[?25hdone
Building wheel for seqeval (setup.py) ... [?25l[?25hdone
[K |████████████████████████████████| 2.3 MB 4.9 MB/s
[K |████████████████████████████████| 4.9 MB 5.5 MB/s
[?25h[nltk_data] Downloading package stopwords to /root/nltk_data...
[nltk_data] Unzipping corpora/stopwords.zip.
Mounted at /content/gdrive
2022-03-24
# Define Search Criteria ----
def search(query):
Entrez.email = '[email protected]'
handle = Entrez.esearch(db='pubmed',
sort='most recent',
retmax='5000',
retmode='xml',
datetype='pdat', # pdat is published date, edat is entrez date.
# reldate=7, # only within n days from now
mindate= min_date,
maxdate= max_date, # for searching date range
term=query)
results = Entrez.read(handle)
return results
# Perform Search and Pull Paper Titles ----
def fetch_details(ids):
Entrez.email = '[email protected]'
handle = Entrez.efetch(db='pubmed',
retmode='xml',
id=ids)
results = Entrez.read(handle)
return results
# Make the stop words for string cleaning ----
def html_strip(text):
text = BeautifulSoup(text, 'lxml').text
text = text.replace('[','').replace(']','')
return text
def clean_str(text, stops):
text = BeautifulSoup(text, 'lxml').text
text = text.split()
return ' '.join([word for word in text if word not in stops])
stop = list(stopwords.words('english'))
stop_c = [string.capwords(word) for word in stop]
for word in stop_c:
stop.append(word)
new_stop = ['The', 'An', 'A', 'Do', 'Is', 'In', 'StringElement',
'NlmCategory', 'Label', 'attributes', 'INTRODUCTION',
'METHODS', 'BACKGROUND', 'RESULTS', 'CONCLUSIONS']
for s in new_stop:
stop.append(s)
# Search terms (can test string with Pubmed Advanced Search) ----
# search_results = search('(Biomech*[Title/Abstract] OR locomot*[Title/Abstract])')
min_date = week_ago.strftime('%m/%d/%Y')
max_date = yesterday.strftime('%m/%d/%Y')
search_results = search('(biomech*[Title/Abstract] OR locomot*[Title/Abstract] NOT opioid*[Title/Abstract] NOT pharm*[Journal] NOT mouse[Title/Abstract] NOT drosophil*[Title/Abstract] NOT mice[Title/Abstract] NOT rats*[Title/Abstract] NOT elegans[Title/Abstract])')
id_list = search_results['IdList']
papers = fetch_details(id_list)
print(len(papers['PubmedArticle']), 'Papers found')
titles, full_titles, keywords, authors, links, journals, abstracts = ([] for i in range(7))
for paper in papers['PubmedArticle']:
# clean and store titles, abstracts, and links
t = clean_str(paper['MedlineCitation']['Article']['ArticleTitle'],
stop).replace('[','').replace(']','').capitalize() # rm brackets that survived beautifulsoup, sentence case
titles.append(t)
full_titles.append(paper['MedlineCitation']['Article']['ArticleTitle'])
pmid = paper['MedlineCitation']['PMID']
links.append('[URL="https://www.ncbi.nlm.nih.gov/pubmed/{0}"]{1}[/URL]'.format(pmid, html_strip(paper['MedlineCitation']['Article']['ArticleTitle'])))
try:
abstracts.append(clean_str(paper['MedlineCitation']['Article']['Abstract']['AbstractText'][0],
stop).replace('[','').replace(']','').capitalize()) # rm brackets that survived beautifulsoup, sentence case
except:
abstracts.append('')
# clean and store authors
auths = []
try:
for auth in paper['MedlineCitation']['Article']['AuthorList']:
try: # see if there is a last name and initials
auth_name = [auth['LastName'], auth['Initials'] + ',']
auth_name = ' '.join(auth_name)
auths.append(auth_name)
except:
if 'LastName' in auth.keys(): # maybe they don't have initials
auths.append(auth['LastName'] + ',')
else: # no last name
auths.append('')
print(paper['MedlineCitation']['Article']['ArticleTitle'],
'has an issue with an author name:')
except:
auths.append('AUTHOR NAMES ERROR')
print(paper['MedlineCitation']['Article']['ArticleTitle'], 'has no author list?')
# compile authors
authors.append(' '.join(auths).replace('[','').replace(']','')) # rm brackets in names
# journal names
journals.append(paper['MedlineCitation']['Article']['Journal']['Title'].replace('[','').replace(']','')) # rm brackets
# store keywords
if paper['MedlineCitation']['KeywordList'] != []:
kwds = []
for kw in paper['MedlineCitation']['KeywordList'][0]:
kwds.append(kw[:])
keywords.append(', '.join(kwds).lower())
else:
keywords.append('')
# Put Titles, Abstracts, Authors, Journal, and Keywords into dataframe
papers_df = pd.DataFrame({'title': titles,
'keywords': keywords,
'abstract': abstracts,
'authors': authors,
'journal': journals,
'links': links,
'raw_title': full_titles,
'mindate': min_date,
'maxdate': max_date})
# remove papers with no title or no authors
for index, row in papers_df.iterrows():
if row['title'] == '' or row['authors'] == 'AUTHOR NAMES ERROR':
papers_df.drop(index, inplace=True)
papers_df.reset_index(drop=True, inplace=True)
# join titles and abstract
papers_df['BERT_input'] = pd.DataFrame(papers_df['title'] + ' ' + papers_df['abstract'])
# Load Fine-Tuned BERT Network ----
model = tf.saved_model.load('/content/gdrive/My Drive/BiomchBERT/Data/BiomchBERT/')
print('Loaded model from disk')
# Load Label Encoder ----
le = LabelEncoder()
le.classes_ = np.load('/content/gdrive/My Drive/BiomchBERT/Data/BERT_label_encoder.npy')
print('Loaded Label Encoder')
84 Papers found
Loaded model from disk
Loaded Label Encoder
# Predict Paper Topic ----
predicted_topic = model(papers_df['BERT_input'], training=False) # will run out of GPU memory (14GB) if predicting more than ~2000 title+abstracts at once_____no_output_____# Determine Publications that BiomchBERT is unsure about ----
topics, pred_val_str = ([] for i in range(2))
for pred_prob in predicted_topic:
pred_val = np.max(pred_prob)
if pred_val > 1.5 * np.sort(pred_prob)[-2]: # Is top confidence score more than 1.5x the second best confidence score?
topics.append(le.inverse_transform([np.argmax(pred_prob)])[0])
top1 = le.inverse_transform([np.argmax(pred_prob)])[0]
top2 = le.inverse_transform([list(pred_prob).index([np.sort(pred_prob)[-2]])])[0]
# pred_val_str.append(pred_val * 100) # just report top category
pred_val_str.append(str(np.round(pred_val * 100, 1)) + '% ' + str(top1) + '; ' + str(
np.round(np.sort(pred_prob)[-2] * 100, 1)) + '% ' + str(top2)) # report top 2 categories
else:
topics.append('UNKNOWN')
top1 = le.inverse_transform([np.argmax(pred_prob)])[0]
top2 = le.inverse_transform([list(pred_prob).index([np.sort(pred_prob)[-2]])])[0]
pred_val_str.append(str(np.round(pred_val * 100, 1)) + '% ' + str(top1) + '; ' + str(
np.round(np.sort(pred_prob)[-2] * 100, 1)) + '% ' + str(top2))
papers_df['topic'] = topics
papers_df['pred_val'] = pred_val_str
print('BiomchBERT is unsure about {0} papers\n'.format(len(papers_df[papers_df['topic'] == 'UNKNOWN'])))
BiomchBERT is unsure about 6 papers
# Prompt User to decide for BiomchBERT ----
unknown_papers = papers_df[papers_df['topic'] == 'UNKNOWN']
for indx, paper in unknown_papers.iterrows():
print(paper['raw_title'])
print(paper['journal'])
print(paper['pred_val'])
print()
splt_str = paper['pred_val'].split(';')
options = [cls for pred_cls in splt_str for cls in le.classes_ if (cls in pred_cls)]
choice = input('(1)st topic, (2)nd topic, (o)ther topic, or (r)emove paper? ')
print()
if choice == '1':
papers_df.iloc[indx]['topic'] = str(options[0])
elif choice == '2':
papers_df.iloc[indx]['topic'] = str(options[1])
elif choice == 'o':
# print all categories so you can select
for i in zip(range(len(le.classes_)),le.classes_):
print(i)
new_cat = input('Enter number of new class or type "r" to remove paper: ')
print()
if new_cat == 'r':
papers_df.iloc[indx]['topic'] = '_REMOVE_' # not deleted, but withheld from text file output
else:
papers_df.iloc[indx]['topic'] = le.classes_[int(new_cat)]
elif choice == 'r':
papers_df.iloc[indx]['topic'] = '_REMOVE_' # not deleted, but withheld from text file output
print('Removing {0} papers\n'.format(len(papers_df[papers_df['topic'] == '_REMOVE_'])))Contribution of sensory feedback to Soleus muscle activity during voluntary contraction in humans.
Journal of neurophysiology
51.6% NEURAL; 38.0% MUSCLE
(1)st topic, (2)nd topic, (o)ther topic, or (r)emove paper? 1
Anterior Cable Reconstruction: Prioritizing Rotator Cable and Tendon Cord When Considering Superior Capsular Reconstruction.
Arthroscopy : the journal of arthroscopic & related surgery : official publication of the Arthroscopy Association of North America and the International Arthroscopy Association
47.0% ORTHOPAEDICS/SURGERY; 45.3% TENDON/LIGAMENT
(1)st topic, (2)nd topic, (o)ther topic, or (r)emove paper? 1
Comparison the Effect of Pain Neuroscience and Pain Biomechanics Education on Neck Pain and Fear of Movement in Patients with Chronic Nonspecific Neck Pain During the COVID-19 Pandemic.
Pain and therapy
45.3% REHABILITATION; 34.9% ERGONOMICS
(1)st topic, (2)nd topic, (o)ther topic, or (r)emove paper? 1
The role of nanoplastics on the toxicity of the herbicide phenmedipham, using Danio rerio embryos as model organisms.
Environmental pollution (Barking, Essex : 1987)
28.0% COMPARATIVE; 23.5% CELLULAR/SUBCELLULAR
(1)st topic, (2)nd topic, (o)ther topic, or (r)emove paper? r
The Terrific Skink bite force suggests insularity as a likely driver to exceptional resource use.
Scientific reports
52.1% EVOLUTION/ANTHROPOLOGY; 42.1% COMPARATIVE
(1)st topic, (2)nd topic, (o)ther topic, or (r)emove paper? o
(0, 'BONE')
(1, 'BOTANY')
(2, 'CARDIOVASCULAR/CARDIOPULMONARY')
(3, 'CELLULAR/SUBCELLULAR')
(4, 'COMPARATIVE')
(5, 'DENTAL/ORAL/FACIAL')
(6, 'ERGONOMICS')
(7, 'EVOLUTION/ANTHROPOLOGY')
(8, 'GAIT/LOCOMOTION')
(9, 'HAND/FINGER/FOOT/TOE')
(10, 'JOINT/CARTILAGE')
(11, 'METHODS')
(12, 'MODELING')
(13, 'MUSCLE')
(14, 'NEURAL')
(15, 'ORTHOPAEDICS/SPINE')
(16, 'ORTHOPAEDICS/SURGERY')
(17, 'POSTURE/BALANCE')
(18, 'PROSTHETICS/ORTHOTICS')
(19, 'REHABILITATION')
(20, 'ROBOTICS')
(21, 'SPORT/EXERCISE')
(22, 'TENDON/LIGAMENT')
(23, 'TISSUE/BIOMATERIAL')
(24, 'TRAUMA/IMPACT')
(25, 'VETERINARY/AGRICULTURAL')
(26, 'VISUAL/VESTIBULAR')
Enter number of new class or type "r" to remove paper: 4
Overground gait kinematics and muscle activation patterns in the Yucatan mini pig.
Journal of neural engineering
36.0% COMPARATIVE; 28.8% GAIT/LOCOMOTION
(1)st topic, (2)nd topic, (o)ther topic, or (r)emove paper? 1
Removing 1 papers
# Double check that none of these papers were included in past literature updates ----
# load prior papers
# papers_df.to_csv('/content/gdrive/My Drive/BiomchBERT/Updates/prior_papers.csv', index=False) # run ONLY if there are no prior papers
prior_papers = pd.read_csv('/content/gdrive/My Drive/BiomchBERT/Updates/prior_papers.csv')
prior_papers.dropna(subset=['title'], inplace=True)
prior_papers.reset_index(drop=True, inplace=True)
# NEED TO DO: find matching papers between current week and prior papers using Pubmed ID since titles can change from ahead of print to final version.
# match = papers_df['links'].split(']')[0].isin(prior_papers['links'].split(']')[0])
match = papers_df['title'].isin(prior_papers['title']) # boolean
print('Removing {0} papers found in prior literature updates\n'.format(sum(match)))
# filter and check if everything accidentally was removed
filtered_papers_df = papers_df.drop(papers_df[match].index)
if filtered_papers_df.shape[0] < 1:
raise ValueError('might have removed all the papers for some reason. ')
else:
papers_df = filtered_papers_df
papers_df.reset_index(drop=True, inplace=True)
updated_prior_papers = pd.concat([prior_papers, papers_df], axis=0)
updated_prior_papers.reset_index(drop=True, inplace=True)
updated_prior_papers.to_csv('/content/gdrive/My Drive/BiomchBERT/Updates/prior_papers.csv', index=False)Removing 18 papers found in prior literature updates
# Create Text File for Biomch-L ----
# Compile papers grouped by topic
txtname = '/content/gdrive/My Drive/BiomchBERT/Updates/' + today.strftime("%Y-%m-%d") + '-litupdate.txt'
txt = open(txtname, 'w', encoding='utf-8')
txt.write('[SIZE=16px][B]LITERATURE UPDATE[/B][/SIZE]\n')
txt.write(days_ago_6.strftime("%b %d, %Y") + ' - '+ yesterday.strftime("%b %d, %Y")+'\n') # a week ago from yesterday.
txt.write(
"""
Literature search terms: biomech* & locomot*
Publications are classified by [URL="https://www.ryan-alcantara.com/projects/p88_BiomchBERT/"]BiomchBERT[/URL], a neural network trained on past Biomch-L Literature Updates. BiomchBERT is managed by [URL="https://jouterleys.github.io"]Jereme Outerleys[/URL], a Doctoral Student at Queen's University. Each publication has a score (out of 100%) reflecting how confident BiomchBERT is that the publication belongs in a particular category (top 2 shown). If something doesn't look right, email jereme.outerleys[at]queensu.ca.
Twitter: [URL="https://www.twitter.com/jouterleys"]@jouterleys[/URL].
"""
)
# Write papers to text file grouped by topic ----
topic_list = np.unique(papers_df.sort_values('topic')['topic'])
for topic in topic_list:
papers_subset = pd.DataFrame(papers_df[papers_df.topic == topic].reset_index(drop=True))
txt.write('\n')
# TOPIC NAME (with some cleaning)
if topic == '_REMOVE_':
continue
elif topic == 'UNKNOWN':
txt.write('[SIZE=16px][B]*Papers BiomchBERT is unsure how to classify*[/B][/SIZE]\n')
elif topic == 'CARDIOVASCULAR/CARDIOPULMONARY':
topic = 'CARDIOVASCULAR/PULMONARY'
txt.write('[SIZE=16px][B]*%s*[/B][/SIZE]\n' % topic)
elif topic == 'CELLULAR/SUBCELLULAR':
topic = 'CELLULAR'
txt.write('[SIZE=16px][B]*%s*[/B][/SIZE]\n' % topic)
elif topic == 'ORTHOPAEDICS/SURGERY':
topic = 'ORTHOPAEDICS (SURGERY)'
txt.write('[SIZE=16px][B]*%s*[/B][/SIZE]\n' % topic)
elif topic == 'ORTHOPAEDICS/SPINE':
topic = 'ORTHOPAEDICS (SPINE)'
txt.write('[SIZE=16px][B]*%s*[/B][/SIZE]\n' % topic)
else:
txt.write('[SIZE=16px][B]*%s*[/B][/SIZE]\n' % topic)
# HYPERLINKED PAPERS, AUTHORS, JOURNAL NAME
for i, paper in enumerate(papers_subset['links']):
txt.write('[B]%s[/B] ' % paper)
txt.write('%s ' % papers_subset['authors'][i])
txt.write('[I]%s[/I]. ' % papers_subset['journal'][i])
# CONFIDENCE SCORE (BERT softmax categorical crossentropy)
try:
txt.write('(%.1f%%) \n\n' % papers_subset['pred_val'][i])
except:
txt.write('(%s)\n\n' % papers_subset['pred_val'][i])
txt.write('[SIZE=16px][B]*PICK OF THE WEEK*[/B][/SIZE]\n')
txt.close()
print('Literature Update Exported for Biomch-L')
print('Location:', txtname)Literature Update Exported for Biomch-L
Location: /content/gdrive/My Drive/BiomchBERT/Updates/2022-03-24-litupdate.txt
_____no_output_____
</code>
| {
"repository": "jouterleys/BiomchBERT",
"path": "classify_papers.ipynb",
"matched_keywords": [
"BioPython",
"evolution",
"neuroscience"
],
"stars": null,
"size": 31222,
"hexsha": "d00d1fd31d99a85620063e299bc079d92cc907c1",
"max_line_length": 31222,
"avg_line_length": 31222,
"alphanum_fraction": 0.6535135481
} |