Dataset Viewer

Columns: text (string, 35–445k characters) | metadata (dict)
# biopython_notebook_1.ipynb
Repository: Deya-B/Bioinformatics-notes
<code>
from Bio.Seq import Seq
seq = Seq('GATTACA')
# Seq objects represent biological sequences and behave much like Python strings
print(seq)
</code>
<code>
seq = Seq('CAT')
for base in seq:
    print(base, end=' ')
seq + 'GAT'
</code>
<code>
dna = Seq('GATTACA')
print(dna.complement())
print(dna.reverse_complement())
dna.lower()
</code>
<code>
cds = Seq('GTGTTTTTGGTGTGGTGA')
mRNA = cds.transcribe()  # returns RNA
print(mRNA)
print(mRNA.back_transcribe())
</code>
<code>
mRNA = Seq('UUGUUUUUGGUGUGGUGA')
# Different translation tables
print("Translate using standard table: ", mRNA.translate())
print("Translate until stop codon : ", mRNA.translate(to_stop=True))  # stop at the stop codon
print("Using a different table : ", mRNA.translate(table=2))
print("by name : ", mRNA.translate(table="Vertebrate Mitochondrial"))
# This next line raises an error because the sequence is not a CDS for table=2
#print("CDS-like RNA : ", mRNA.translate(table=2, cds=True))
# But it is a CDS-like sequence for table=11 (Bacterial)
print("CDS-like RNA : ", mRNA.translate(table=11, cds=True))  # cds=True means the sequence is a complete coding region
cds = Seq('GTGTTTTTGGTGTGGTGA')
prot = cds.translate(table=11, cds=True)
prot = cds.translate()  # without cds=True the alternative start is not forced to M and the stop codon appears as '*'
prot
</code>
<code>
from Bio.Data import CodonTable
bacterial_table = CodonTable.unambiguous_dna_by_name['Bacterial']
print(bacterial_table)
# this is what the codon table looks like when printed
</code>
<code>
from Bio.SeqRecord import SeqRecord
##### SEQUENCE RECORD ##### All the information about the sequence, plus the sequence itself
sr = SeqRecord(Seq('AAA'), id='1', description='Simple seq', annotations={"molecule_type": "DNA"})
print(sr)
</code>
<code>
from Bio import SeqIO  # IO: input-output
# For small files with a single sequence
record = SeqIO.read("phix174/phix.fa", "fasta")  # filename, file format
print(record)
</code>
<code>
# For reading larger files
for record in SeqIO.parse("other/ls_orchid.fasta", "fasta"):  # parameters: filename, file format
    print(record)  # it is an iterator; each iteration yields one record
    print()
</code>
<code>
for record in SeqIO.parse("other/ls_orchid.gbk", "genbank"):
    print(record)
# GenBank records include annotations, scientific references, and most of the available information
</code>
<code>
iterator = SeqIO.parse("other/ls_orchid.gbk", "genbank")
records_dict = SeqIO.to_dict(iterator)  # build a dictionary keyed by record id
print(records_dict['Z78533.1'])  # print the record with the given identifier
</code>
<code>
#dd = {}
#dd[record.id] = record  # inside a parse loop this would collect all the records in memory
#dd['Z78533.1']  # would give the details for this id only
# SeqIO.index keeps the keys in memory and the data on disk, which makes everything faster
# Important: call records_dict.close() at the end, otherwise the file is kept open
records_dict = SeqIO.index("other/ls_orchid.gbk", "genbank")
print(records_dict['Z78533.1'])
records_dict.close()
</code>
<code>
# Reading compressed files
import gzip
with gzip.open("arabidopsis_thaliana/GCF_000001735.3_TAIR10_rna.fna.gz", "rt") as f:
    total_len = 0
    for sr in SeqIO.parse(f, "fasta"):
        total_len += len(sr.seq)
    print(total_len)
</code>
<code>
# Uses little memory and little disk space: BGZF compresses the file in blocks,
# but it requires a special format:
# Use this line in a shell to create the bgzf file
# You must have tabix or SAMtools installed
# bgzip -c GCF_000027305.1_ASM2730v1_genomic.gbff > GCF_000027305.1_ASM2730v1_genomic.gbff.bgz
records_dict = SeqIO.index("haemophilus_influenzae/GCF_000027305.1_ASM2730v1_genomic.gbff.bgz", "genbank")
seq = records_dict['NC_000907.1']
records_dict.close()
</code>
<code>
records_dict = SeqIO.index("other/ls_orchid.gbk", "genbank")
record = records_dict['Z78533.1']
records_dict.close()
print("ID:",record.id, "Name:", record.name) #SeqRecord tiene muchas cosas
print("Description:",record.description)
print(record.annotations)
print(record.features)
</code>
<code>
print(record.annotations.keys())  # the keys of the annotations dictionary
</code>
<code>
for ref in record.annotations['references']:
    print(ref)
    for l in ref.location:
        print(l)
</code>
<code>
from Bio import SeqFeature
records_dict = SeqIO.index("other/ls_orchid.gbk", "genbank")
print(len(records_dict['Z78533.1'].features), "features found")
for feature in records_dict['Z78533.1'].features:
    print("----------------------------------------")
    print(feature)
records_dict.close()
</code>
<code>
# Read the sequences in this file
file_iter = SeqIO.parse('haemophilus_influenzae/GCF_000027305.1_ASM2730v1_genomic.fna', 'fasta')
n_features = 0
# for each sequence, count its features
for seqrecord in file_iter:
    n_features += len(seqrecord.features)
print(n_features, "features found")
# this gives 0 because a FASTA file has no features
</code>
<code>
file_iter = SeqIO.parse('haemophilus_influenzae/GCF_000027305.1_ASM2730v1_genomic.gbff', 'genbank')
#file_iter = SeqIO.parse('plamodium_falciparum/GCF_000002765.3_ASM276v1_genomic.gbff', 'genbank')
n_features = 0
for seqrecord in file_iter:
    n_features += len(seqrecord.features)
print(n_features, "features found")
# here we do get features, because GenBank files include them
</code>
<code>
record = SeqIO.read('haemophilus_influenzae/GCF_000027305.1_ASM2730v1_genomic.gbff', 'genbank')
position = 10000
for feature in record.features:
    if position in feature:
        print(feature)
        print('-----------------------')
</code>
<code>
d = SeqIO.index('haemophilus_influenzae/GCF_000027305.1_ASM2730v1_genomic.gbff', 'genbank')
record = d['NC_000907.1']  # retrieve the chromosome
position = 1000  # position of interest inside the chromosome
# print the publications that studied that chromosome and position
for ref in record.annotations['references']:
    if any([position in loc for loc in ref.location]):
        print(ref)
</code>
<code>
n_cds = 0
file_iter = SeqIO.parse('haemophilus_influenzae/GCF_000027305.1_ASM2730v1_genomic.gbff', 'genbank')
#file_iter = SeqIO.parse('plamodium_falciparum/GCF_000002765.3_ASM276v1_genomic.gbff', 'genbank')
for seqrec in file_iter:
    for feature in seqrec.features:
        if feature.type == 'CDS':  # this feature is a coding sequence
            n_cds += 1
            mRNA = feature.extract(seqrec)  # extract the mRNA for this feature
            try:
                transl_table = 11
                if 'transl_table' in feature.qualifiers:
                    transl_table = int(feature.qualifiers['transl_table'][0])
                p = mRNA.seq.translate(table=transl_table, cds=True)
            except Exception:
                print("Protein {0} in gene {1} could not be translated!".
                      format(feature.qualifiers['protein_id'][0], seqrec.id))
print(n_cds)
</code>
<code>
record = SeqIO.read('haemophilus_influenzae/GCF_000027305.1_ASM2730v1_genomic.gbff', 'genbank')
print(record.id)
print("# of feature: ", len(record.features))
print("# of annotations: ", len(record.annotations))
print(record.features[1200])
sub_record = record[612623:620000]
print(sub_record.id)
print("# of feature: ", len(sub_record.features))
print("# of annotations: ", len(sub_record.annotations))
print(sub_record.features[0])
</code>
<code>
n_cds = 0
file_iter = SeqIO.parse('haemophilus_influenzae/GCF_000027305.1_ASM2730v1_genomic.gbff', 'genbank')
#file_iter = SeqIO.parse('plamodium_falciparum/GCF_000002765.3_ASM276v1_genomic.gbff', 'genbank')
for seqrec in file_iter:
    for feature in seqrec.features:
        if feature.type == 'CDS':  # this feature is a coding sequence
            n_cds += 1
            mRNA = feature.extract(seqrec)  # extract the mRNA for this feature
            try:
                transl_table = 1
                if 'transl_table' in feature.qualifiers:
                    transl_table = int(feature.qualifiers['transl_table'][0])
                p = mRNA.seq.translate(table=transl_table, cds=True)
            except Exception:
                print("Protein {0} in gene {1} could not be translated!".
                      format(feature.qualifiers['protein_id'][0], seqrec.id))
                print(feature.qualifiers)  # show the qualifiers of the feature that failed
print(n_cds)
</code>
| {
"filename": "biopython_notebook_1.ipynb",
"repository": "Deya-B/Bioinformatics-notes",
"query": "transformed_from_existing",
"size": 199361,
"sha": ""
} |
# 02-warmup-sol.ipynb
Repository: hanisaf/mist5730-6380-spring2020
Refer to [the University of Georgia by the Numbers Page](https://www.uga.edu/facts.php)
Reconstruct (most of) this page using markdown in this notebook.
# UGA by the Numbers
**Founded:**
> January 27, 1785, by the Georgia General Assembly. UGA is the birthplace of public higher education in America.
**Location:**
> Athens, Georgia (Clarke County), about 60 miles northeast of downtown Atlanta.
**Size:**
> Main campus: 465 buildings on 762 acres.
> Total acreage in 31 Georgia counties: 39,743 acres
**Work Force**
> | | |
> |----------------------------------------------------------|-------------------|
> | **Faculty (instruction/research/public service)** | 3,119 |
> | **Administrative/other professional** | 3,213 |
> | **Technical/clerical/crafts/maintenance** | 4,524 |
> | **Total** | **10,856** |
**Annual Budget (FY 2018):**
> $1.64 billion
**Research (FY 2017)**
> Research and development expenditures: \$455 million
>
> Licensing Revenue: $10.6 million
**Schools and Colleges**
> The University of Georgia is a comprehensive land and sea grant institution composed of 17 schools and colleges. They are, in order of founding:
>
> - Franklin College of Arts and Sciences, 1801
> - College of Agricultural and Environmental Sciences, 1859
> - School of Law, 1859
> - College of Pharmacy, 1903
> - Daniel B. Warnell School of Forestry and Natural Resources, 1906
> - College of Education, 1908
> - Graduate School, 1910
> - C. Herman and Mary Virginia Terry College of Business, 1912
> - Henry W. Grady College of Journalism and Mass Communication, 1915
> - College of Family and Consumer Sciences, 1918
> - College of Veterinary Medicine, 1946
> - School of Social Work, 1964
> - College of Environment and Design, 1969
> - School of Public and International Affairs, 2001
> - College of Public Health, 2005
> - Eugene P. Odum School of Ecology, 2007
> - College of Engineering, 2012
>
> The university is also home to the Augusta University/University of Georgia Medical Partnership.
| {
"filename": "02-warmup-sol.ipynb",
"repository": "hanisaf/mist5730-6380-spring2020",
"query": "transformed_from_existing",
"size": 3939,
"sha": ""
} |
# SIMS_tutorial_4.ipynb
Repository: braingeneers/SIMS
## **SIMS Tutorial**
In this tutorial, we will walk through the [SIMS (Scalable, Interpretable Machine Learning for Single Cell)](https://www.cell.com/cell-genomics/fulltext/S2666-979X(24)00165-4) pipeline step by step. SIMS is a deep learning-based tool built on TabNet, a transformer-based model optimized for tabular data. It is designed to classify single-cell RNA sequencing (scRNA-seq) data while providing interpretable predictions.
By following this tutorial, you will learn how to:
- Download and prepare scRNA-seq data for SIMS
- Train a SIMS model to classify cell types
- Use the trained model to make predictions on new, unseen data
- Evaluate predictions using accuracy metrics and visualization tools
### **Before you begin**
The `scsims` package is only compatible with Python versions between 3.8 and 3.11. Run the following cell to ensure you are using a compatible version (we recommend Python 3.9 for optimal performance).
<code>
!python --version
</code>
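If you would rather have the notebook fail fast than eyeball the printed version, here is a minimal sketch of an explicit check (it simply restates the 3.8–3.11 range mentioned above):
<code>
import sys

# Sketch only: stop early if the interpreter is outside the range supported by scsims (3.8-3.11)
assert (3, 8) <= sys.version_info[:2] <= (3, 11), (
    f"scsims supports Python 3.8-3.11, but this kernel runs {sys.version.split()[0]}"
)
</code>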
If you're running this notebook for the first time, uncomment and execute the following line to **install the required packages:**
<code>
# !pip install --use-pep517 git+https://github.com/braingeneers/SIMS.git
</code>
Once the packages are installed, we can **import them:**
<code>
import scanpy as sc
import pandas as pd
import anndata as ad
import os
import torch
from scsims import SIMS
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from sklearn.model_selection import train_test_split
</code>
### **Step 1: Prepare data**
Before we can train our model, we need to prepare the data. This involves several key steps to ensure that the dataset is structured correctly and optimized for machine learning.
#### `1.a` Download data
The dataset we are using comes from a study published in eLife by Domingo-Gonzalez et al. (2020). This study investigated immune cells in the developing mouse lung at single-cell resolution. It contains only 4,052 cells, making it ideal for this notebook (large enough for meaningful training but small enough for computational efficiency).
🔗 Source:
- [UCSC Cell Browser website](https://cells.ucsc.edu/?bp=lung&org=Mouse+(M.+musculus)) – where we download the dataset.
- [Domingo-Gonzalez et al., 2020 (eLife)](https://elifesciences.org/articles/56890) – original study providing the data.
<code>
!curl -O https://cells.ucsc.edu/mouse-lung-immune/exprMatrix.tsv.gz
!curl -O https://cells.ucsc.edu/mouse-lung-immune/meta.tsv
</code>
#### `1.b` Load the Anndata object
In this step, we will load the gene expression matrix and associated metadata, clean the data, and convert it into an AnnData object for further processing.
<code>
# Load expression matrix
expr_matrix = pd.read_csv("exprMatrix.tsv.gz", sep="\t", index_col=0)
# Load metadata
meta = pd.read_csv("meta.tsv", sep="\t", index_col=0)
# This particular dataset stores gene names in a 'GENE|GENE' format
# This step removes duplicate gene names if they exist
if "|" in expr_matrix.index[0]:
    expr_matrix.index = expr_matrix.index.str.split('|').str[0]
# Transpose to make cells as rows and genes as columns (CELL x GENE)
expr_matrix = expr_matrix.T
# Ensure indices match
expr_matrix = expr_matrix.loc[meta.index]
# Create AnnData object
adata = ad.AnnData(expr_matrix)
# Add metadata to AnnData object
adata.obs = meta
# Show basic info
print(adata)
</code>
#### `1.c` Select target feature
In this example dataset, cell type labels are stored in the 'Cell Subtype' column, so we will remove other columns as they are not needed for training.
🔹 **Note:** If you're using a different dataset, replace 'Cell Subtype' with the appropriate column that contains your cell type annotations. Run `adata.obs.columns` to check available columns in your dataset before selecting the target feature.
<code>
class_label = 'Cell Subtype'
adata.obs = adata.obs[[class_label]] # Filter out other columns
unique_classes = adata.obs[class_label].unique()
print("Cell types: ", unique_classes)
</code>
#### `1.d` Preprocess the data
Now we need to preprocess the data to ensure it is clean, normalized, and ready for model training.
🔹 **Important:** The same preprocessing steps must be applied consistently to both the training dataset and any new data used for inference to ensure compatibility.
<code>
# Perform some light filtering
sc.pp.filter_cells(adata, min_genes=100)
sc.pp.filter_genes(adata, min_cells=3)
# Transform the data for model ingestion
sc.pp.normalize_total(adata)
sc.pp.log1p(adata)
sc.pp.scale(adata)
</code>
#### `1.e` Split data
In real-world applications, SIMS is typically used with two datasets: a **labeled** dataset for training and an **unlabeled** dataset for generating predictions. Since we don't have a separate unlabeled dataset for this example, we'll split our labeled dataset into a **training set** (used to train the model) and a **test set** (used to evaluate its performance). This allows us to assess how well SIMS generalizes to unseen data.
🔹 **Note:** If you are using your own dataset, do not split it. Instead, load your test data separately during the prediction step. Make sure to preprocess your test data using the same steps applied to your training data to maintain consistency and avoid errors.
<code>
# Split cells into train and test (ex: 80% train, 20% test)
train_cells, test_cells = train_test_split(adata.obs_names, test_size=0.2, random_state=42)
# Create training and "unlabeled" test datasets
adata_train = adata[train_cells].copy()
adata_test = adata[test_cells].copy()
</code>
### **Step 2: Train the model with SIMS**
Now that our dataset is preprocessed, we can train a machine learning model using SIMS. SIMS is built on [TabNet](https://arxiv.org/abs/1908.07442), a deep learning architecture optimized for tabular data. It allows us to classify immune cells based on gene expression while maintaining model interpretability.
#### `2.a` Initialize SIMS model
We first create a SIMS object using the training dataset (`adata_train`) and specify the cell type label column:
<code>
sims = SIMS(data=adata_train, class_label=class_label)
</code>
#### `2.b` Set up model parameters
Next, we configure the model using `setup_model()`:
<code>
sims.setup_model(n_a=64, n_d=64, weights=sims.weights)
</code>
- `n_a=64` and `n_d=64` set the widths of the attention embedding and the decision layer in the TabNet architecture (the number of decision steps is a separate parameter).
- `weights=sims.weights` ensures that the model adjusts for imbalanced cell types by weighting the loss inversely to label frequency. This helps the model learn rare cell types more effectively (see the illustrative sketch below).
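SIMS computes these weights internally; purely as an illustration of the idea (not the library's exact formula), inverse-frequency class weights could be derived from the training labels like this:
<code>
import numpy as np

# Illustration only: inverse-frequency weights, assuming adata_train and class_label from the cells above
labels = adata_train.obs[class_label].astype(str).values
classes, counts = np.unique(labels, return_counts=True)
illustrative_weights = counts.sum() / (len(classes) * counts)  # rarer classes receive larger weights
print(dict(zip(classes, np.round(illustrative_weights, 2))))
</code>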
#### `2.c` Define Checkpointing Strategy
Since training deep learning models takes time, we save the best version using a checkpointing system:
<code>
checkpoint_callback = ModelCheckpoint(
    dirpath="./sims",       # Save in the notebook's current directory
    filename="sims_model",  # File will be saved as 'sims_model.ckpt'
    save_top_k=1,           # Keep only the best checkpoint
    monitor="val_loss",     # Save the best model based on validation loss
    mode="min",             # Lower validation loss is better
)
</code>
#### `2.d` Configure the Training Process
We set up the training loop using a PyTorch Lightning trainer:
<code>
sims.setup_trainer(
    callbacks=[
        EarlyStopping(monitor="val_loss", patience=50),  # Stop training if validation loss does not improve for 50 epochs
        checkpoint_callback,  # Save the best model
    ],
    max_epochs=50,       # Complete training cycles through the dataset, can increase
    accelerator="cpu",   # Forces training on CPU, can switch to GPU/MPS if available
    devices=1,           # Uses one CPU/GPU device
    logger=False,        # Disable/enable logging
)
</code>
🔹 **Note:** Weights & Biases (WandB) logging has been disabled in this example for simplicity. However, if you want to use WandB logging, follow these steps **before configuring the training process** (in the previous cell):
1. Import WandbLogger: `from pytorch_lightning.loggers import WandbLogger`
2. Initialize and set offline to True: `logger = WandbLogger(offline=True)`
3. In `sims.setup_trainer()`, change code below to `logger=logger`
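Putting the three steps together, a minimal sketch (it mirrors the trainer configuration above, with logging enabled):
<code>
# Sketch of the three steps above; reuses the callbacks defined earlier in this notebook
from pytorch_lightning.loggers import WandbLogger

logger = WandbLogger(offline=True)  # offline=True avoids needing a live WandB session

sims.setup_trainer(
    callbacks=[
        EarlyStopping(monitor="val_loss", patience=50),
        checkpoint_callback,
    ],
    max_epochs=50,
    accelerator="cpu",
    devices=1,
    logger=logger,  # pass the WandB logger instead of False
)
</code>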
#### `2.e` Train and save model
Once everything is set up, we train the model with:
<code>
sims.train()
</code>
After training, we verify that a trained model was saved:
<code>
print("Saved model:", [f for f in os.listdir() if f.endswith(".ckpt")])
</code>
### **Step 3: Predict on new data with trained model**
#### `3.a` Load the Trained Model
To make predictions, we first load the trained SIMS model from the checkpoint:
<code>
sims = SIMS(weights_path="sims_model.ckpt", map_location=torch.device('cpu'))
</code>
- `weights_path="sims_model.ckpt"` loads the model checkpoint.
- `map_location=torch.device('cpu')` ensures the model runs on CPU (set to "cuda" if using a GPU).
#### `3.b` Predict cell types
We then use the model to predict cell types for the test dataset (`adata_test`). This returns a DataFrame containing predicted labels for each cell.
<code>
cell_predictions = sims.predict(adata_test)
</code>
#### `3.c` Align predictions with cell names, add predictions to AnnData object, and view
To ensure that the predictions are correctly indexed, we align them with `adata_test.obs_names`. We then merge the predictions with the metadata of the test dataset (`adata_test.obs`) and can view the predictions.
<code>
cell_predictions.index = adata_test.obs_names # Align predictions with `adata_test`
adata_test.obs = adata_test.obs.join(cell_predictions, rsuffix='_pred')
print(adata_test.obs.head())
</code>
### **Step 4: Evaluate and visualize**
Once we have predicted cell types, we can evaluate how well the model performed. Each of these steps is optional.
#### `4.a` Calculate model accuracy
We compute accuracy, which measures the proportion of correctly classified cells:
<code>
from sklearn.metrics import accuracy_score
accuracy = accuracy_score(adata_test.obs[class_label], adata_test.obs["pred_0"])
print(f"Model Accuracy: {accuracy:.2f}")
</code>
#### `4.b` Generate a classification report
A classification report provides detailed performance metrics for each cell type, such as:
- Precision – How many of the predicted cells are actually correct?
- Recall – How many of the true cells were correctly classified?
- F1-score – A balanced metric combining precision and recall.
<code>
from sklearn.metrics import classification_report
report = classification_report(
    adata_test.obs[class_label],
    adata_test.obs["pred_0"],
    zero_division=0  # Set to 0 instead of raising a warning
)
print(report)
</code>
#### `4.c` Compute and visualize confusion matrix
To see how predictions compare to actual labels, we compute a confusion matrix, where:
- Each row represents actual cell types.
- Each column represents predicted cell types.
- Diagonal values indicate correct classifications, while off-diagonal values show misclassifications.
<code>
from sklearn.metrics import confusion_matrix
import matplotlib.pyplot as plt
import seaborn as sns
# Compute confusion matrix
cm = confusion_matrix(adata_test.obs[class_label], adata_test.obs["pred_0"])
# Normalize by row (convert counts to percentages)
cm_normalized = cm.astype("float") / cm.sum(axis=1, keepdims=True)
# Get all unique cell types
labels = sorted(adata_test.obs[class_label].unique())
# Plot normalized confusion matrix
plt.figure(figsize=(8, 6))
sns.heatmap(cm_normalized, annot=True, fmt=".2f", xticklabels=labels, yticklabels=labels, cmap="Blues")
# Add labels and title
plt.xlabel("Predicted")
plt.ylabel("True")
plt.title("Normalized Confusion Matrix (Row Percentage)")
plt.show()
</code>
### **Step 5: View Explainability**
Interpreting machine learning models is critical, especially in biological research, where understanding why a model makes predictions can reveal important biological insights. SIMS uses TabNet's built-in explainability to identify which genes contribute most to its predictions.
The explainability matrix assigns an importance score to each gene based on how much it influenced the model's classification decisions. This allows us to determine the top contributing genes and assess whether they align with known biological markers.
#### `5.a` Generate the Explainability Matrix
We will use `sims.explain()` to compute feature importance scores for each gene. This produces an explainability matrix, where each value represents how much a specific gene contributed to the model’s classification decision for a given cell. We then convert it to a pandas DataFrame for easy manipulation and compute the average importance score for each gene across all cells.
<code>
# Run explainability analysis
explainability_matrix = sims.explain(adata_test, num_workers=0, batch_size=32)[0]
# Convert explainability matrix to a DataFrame
explain_df = pd.DataFrame(explainability_matrix, columns=sims.model.genes)
# Compute average gene importance
gene_importance = explain_df.mean(axis=0)
</code>
#### `5.b` View most influential genes
Now that we have computed gene importance scores, we can identify and print the top n most influential genes (the ones that contributed the most to the model’s classification decisions).
<code>
top10_genes = gene_importance.nlargest(10) # Can increase to top 20, 30, etc.
print("Top 10 most important genes:")
print(top10_genes)
</code>
| {
"filename": "SIMS_tutorial_4.ipynb",
"repository": "braingeneers/SIMS",
"query": "transformed_from_existing",
"size": 119177,
"sha": ""
} |
# HiDENSEC.ipynb
Repository: songlab-cal/HiDENSEC
# Global Variables & Function Definitions
These global definitions require evaluation before running HiDENSEC on any concrete Hi-C map.
## Modules
<code>
import numpy as np
import scipy.sparse as sp_sparse
import scipy.signal as sp_signal
import scipy.ndimage as sp_image
from scipy.stats import kendalltau
import scipy.stats as sp_stats
from tqdm import tqdm  # progress bar used in estimate_proportion_ploidy below
</code>
<code>
rng = np.random.default_rng()
</code>
## Global variables
Load file paths, covariates & centromere locations.
<code>
chromosomelocations = np.loadtxt('chromosomelocations.txt')-1
compartmentnames = np.loadtxt('compartmentnames.txt', dtype='str')
compartments = np.loadtxt('compartments.txt', dtype='str')
centromers = np.loadtxt('centromers.txt')-1
newCentromers = np.loadtxt('newCentromers.txt')-1
rPos = np.loadtxt('rPos.txt')-1
fPos = np.loadtxt('fPos.txt')-1
acrox = np.loadtxt('acrox.txt')
corrchromlocations = np.loadtxt('corrchromlocations.txt')-1
centromersCorrected = np.loadtxt('centromersCorrected.txt')-1
covariates = np.loadtxt('covariates.csv', delimiter=',')
excursionLengthsH0 = np.loadtxt('excursionLengthsH0.csv', delimiter=',')
fixCdata = np.loadtxt('fixCdata.csv', delimiter=',', dtype='object')
hiCdata = np.loadtxt('hiCdata.csv', delimiter=',', dtype='object')
</code>
## Covariate Correction
Function definitions pertaining to covariate regression.
<code>
def partitionByIndex(list, indices):
    return np.split(list, indices)
def partitionByLength(list, lengths):
    return partitionByIndex(list, np.cumsum(lengths))
def findDiploidPeak(data, window=[0.1,1,0.1]):
    clean_data = data[data>0]
    scale = np.median(np.abs(clean_data - np.median(clean_data)))
    modes = []
    for c in np.arange(window[0], window[1] + window[2], window[2]):
        counts, bins = np.histogram(clean_data, bins=np.arange(clean_data.min(), clean_data.max(), c*scale))
        mode_index = int(np.median(np.argmax(counts)))
        modes.append((bins[mode_index] + bins[mode_index+1])/2)
    return np.mean(modes)
def filterPosition(dataset, cutthreshold=75, GCthreshold=0.32, mapthreshold=0.8):
    covariate_part = dataset[:, 1:4].astype(float)
    compartment_part = dataset[:,4]
    return (covariate_part[:,0]>cutthreshold) & (covariate_part[:,1]>GCthreshold) & (covariate_part[:,2]>mapthreshold) & (compartment_part!='A0')
def covariateFilter(dataset, cutthreshold=75, GCthreshold=0.32, mapthreshold=0.8):
    return dataset[filterPosition(dataset, cutthreshold, GCthreshold, mapthreshold)]
def covariateCorrection1(dataset, cutthreshold=75, GCthreshold=0.32, mapthreshold=0.8, midpointwindow=[0.1,1,0.1], neighbourhood=0.3, filterlength=100):
    data = covariateFilter(dataset, cutthreshold, GCthreshold, mapthreshold)
    midpoints = [ findDiploidPeak(data[data[:,4] == compartment_name, 0].astype(float), midpointwindow) for compartment_name in compartmentnames[1:] ]
    inserted_midpoints = np.copy(data[:,-1])
    for j in range(5):
        inserted_midpoints[inserted_midpoints == compartmentnames[1+j]] = midpoints[j]
    compartmentcorrected = 2*data[:,0].astype(float)/inserted_midpoints.astype(float)
    predictions = np.copy(data[:, -1])
    for compartment_name in compartmentnames[1:]:
        compartment_indices = (data[:, -1] == compartment_name) & (np.abs(compartmentcorrected - 2) < neighbourhood)
        compartment_covariates = data[compartment_indices][:, [1,2]].astype(float)
        compartment_values = compartmentcorrected[compartment_indices].astype(float)
        design_mat = np.concatenate( [np.ones([compartment_indices.sum(),1]), compartment_covariates], axis=1 )
        fit = np.linalg.lstsq(design_mat, compartment_values, rcond=None)
        full_compartment_indices = (data[:, -1] == compartment_name)
        full_compartment_covariates = data[full_compartment_indices][:, [1,2]].astype(float)
        full_design_mat = np.concatenate( [np.ones([full_compartment_indices.sum(),1]), full_compartment_covariates], axis=1 )
        predictions[predictions == compartment_name] = full_design_mat @ fit[0]
    fully_corrected_data = 2*compartmentcorrected / predictions.astype(float)
    fully_corrected_data = 2*fully_corrected_data / findDiploidPeak(fully_corrected_data)
    return fully_corrected_data
def attachCovariates(profile):
    return np.concatenate([profile[:,None], np.transpose(covariates), compartments[:,None]], axis=1)
def rawToCorrected(x, filter):
    if type(x) == int:
        return int(np.median(np.argmin(np.abs(x-filter))))
    else:
        return [int(np.median(np.argmin(np.abs(y-filter)))) for y in x]
def xToChi(x):
    return np.argmax(x < corrchromlocations[1:])+1
def dataCcorrector(profile, data_flag, cutthreshold=75, GCthreshold=0.32, mapthreshold=0.8, midpointwindow=[0.1,1,0.1], neighbourhood=0.3, filterlength=100):
    test_data = attachCovariates(profile)
    test_data = covariateFilter(test_data, cutthreshold, GCthreshold, mapthreshold)
    if data_flag == 'HiC':
        reference_data = np.copy(hiCdata)
    elif data_flag == 'FixC':
        reference_data = np.copy(fixCdata)
    else:
        return 'Unknown protocol'
    reference_data = reference_data[:, [4, 1, 2, 3, 0]]
    data = covariateFilter(reference_data, cutthreshold, GCthreshold, mapthreshold)
    midpoints = [ findDiploidPeak(data[data[:,4] == ('"'+compartment_name+'"'), 0].astype(float), midpointwindow) for compartment_name in compartmentnames[1:] ]
    inserted_midpoints = np.copy(data[:,-1])
    for j in range(5):
        inserted_midpoints[inserted_midpoints == ('"'+compartmentnames[1+j]+'"')] = midpoints[j]
    compartmentcorrected = 2*data[:,0].astype(float)/inserted_midpoints.astype(float)
    test_inserted_midpoints = np.copy(test_data[:,-1])
    for j in range(5):
        test_inserted_midpoints[test_inserted_midpoints == (compartmentnames[1+j])] = midpoints[j]
    test_compartmentcorrected = 2*test_data[:,0].astype(float)/test_inserted_midpoints.astype(float)
    predictions = np.copy(test_data[:, -1])
    for compartment_name in compartmentnames[1:]:
        compartment_indices = (data[:, -1] == ('"'+compartment_name+'"')) & (np.abs(compartmentcorrected - 2) < neighbourhood)
        compartment_covariates = data[compartment_indices][:, [1,2]].astype(float)
        compartment_values = compartmentcorrected[compartment_indices].astype(float)
        design_mat = np.concatenate( [np.ones([compartment_indices.sum(),1]), compartment_covariates], axis=1 )
        fit = np.linalg.lstsq(design_mat, compartment_values, rcond=None)
        full_compartment_indices = (test_data[:, -1] == compartment_name)
        full_compartment_covariates = test_data[full_compartment_indices][:, [1,2]].astype(float)
        full_design_mat = np.concatenate( [np.ones([full_compartment_indices.sum(),1]), full_compartment_covariates], axis=1 )
        predictions[predictions == compartment_name] = full_design_mat @ fit[0]
    fully_corrected_data = 2*test_compartmentcorrected / predictions.astype(float)
    fully_corrected_data = 2*medianFilter(fully_corrected_data, 100) / findDiploidPeak(medianFilter(fully_corrected_data, 100))
    return fully_corrected_data
</code>
## Copy number & Mixture Proportion Estimation
Function definitions calculating effective copy number profiles.
<code>
def medianDeviation(list):
    return np.median(np.abs(list - np.median(list)))
def splitList(list, index):
    return ([list[:index], list[index:]], [index / len(list), 1-index/len(list)])
def findRestrictedMedian(list, candidates):
    epsilon_list = [np.mean(np.abs(list - candidate)) for candidate in candidates]
    minPos = np.argmin(epsilon_list)
    return (candidates[minPos], epsilon_list[minPos], medianDeviation(list))
def findSplit(data, candidates):
    restricted_medians = []
    for j in range(2, len(data) - 1):
        splits = splitList(data, j)
        split_deviations = [np.sum(findRestrictedMedian(split, candidates)[1:]) for split in splits[0]]
        restricted_medians.append(np.array(split_deviations)@np.array(splits[1]))
    return (np.argmin(restricted_medians)+1, np.min(restricted_medians))
def paddedPartition(list, window):
    a = [list[:window] for _ in range(round((window-1)/2))]
    b = [list[j:window+j] for j in range(len(list)-window+1)]
    c = [list[:window] for _ in range(round((window-1)/2))]
    return np.concatenate([a,b,c])
def copyNumberFilter(data, candidates, window=101):
    return [findRestrictedMedian(part, candidates) for part in paddedPartition(data, window)]
def copyNumberVariance(data, candidates, window=101):
    restricted_medians = [findRestrictedMedian(part, candidates) for part in paddedPartition(data, window)]
    return np.mean([z[0]*z[1]/z[2] for z in restricted_medians])
def listCandidates(f, maxPloidy):
    return 2*(1-f)+f*np.arange(1, maxPloidy+1)
def computeChangePoints(profile):
    return np.arange(len(profile)-1)[np.abs(np.diff(profile)) > 0]
def refineChangePoint(data, pt, candidates, window=250, replicates=100):
    w = round((window-1)/2)
    split = findSplit(data[pt-w:pt+w+1], candidates)
    a = pt-w-1+split[0]
    replicates = [findSplit(rng.choice(data[pt-w:pt+w+1], 2*w+1), candidates)[-1] for _ in range(replicates)]
    return (a, (replicates > split[-1]).sum() / len(replicates))
def refineChangePoints(data, pts, candidates, window=250, replicates=100):
    extended_pts = np.concatenate(([0], pts, [len(rPos)-1]))
    parts = [extended_pts[j:j+3] for j in range(len(extended_pts)-2)]
    ws = [ np.min(np.append(np.diff(part)/2, window)) for part in parts ]
    res = []
    for pt, w in zip(pts, ws):
        if w <= 3.5:
            res.append((pt, 0))
        else:
            res.append(refineChangePoint(data, pt, candidates, w, replicates))
    return res
def refineProfile(profile, pts):
    extended_pts = np.concatenate(([0], pts, [len(rPos)]))
    refined_segments = []
    for j in range(len(extended_pts)-1):
        a = int(extended_pts[j])
        b = int(extended_pts[j+1])
        values, counts = np.unique(profile[a:b+1], return_counts=True)
        index = np.argmax(counts)
        refined_segments = np.append(refined_segments, [values[index] for _ in range(b-a)])
    return refined_segments
def flattenExcursions(profile, threshold=200):
    pts = computeChangePoints(profile)
    partitioned_profile = partitionByIndex(profile, pts)
    pos = np.array([ (len(part) <= threshold) for part in partitioned_profile])
    flattened_profile = []
    for j, part in enumerate(partitioned_profile):
        n = len(part)
        if (n<=threshold) & (0 < j < len(partitioned_profile)-1):
            flattened_profile.append(np.concatenate(([partitioned_profile[j-1][0] for _ in range(round(n/2))], [partitioned_profile[j+1][0] for _ in range(round((n+1)/2)) ])))
        else:
            flattened_profile.append(partitioned_profile[j])
    return np.concatenate(flattened_profile)
def rankExcursions(data, refinedProfile, changePoints):
    partitioned_profile = partitionByIndex(refinedProfile, changePoints[:,0].astype(int))
    partitioned_data = partitionByIndex(data, changePoints[:,0].astype(int))
    H0 = data[(1.99 <= refinedProfile) & (refinedProfile <= 2.01)]
    extended_change_points = np.concatenate([[0.5], changePoints[:,1], [0.5]])
    intervalP = [(extended_change_points[j] + extended_change_points[j+1]) for j in range(len(extended_change_points)-1)]
    def extractBulk(list):
        n = np.max([round(0.2*len(list)),1])
        return list[n:-n]
    def findClosestDiploidBlock(blocks, k):
        a, b = np.nonzero(blocks < k)[0], np.nonzero(blocks > k)[0]
        if len(a)>0:
            if len(b)>0:
                return [blocks[a[-1]], blocks[b[0]]]
            else:
                return [blocks[a[-1]]]
        elif len(b)>0:
            return [blocks[b[0]]]
        else:
            return []
    block_ploidy = np.array([part[0] for part in partitioned_profile])
    diploidBlocks = np.nonzero((block_ploidy <= 2.01) & (1.99 <= block_ploidy))[0]
    stat_vals_1 = []
    for part_profile, part_data, j in zip(partitioned_profile, partitioned_data, range(len(partitioned_profile))):
        a = np.mean((excursionLengthsH0 <= len(part_profile)))
        sample_data = np.concatenate([partitioned_data[j] for j in findClosestDiploidBlock(diploidBlocks, j)])
        sampled_data = rng.choice(sample_data, size=(100, len(extractBulk(part_profile))))
        dataH0 = [medianDeviation(sampled_point) for sampled_point in sampled_data]
        threshold = 2 / part_profile[0] * medianDeviation(extractBulk(part_data))
        b = np.mean(dataH0 > threshold)
        stat_vals_1.append(np.max([a,b])**2)
    def uniformSumCDF(x):
        return np.piecewise(x, [x < 0, (0 <= x) & (x < 1), (1 <= x) & (x < 2), 2 <= x], [0, lambda x: x**2/2, lambda x: 1-0.5*(2-x)**2, 1])
    stat_vals_2 = uniformSumCDF(np.array([interval.sum() for interval in intervalP]))
    stat_vals = stat_vals_1 + stat_vals_2
    return uniformSumCDF(stat_vals)
def medianFilter(data, window):
    return np.array([np.median(data[j:j+window]) for j in range(len(data)-window+1)])
def extractExcursions(profile, points):
    extended_points = np.concatenate([[0], points, [len(rPos)]])
    excursion_indices = [[extended_points[j], extended_points[j+1]] for j in range(len(extended_points)-1)]
    excursions = partitionByIndex(profile, points)
    excursion_means = np.array([np.mean(excursion) for excursion in excursions])
    return np.arange(len(excursion_indices))[np.abs(excursion_means - 2) > 0.001]
def excursionLengths(profile, points):
    pos = extractExcursions(profile, points)
    return [len(partitionByIndex(profile, points)[j]) for j in pos]
def benjaminiHochberg(pvalues, alpha):
    ps = np.sort(1-pvalues) - alpha / len(pvalues) * np.arange(1, len(pvalues)+1)
    indices = np.nonzero(ps > 0)[0]
    if len(indices) == 0:
        return 0
    else:
        return 1+indices[0]
def confidenceInterval(data, profile, f, resample_size=100):
    pts = computeChangePoints(profile)
    partitioned_profile = partitionByIndex(profile, pts)
    partitioned_data = partitionByIndex(data, pts)
    pos = extractExcursions(profile, pts)
    partitioned_profile_pos = [partitioned_profile[j] for j in pos]
    excursion_ploidy = np.abs([ 2 - (np.unique(profile_part)[0] - 2*(1-f)) / 2 for profile_part in partitioned_profile_pos ])
    excursion_data = [partitioned_data[j] for j in pos]
    excursion_lengths = np.array([len(profile_part) for profile_part in partitioned_profile_pos])
    resample_sizes = resample_size * excursion_lengths / np.sum(excursion_lengths)
    fluctuations = []
    for data_point, size, ploidy in zip(excursion_data, resample_sizes, excursion_ploidy):
        fluctuations = np.append(fluctuations, [np.abs(np.median(rng.choice(data_point, len(data_point))) - 2) / ploidy for _ in range(int(size))])
    return 2*medianDeviation(fluctuations)
def findThreshold(pvalues, alpha):
    delta = 1
    iter = 1
    sorted_ps = np.sort(pvalues)[::-1]
    while (delta>0) & (iter<np.min([1000, len(pvalues)-1])):
        delta = benjaminiHochberg(sorted_ps[:iter+1], alpha) - benjaminiHochberg(sorted_ps[:iter], alpha)
        iter+=1
    return iter
def estimate_proportion_ploidy(rowsums, maxPloidy):
    data = rowsums
    y = int(corrchromlocations[10])
    smoothed_data = 2*medianFilter(data[:y], 100) / findDiploidPeak(medianFilter(data[:y], 100))
    sigma = []
    for j in tqdm(np.arange(0,1.01,0.01)):
        sigma.append(copyNumberVariance(smoothed_data, 2*(1-j) + j*np.arange(1, maxPloidy+1), 251))
    f = np.arange(0, 1.01, 0.01)[np.argmin(sigma)]
    pi0 = np.array(copyNumberFilter(rowsums, 2*(1-f) + f*np.arange(1, maxPloidy+1), 501))
    xs = np.array(refineChangePoints(data, computeChangePoints(pi0[:,0]), listCandidates(f, maxPloidy), 250))
    pi1 = refineProfile(pi0[:,0], xs[:,0])
    ps = rankExcursions(data, pi1, xs)
    return pi1, f, ps
</code>
## Off-diagonal detection
Function definitions detecting fusion events of type (a) and (b)
<code>
def roundToChromosome(x,y):
    chiRange = [xToChi(z) for z in range(x,y+1)]
    chis, counts = np.unique(chiRange, return_counts=True)
    chi = int(np.median(chiRange))
    if np.max(counts / np.sum(counts)) < 0.6:
        acs = []
        for j in chis:
            a = np.max([x, corrchromlocations[j-1]])
            b = np.min([y, corrchromlocations[j] - 1])
            if corrchromlocations[xToChi(b)]-1-b < 50:
                c = corrchromlocations[xToChi(b)] - 1
            else:
                c = b
            acs.append([a,c])
        acs = np.array(acs, dtype=int)
        acs = acs[(acs[:,1] - acs[:,0]) > 200]
        return acs
    else:
        chi_nearest_range = np.arange(corrchromlocations[chi-1], corrchromlocations[chi]-1)
        nearest_x = chi_nearest_range[np.abs(chi_nearest_range - x).argmin()]
        nearest_y = chi_nearest_range[np.abs(chi_nearest_range - y).argmin()]
        if corrchromlocations[xToChi(nearest_x)]-1-nearest_x < 50:
            a = corrchromlocations[xToChi(nearest_x)]-1
        else:
            a = nearest_x
        if corrchromlocations[xToChi(nearest_y)]-1-nearest_x < 50:
            b = corrchromlocations[xToChi(nearest_y)]-1
        else:
            b = nearest_y
        return np.array([a, b], dtype=int)
def scanBlock1(block, ws):
    ws_prod = np.product(ws)
    return sp_signal.oaconvolve(np.ones(ws), block.toarray(), mode='valid') / ws_prod
def scanBlock2(mat, chi1, chi2, ws):
    a = corrchromlocations[chi1-1]
    b = corrchromlocations[chi1]
    c = corrchromlocations[chi2-1]
    d = corrchromlocations[chi2]
    ws_prod = np.product(ws)
    return sp_signal.oaconvolve(np.ones(ws), mat.toarray()[int(a):int(b), int(c):int(d)], mode='valid') / ws_prod
def computeIntensitySquare(mat, x, y, ws):
    a = np.max([x-ws[0]+1, 0])
    b = np.min([x+ws[0]+1, len(rPos)])
    c = np.max([y-ws[1]+1, 0])
    d = np.min([y+ws[1]+1, len(rPos)])
    mat11 = mat[a:x+1, c:y+1]
    mat12 = mat[a:x+1, y+1:d]
    mat21 = mat[x+1:b, c:y+1]
    mat22 = mat[x+1:b, y+1:d]
    return np.array([[mat11.mean(), mat12.mean()], [mat21.mean(), mat22.mean()]])
def detectPattern(intensitySquare):
    normalization_constant = intensitySquare.sum()
    if normalization_constant <= 10**(-8):
        return 0.
    else:
        normalized_densities = np.concatenate(intensitySquare) / normalization_constant
        return np.max(normalized_densities)
def rowPattern(mat, x, ws):
    chi = xToChi(x)
    n = len(rPos)
    w = ws[-1]
    y_indices = np.concatenate([np.arange(0, corrchromlocations[chi-1]), np.arange(corrchromlocations[chi], n)])
    r_block = mat[x-ws[0]+1:x+1, y_indices]
    r = np.concatenate(scanBlock1(r_block, ws))
    s_block = mat[x+1:x+ws[0]+1, y_indices]
    s = np.concatenate(scanBlock1(s_block, ws))
    detected_patterns = []
    for j in range(len(r)-w):
        pattern_mat = np.array([[r[j], r[j+w]], [s[j], s[j+w]]])
        detected_patterns.append(detectPattern(pattern_mat))
    return np.array(detected_patterns)
def testTreeStructure(mat, x, y, w, resamples=100):
    submatrix = mat[np.max([x-w[0],0]):np.min([x+w[0]+1, len(rPos)]), np.max([y-w[1],0]):np.min([y+w[1]+1, len(rPos)])]
    rowMarginals = submatrix.mean(axis=1)
    columnMarginals = submatrix.mean(axis=0)
    rowStatistic = np.mean([np.var(rowMarginals[:w[0]]) * w[0] / (w[0]-1), np.var(rowMarginals[w[0]+1:]) * len(rowMarginals[w[0]+1:]) / (len(rowMarginals[w[0]+1:]) - 1)])
    columnStatistic = np.mean([np.var(columnMarginals[:w[0]]) * w[0] / (w[0]-1), np.var(columnMarginals[w[0]+1:]) * len(columnMarginals[w[0]+1:]) / (len(columnMarginals[w[0]+1:]) - 1)])
    resampledRowStatistic = []
    resampledColumnStatistic = []
    for _ in range(resamples):
        permutedRows = rng.permutation(rowMarginals)
        permutedColumns = rng.permutation(columnMarginals)
        resampledRowStatistic.append(np.mean([np.var(permutedRows[:w[0]]) * w[0] / (w[0]-1),
                                              np.var(permutedRows[w[0]+1:]) * len(permutedRows[w[0]+1:]) / (len(permutedRows[w[0]+1:]) - 1)]))
        resampledColumnStatistic.append(np.mean([np.var(permutedColumns[:w[0]]) * w[0] / (w[0]-1),
                                                 np.var(permutedColumns[w[0]+1:]) * len(permutedColumns[w[0]+1:]) / (len(permutedColumns[w[0]+1:]) - 1)]))
    return np.mean([np.mean(resampledRowStatistic > rowStatistic), np.mean(resampledColumnStatistic > columnStatistic)])
def extractButterflyCandidate(mat, chi1, chi2, ws, componentDepth=10):
    blockScan = scanBlock2(mat, chi1, chi2, ws)
    components = sp_image.label(np.where(blockScan > np.median(blockScan), blockScan, 0))[0]
    component_groups, component_counts = np.unique(components, return_counts=True)
    size_threshold = np.sort(component_counts)[-componentDepth]
    large_groups = component_groups[component_counts >= size_threshold]
    large_group_sizes = component_counts[component_counts >= size_threshold]
    group_maxs = [[np.unravel_index(np.where(components == group, blockScan, 0).argmax(), blockScan.shape), round(np.sqrt(size/2))]
                  for group, size in zip(large_groups, large_group_sizes)]
    return group_maxs
def pickLargeCandidate(mat, x, y, ws):
    localMat = mat[np.max([x-ws[0], 1]):np.min([x+ws[0], len(rPos)]), np.max([y-ws[1], 1]):np.min([y+ws[1], len(rPos)])]
    return np.array(np.unravel_index(localMat.argmax(), localMat.shape)) + np.array([1+x,1+y]) - np.array([1+ws[0], 1+ws[1]])
def butterflySummary(mat, x, y, ws):
    intensitySquare = computeIntensitySquare(mat, x, y, ws)
    if intensitySquare.sum() <= 0:
        intensitySquare = np.zeros([2,2])
    else:
        intensitySquare = intensitySquare / intensitySquare.sum()
    return -np.linalg.det(intensitySquare)
def findButterflySummary(mat, x, y, ws, symmetry=0):
    a = np.max([0, x-ws[0]])
    b = np.min([len(rPos), x+ws[0]+1])
    c = np.max([0, y-ws[1]])
    d = np.min([len(rPos), y+ws[1]+1])
    localMat = np.array([[butterflySummary(mat, j, k, [50, 50]) for k in np.arange(c,d)] for j in np.arange(a,b)])
    if symmetry == 0:
        argmax = np.unravel_index(localMat.argmax(), localMat.shape)
        return [np.array(argmax) + np.array([x, y]) - np.array(ws), localMat[argmax]]
    else:
        argmin = np.unravel_index(localMat.argmin(), localMat.shape)
        return [np.array(argmin) + np.array([x, y]) - np.array(ws), localMat[argmin]]
    return localMat
def treeStatistic(mat, x, y, w):
    a11 = mat[np.max([0, x-w-1]):x, np.max([0, y-w]):y+1][::-1]
    a12 = np.transpose(mat[np.max([0, x-w-1]):x, y+1:np.min([len(rPos), y+w+2])])
    a21 = mat[x:np.min([len(rPos), x+w+1]), np.max([0, y-w]):y+1]
    a22 = np.transpose(mat[x:np.min([len(rPos), x+w+1]), y+1:np.min([len(rPos), y+2+w])][::-1])
    quad_stats = []
    for quad in [a11, a12, a21, a22]:
        w_steps, diags = np.transpose(np.array([[j, np.diagonal(quad.toarray(), j).mean()] for j in np.arange(-w, w+1)]))
        if np.var(diags) <= 0:
            quad_stats.append(0)
        else:
            quad_stats.append(kendalltau(w_steps, diags).statistic)
    tree_stats = (1 + np.array(quad_stats).reshape([2,2]))/2
    return -np.linalg.det(tree_stats)
def detectButterfly(intensitySquare, symmetry=0):
    partition_sum = intensitySquare.sum()
    if partition_sum == 0:
        normalized_square = [[0,0], [0,0]]
    else:
        normalized_square = intensitySquare / partition_sum
    if symmetry == 0:
        return [normalized_square[1,0], normalized_square[0,1]]
    else:
        return [normalized_square[0,0], normalized_square[1,1]]
def testButterflyStructure(mat, x, y, w, resamples=100):
    submatrix = mat[np.max([0, x-w[0]]):np.min([len(rPos), x+w[0]+1]), np.max([0, y-w[1]]):np.min([len(rPos), y+w[1]+1])].toarray()
    def formSubmatrices(matrix):
        return matrix[:w[0], :w[1]], matrix[:w[0], w[1]+1:], matrix[w[0]+1:, :w[1]], matrix[w[0]+1:, w[1]+1:]
    treeStat = np.mean([matrix.var() * matrix.size / (matrix.size - 1) for matrix in formSubmatrices(submatrix)])
    permutedTreeStat = []
    for _ in range(resamples):
        n, m = submatrix.shape
        permutedSubmatrix = submatrix[rng.permutation(n), :][:, rng.permutation(m)]
        permutedTreeStat.append(np.mean([matrix.var() * matrix.size / (matrix.size - 1) for matrix in formSubmatrices(permutedSubmatrix)]))
    return (permutedTreeStat > treeStat).mean()
def testButterflyCandidate(mat, point, ws, diagonalw=10, resamples=10, symmetry=0):
    largeCandidate = pickLargeCandidate(mat, *point, ws)
    chis = np.sort([xToChi(point[0]), xToChi(point[1])])
    if symmetry == 0:
        shift = np.array([1, 0])
        intensity_indices = np.array([[0,0], [0,-1], [1,-1], [1,0], [-1,1], [-1,2], [-2,2], [-2,1]])
    else:
        shift = np.array([0, 0])
        intensity_indices = np.array([[0,0], [0,-1], [-1,-1], [-1,0], [1,1], [1,2], [2,2], [2,1]])
    candidate = findButterflySummary(hic, *largeCandidate, np.round(np.array(ws) / 2).astype(int), symmetry)
    candidate = [candidate[0] + shift, candidate[1]]
    structure = testButterflyStructure(mat, *candidate[0], ws, resamples)
    treeStat = treeStatistic(mat, *candidate[0], diagonalw)
    blockScan = scanBlock2(mat, *chis, [3,3])
    blockScan = blockScan[blockScan > 10**(-10)]
    intensity_ratio_a = np.mean([mat[*index] for index in (candidate[0] + intensity_indices)]) - np.mean(blockScan)
    intensity_ratio_b = np.std(blockScan) * np.sqrt(len(blockScan) / (len(blockScan) - 1))
    intensity_ratio = intensity_ratio_a / intensity_ratio_b
    detected_butterfly = detectButterfly(computeIntensitySquare(mat, *candidate[0], ws), symmetry)
    return [candidate[0][0], candidate[0][1], candidate[1], treeStat, structure, intensity_ratio, detected_butterfly[0], detected_butterfly[1]]
def calibrateButterflyTests(testMat, ratioThreshold=5, symmetry=0):
    flatten_test_matrix = testMat[:,:,[2,3]].reshape(21*21,2)
    flatten_test_matrix = flatten_test_matrix[(np.abs(flatten_test_matrix).mean(axis=1) > 0) & (flatten_test_matrix.mean(axis=1) != np.nan)]
    thetas = np.transpose([flatten_test_matrix.mean(axis=0), flatten_test_matrix.std(axis=0) * np.sqrt(len(flatten_test_matrix) / (len(flatten_test_matrix)-1))])
    calibrated_values = np.zeros((testMat.shape)[:2])
    if symmetry == 0:
        for j in range(len(testMat)):
            for k in range(len(testMat)):
                element = testMat[j, k]
                if (np.array_equal(element, np.zeros(8))) or (element[5] < ratioThreshold):
                    calibrated_values[j,k] = 0
                else:
                    a = 1-sp_stats.norm(loc=thetas[0,0], scale=thetas[0,1]).cdf(element[2])
                    b = 1-sp_stats.norm(loc=thetas[1,0], scale=thetas[1,1]).cdf(element[3])
                    c = element[4]
                    print(element)
                    calibrated_values[j,k] = np.min([a, b, c])
    return calibrated_values
</code>
# Analysis
Each of the following sections performs parts of the analysis described in the main paper.
## Load Hi-C Matrix
Replace "hi-c_matrix" with filename of interest
<code>
row_indices, col_indices, hic_vals = np.transpose(np.loadtxt('hi-c_matrix'))
relevant_indices = (row_indices < 57509 - 1) & (col_indices < 57509 - 1)
hic = sp_sparse.coo_array((hic_vals[relevant_indices], (row_indices[relevant_indices].astype(int)-1, col_indices[relevant_indices].astype(int)-1)), (57509, 57509))
hic = sp_sparse.csr_array(hic)
del row_indices, col_indices, hic_vals, relevant_indices
</code>
## Copy Number & Mixture Proportion Inference
Estimation of effective copy number profile.
<code>
# Un-comment the respective lines, if the experimental protocol (Fix-C or Hi-C) of the contact map in question is known.
rowsums = covariateCorrection1(attachCovariates(hic.diagonal()))
# rowsums = dataCcorrector(hic.diagonal(), 'FixC')
# rowsums = dataCcorrector(hic.diagonal(), 'HiC')
</code>
<code>
pi1, f, ps = estimate_proportion_ploidy(rowsums, 5)
</code>
## Off-diagonal inference
Filter Hi-C matrix by criteria specified in covariate correction
<code>
filtered_hic = hic[rPos,:][:,rPos]
</code>
### Type (a) events
<code>
# Extract change point candidates
candidates = np.abs(np.diff(medianFilter(pi1, 50))).nonzero()[0]
candidates = candidates[candidates < corrchromlocations[-1]]
aCandidates = []
extended_candidates = np.concatenate([[0], candidates, [len(rPos)]])
for j in range(1, len(extended_candidates)-1):
    if np.max([extended_candidates[j] - extended_candidates[j-1], extended_candidates[j+1] - extended_candidates[j]]) > 100:
        aCandidates.append(extended_candidates[j])
del candidates, extended_candidates, j
</code>
<code>
# Generate row-wise null distributions for extracted change-point candidates
mat = filtered_hic + filtered_hic.transpose() - sp_sparse.diags([filtered_hic.diagonal()], [0])
ws = [300, 300]
testPoints = np.concatenate([aCandidates, acrox]).astype(int)
aRow = []
for testPoint in testPoints:
    if testPoint < np.min(ws):
        aRow.append(rowPattern(mat, testPoint, [testPoint, testPoint]))
    else:
        aRow.append(rowPattern(mat, testPoint, ws))
del mat, ws, testPoints, testPoint
</code>
<code>
# Calculate summary statistics for each pair of change-point candidates
mat = filtered_hic + filtered_hic.transpose() - sp_sparse.diags([filtered_hic.diagonal()], [0])
ws = [300, 300]
testPoints = np.concatenate([aCandidates, acrox]).astype(int)
aSummary = np.nan * np.ones([len(testPoints), len(testPoints), 3])
for k in range(len(testPoints)):
    for j in range(len(testPoints)):
        x = testPoints[k]
        y = testPoints[j]
        rowSamples = aRow[k]
        columnSamples = aRow[j]
        target = detectPattern(computeIntensitySquare(mat, x, y, ws))
        if (x>=y) or (xToChi(x) >= xToChi(y)) or (np.max([len(testPoints) - k, len(testPoints) - j]) <= 3):
            aSummary[k, j] = [0, 0, 0]
        else:
            aSummary[k, j] = [(rowSamples <= target).mean(), (columnSamples <= target).mean(), testTreeStructure(mat, x, y, ws, 100) ]
del mat, ws, testPoints, k ,j
</code>
<code>
# Aggregate summary statistics into well-calibrated p-values
testPoints = np.concatenate([aCandidates, acrox]).astype(int)
aP = np.nan * np.ones([len(testPoints), len(testPoints)])
for k in range(len(testPoints)):
    for j in range(len(testPoints)):
        if (k>=j) or (xToChi(testPoints[k]) >= xToChi(testPoints[j])) or (np.abs(testPoints[k] - testPoints[j])<2000):
            aP[k,j] = 0
        else:
            aP[k,j] = np.min(aSummary[k,j])
del testPoints, k, j
</code>
### Type (b) events
<code>
# Extract candidate points
bCandidates = np.nan * np.ones([21, 19 + 2, 3])
for chi1 in range(1, 22):
    for chi2 in range(1, 22):
        if chi1 < chi2:
            [(pt1, pt2), size] = extractButterflyCandidate(filtered_hic, chi1, chi2, [50,50], 1)[0]
            bCandidates[chi1-1, chi2-1] = np.array([pt1, pt2, size])
del chi1, chi2, pt1, pt2, size
</code>
<code>
# Compute summary statistics
mat = filtered_hic
ws = [50, 50]
bP = np.nan * np.ones([2, 21, 21, 8])
for par in [0,1]:
    for chi1 in range(1, 22):
        for chi2 in range(1, 22):
            if chi1 >= chi2:
                bP[par, chi1-1, chi2-1] = np.zeros(8)
            else:
                bP[par, chi1-1, chi2-1] = testButterflyCandidate(mat, (corrchromlocations[[chi1-1, chi2-1]]+25+bCandidates[chi1-1, chi2-1, 1]).astype(int), ws, 10, 100, par)
del mat, ws, chi1, chi2, par
</code>
<code>
# Convert summaries into well-calibrated p-values
calibratedbP = np.array([calibrateButterflyTests(bP[0], 5, 0), calibrateButterflyTests(bP[1], 5, 1)])
</code>
<code>
# Report events corresponding to significant p-values
candidatesa = np.concatenate([aCandidates, acrox]).astype(int)
candidatesb = np.nan * np.ones([2, len(bP[0]), len(bP[0]), n, 5])
for par in [0,1]:
    for j in range(len(bP[0])):
        for k in range(len(bP[0])):
            candidatesb[par, j, k] = np.array([bP[par, j, k, 0], bP[par, j, k, 1], xToChi(bP[par, j, k, 0]), xToChi(bP[par, j, k, 1]), calibratedbP[par, j, k]])
list = np.nan * np.ones([len(candidatesa), len(candidatesa), 5])
for j in range(len(candidatesa)):
    for k in range(len(candidatesa)):
        list[j,k] = np.array([candidatesa[j], candidatesa[k], xToChi(candidatesa[j]), xToChi(candidatesa[k]), aP[j,k]])
list = np.concatenate([np.concatenate(list), np.concatenate(np.concatenate(candidatesb[0])), np.concatenate(np.concatenate(candidatesb[1]))])
list = list[list[:,-1] > 0]
list = list[list[:,-1].argsort()][::-1]
threshold = findThreshold(list[:,-1], 2)
</code>
| {
"filename": "HiDENSEC.ipynb",
"repository": "songlab-cal/HiDENSEC",
"query": "transformed_from_existing",
"size": 51034,
"sha": ""
} |
# DESeq2_4.ipynb
Repository: LucaMenestrina/DEGA
# DESeq2 Use Case
## Load Libraries
<code>
library("DESeq2")
library("genefilter")
</code>
Set variables (data from the [Bottomly et al.](https://journals.plos.org/plosone/article?id=10.1371/journal.pone.0017820) dataset)
<code>
GENE_COUNTS = "https://raw.githubusercontent.com/LucaMenestrina/DEGA/main/validation/bottomly_counts.csv" # "bottomly_counts.csv"
PHENO_DATA = "https://raw.githubusercontent.com/LucaMenestrina/DEGA/main/validation/bottomly_phenotypes.csv" # "bottomly_phenotypes.csv"
VAR_TO_TEST = "strain"
</code>
Load data
<code>
colData <- read.csv(PHENO_DATA, sep=",", row.names=1)
countData <- as.matrix(read.csv(GENE_COUNTS, row.names="X"))
# filter and sort countData columns on the basis of colData index
# (they have to be in the same order)
countData <- countData[, rownames(colData)]
</code>
Create DESeq2 object
<code>
dds <- DESeqDataSetFromMatrix(countData = countData, colData = colData, design = as.formula(paste("~", VAR_TO_TEST)))
</code>
Run the differential expression analysis
<code>
dds <- DESeq(dds)
res <- results(dds, alpha=0.05, lfcThreshold=0)
resS <- lfcShrink(dds, alpha=0.05, lfcThreshold=0, coef=2, type="normal")
</code>
<code>
summary(resS, alpha=0.05)
</code>
<code>
# Save results
# write.csv(res, "DESeq2_bottomlyResults.csv")
# write.csv(resS, "DESeq2_bottomlyWithShrinkageResults.csv")
</code>
## References
Love, M. I., Huber, W., Anders, S. (2014). Moderated estimation of fold change and dispersion for RNA-seq data with DESeq2. _Genome Biology_, _15_(12), 550. [https://doi.org/10.1186/S13059-014-0550-8/FIGURES/9](https://doi.org/10.1186/S13059-014-0550-8/FIGURES/9)
Bottomly, D. et al. (2011). Evaluating Gene Expression in C57BL/6J and DBA/2J Mouse Striatum Using RNA-Seq and Microarrays. _PLOS ONE_, _6_(3), e17820. [https://doi.org/10.1371/JOURNAL.PONE.0017820](https://doi.org/10.1371/JOURNAL.PONE.0017820)
| {
"filename": "DESeq2_4.ipynb",
"repository": "LucaMenestrina/DEGA",
"query": "transformed_from_existing",
"size": 14505,
"sha": ""
} |
# Project_未命名.ipynb
Repository: Peevin/TNBC
<code>
import scanpy as sc
import pandas as pd
import numpy as np
</code>
<code>
sc.settings.set_figure_params(dpi=300, facecolor='white')
</code>
<code>
adata = sc.read_h5ad('/Users/liupeiwen/BC/21 CC Single-cell analyses reveal key immune cell subsets associated with response to PD-L1 blockade in triple-negative breast cancer/data/cc.bc.sc.T.ann.h5ad')
</code>
<code>
adata.obs
</code>
<code>
np.unique(adata.obs['Sub_Cluster'])
</code>
<code>
adata.obs['label'] = np.where(adata.obs['Sub_Cluster']=='t_pDC-LILRA4','pDC',adata.obs['Global_Cluster'])
</code>
<code>
np.unique(adata.obs['label'])
</code>
<code>
sc.pp.normalize_total(adata,target_sum=1e6)
</code>
<code>
sc.pl.dotplot(adata,var_names=['TNFSF9','SIGLEC9'],groupby='label',save='.pdf')
</code>
| {
"filename": "Project_未命名.ipynb",
"repository": "Peevin/TNBC",
"query": "transformed_from_existing",
"size": 255361,
"sha": ""
} |
# taxonomy_explore_github_topics.ipynb
Repository: kuefmz/define
<code>
import pandas as pd
</code>
<code>
df = pd.read_csv('topics.csv')
</code>
<code>
df.head()
</code>
<code>
df.shape
</code>
<code>
print('Number of different topics on GitHub')
len(df['topic'].unique())
</code>
<code>
topic_counter = {}
for ind, row in df.iterrows():
    if row['topic'] not in topic_counter.keys():
        topic_counter[row['topic']] = row['num_pushers']
    else:
        topic_counter[row['topic']] += row['num_pushers']
</code>
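The row-by-row loop above works, but the same aggregation can be expressed with a single pandas `groupby`; here is a minimal equivalent sketch (assuming the same `df` with `topic` and `num_pushers` columns).
<code>
# Vectorized equivalent of the iterrows() aggregation above:
# sum num_pushers per topic, then sort in descending order.
topic_counts_series = (
    df.groupby('topic')['num_pushers']
      .sum()
      .sort_values(ascending=False)
)
topic_counts_series.head(20)
</code>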
<code>
print('Number of unique topics: ', len(topic_counter.keys()))
</code>
<code>
topic_counts = []
for k in topic_counter.keys():
#print(k, ' - ', topic_counter[k])
topic_counts.append((k, topic_counter[k]))
</code>
<code>
topic_counts.sort(key=lambda x: x[1], reverse=True)
</code>
<code>
topic_counts
</code>
| {
"filename": "taxonomy_explore_github_topics.ipynb",
"repository": "kuefmz/define",
"query": "transformed_from_existing",
"size": 48165,
"sha": ""
} |
# bioinformatics_bootcamp_2018_ATAC-seq-checkpoint_2.ipynb
Repository: ryanmarina/BMS
# BIOM 200 bioinformatics bootcamp - ATAC-seq analysis
* [(Pre-class) Introduction](#introduction)
* [(Pre-class) Installations](#installations)
* [(In-class) Data processing](#processing)
* [(In-class) Data analysis](#analysis)
* [(In class) Genome browser](#genomebrowser)
* [(Optional) Single cell ATAC-seq](#scatac)
## <a name="introduction"></a>Introduction
ATAC-seq is an assay that captures accessible chromatin first described in [Buenrostro et al. 2013](https://doi.org/10.1038/nmeth.2688), and it stands for Assay for Transposase-Accessible Chromatin using sequencing. It has become a popular assay because of its advantages over previous assays (DNAseI-seq and FAIRE-seq), such as the relatively easy protocol and low cellular input.
In this tutorial, we will cover:
* Processing ATAC-seq data
* High level summary using gene ontology
* Finding enriched transcription factor motifs
* Visualizing results on the UCSC genome browser
___

___
Unlike ChIP-seq, ATAC-seq is typically not run with a matched control, because a control would add little information. ATAC-seq is typically sequenced with paired-end sequencing for the following reasons:
* More sequence data leads to better alignment results. Many genomes contain numerous repetitive elements, and failing to align reads to certain genomic regions unambiguously renders those regions inaccessible to the assay.
* With ATAC-seq, we are interested in knowing the full span of the DNA fragments generated by the assay. A DNA fragment generated by the ATAC is typically longer than a sequence read, so a read will define only one end of the fragment. Therefore, with single-end sequencing, we would have to guess where the other end of the fragment is. Since paired-end sequencing generates reads from both ends, the full span of the DNA fragment is known precisely.
* PCR duplicates are identified more accurately. PCR duplicates are artifacts of the ATAC-seq procedure, and they should be removed as part of the analysis pipeline. Computational programs that remove PCR duplicates (e.g. Picard's MarkDuplicates) typically identify duplicates by comparing the ends of aligned reads, so knowing both ends of every fragment makes this identification much more reliable.
## <a name="installations"></a>Installations
### Command-line imports
TSCC has a few programs installed already, so all we have to do is load them. To check all available modules, you can type `module avail` on the command line. Add the following lines to your `.bashrc` file. These will automatically load the program [bwa](http://bio-bwa.sourceforge.net/bwa.shtml) and [bedtools](https://bedtools.readthedocs.io/en/latest/) each time you open a new terminal shell. We also need to tell bash to search in the following places for `trim_galore` and `homer`, which we will be using later.
```
PATH=$PATH:/oasis/tscc/scratch/biom200/bms_2018/programs
PATH=$PATH:/oasis/tscc/scratch/biom200/bms_2018/homer/bin
module load bwa
module load bedtools
```
Now open a new terminal window. Typing in `bwa` should bring up the user manual.
### Python installations
We will need to install a couple programs and modules.
```
conda install -c bioconda cutadapt macs2
```
### Index the reference genome for BWA
We will need an indexed reference genome for the BWA algorithm. Each aligner expects its own index format, so we can't simply reuse the one we built for STAR in the RNA-seq alignment section. In the interest of time, I have already downloaded and prepared a reference genome for you. We will just need to link it over from the common directory.
```
ln -s /oasis/tscc/scratch/biom200/bms_2018/bwa_ref/ ~/scratch/bwa_ref
```
If you want to prepare the reference genome yourself, download the GRCm38 (mm10) genome sequence from gencode and use `bwa index` to prepare it.
```
wget ftp://ftp.ebi.ac.uk/pub/databases/gencode/Gencode_mouse/release_M18/GRCm38.p6.genome.fa.gz
gunzip GRCm38.p6.genome.fa.gz
mv GRCm38.p6.genome.fa mm10.fa
bwa index mm10.fa
```
## <a name="processing"></a>Data processing
### Obtaining the datasets
These ATAC-seq datasets are from the ENCODE project website and were generated from the same samples that were used for the RNA-seq portion of this bootcamp.
You can find them here:
https://www.encodeproject.org/experiments/ENCSR347RZI/
https://www.encodeproject.org/experiments/ENCSR984HFU/
To simplify things, we will start with processing the 4 hour timepoint.
Create a new directory for ATAC-seq and create links to the `.fastq` files that were pre-downloaded.
```
mkdir -p ~/raw_data/ATAC/ ~/projects/ATAC/
ln -s /oasis/tscc/scratch/biom200/bms_2018/atac_seq/raw_data/*.fastq.gz ~/raw_data/ATAC/
cd ~/projects/ATAC/
```
### Trimming adapter sequences from reads
Adapter trimming is necessary because of the way ATAC-seq works (tagmentation, where the DNA is simultaneously fragmented and tagged with sequencing adapters). Adapters in the sequencing data may prevent the reads from aligning to the reference genome and adversely affect the downstream analysis.
For ATAC-seq data, we trim adapter sequence using a program called [trim_galore](https://github.com/FelixKrueger/TrimGalore/blob/master/Docs/Trim_Galore_User_Guide.md). The nice thing about this program is that it automatically detects which adapters are present in your reads (Nextera adapters for ATAC-seq).
We need to specify that our reads are paired-end (`--paired`). This could take a while, but it's not memory intensive. Submit this as a script (`qsub -V`).
```
#!/bin/bash
#PBS -q hotel
#PBS -N trim_adapters
#PBS -l nodes=1:ppn=4
#PBS -l walltime=1:00:00
#PBS -o trim_adapters.out
#PBS -e trim_adapters.err
trim_galore --paired --output_dir ~/projects/ATAC/ ~/raw_data/ATAC/dendritic.4_hour.R1.fastq.gz ~/raw_data/ATAC/dendritic.4_hour.R2.fastq.gz
```
The program will take a while to run, but in the end it should output two new files in the `~/projects/ATAC/` directory with trimmed reads:
`dendritic.4_hour.R1_val_1.fq.gz` and `dendritic.4_hour.R2_val_2.fq.gz`.
___
### Align reads to the reference genome
We can now align the reads to a reference genome (mm10) and look for peaks of transposition activity. We will use the [bwa mem](http://bio-bwa.sourceforge.net/bwa.shtml) aligner and pipe the output to [samtools](http://www.htslib.org/doc/samtools.html) for an initial filter on high quality mapped reads with `samtools view`. We will next use `samtools fixmate`, which is necessary for the duplicate removal step later. We will finally sort by coordinate with `samtools sort` and use `samtools markdup` to mark and remove duplicate sequences. In this long chain of commands, we use `-` to indicate we want to read the output from the previous command.
We will need a copy of the mm10 reference genome prepared earlier (see installations for instructions for preparing a different reference genome, e.g. hg19 for human data).
*WARNING*: This command may take a long time to run, so submit it as a script. Use `qsub -V` to ensure that your paths get copied over to the job.
```
#!/bin/bash
#PBS -q hotel
#PBS -N bwa_mapping
#PBS -l nodes=1:ppn=16
#PBS -l walltime=1:00:00
#PBS -o bwa_mapping.out
#PBS -e bwa_mapping.err
module load bwa
bwa mem -t 16 ~/scratch/bwa_ref/mm10.fa ~/raw_data/ATAC/dendritic.4_hour.R1_val_1.fq.gz ~/raw_data/ATAC/dendritic.4_hour.R2_val_2.fq.gz \
| samtools view -@ 8 -b -u -h -f 3 -F 256 -F 2048 -q 30 - \
| samtools fixmate -m - - \
| samtools sort -m 4G -@ 8 - \
| samtools markdup -r - ~/projects/ATAC/dendritic.4_hour.nodup.bam
```
We used a couple of argument flags for `samtools view`. Here are what some of them mean:
* `-b`: output BAM or binary format
* `-u`: output uncompressed which is useful for piping to other samtools commands
* `-h`: output header
* `-f 3`: only include alignments marked with the SAM flag `3`, which means "properly paired and mapped"
* `-F 256`: exclude alignments with flag `256`, which means that bwa mapped the read to multiple places in the reference genome, and this alignment is not the best
* `-F 2048`: exclude alignments marked with SAM flag `2048`, indicating chimeric alignments, where bwa decided that parts of the read mapped to different regions in the genome. These records are the individual aligned segments of the read. They usually indicate structural variation. We're not going to base peak calls on them.
* `-q 30`: exclude alignments with a mapping score < 30
The manual page for samtools has more information on flags and how to use them to filter your alignments.
It's hard to remember what all the flags mean. Luckily for us, there's a web-based tool by the Broad Institute to [explain SAM flags](https://broadinstitute.github.io/picard/explain-flags.html).
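Since SAM flags are just bit fields, you can also decode them yourself; the short Python sketch below (purely illustrative, not part of the pipeline) interprets the flag values used in the filters above, with bit values taken from the SAM specification.
```
# Decode a SAM flag into the properties relevant to our samtools filters
SAM_BITS = {
    0x1: "read paired",
    0x2: "read mapped in proper pair",           # -f 3 requires bits 0x1 and 0x2
    0x100: "secondary alignment",                # excluded by -F 256
    0x800: "supplementary (chimeric) alignment"  # excluded by -F 2048
}

def decode_flag(flag):
    return [name for bit, name in SAM_BITS.items() if flag & bit]

print(decode_flag(3))     # kept by -f 3
print(decode_flag(2048))  # removed by -F 2048
```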
___
### Call accessible chromatin peaks
Let's assume that we have aligned and processed both datasets (0 hour, 4 hour). We have the final `.bam` files from both of these datasets, so the next step is to call peaks on all of them.
We will next use [MACS2](https://github.com/taoliu/MACS) to call peaks (regions with high transposition activity) on the aligned reads. Use `qsub -V` to ensure that your paths get copied over to the job.
```
#!/bin/bash
#PBS -q hotel
#PBS -N peaks
#PBS -l nodes=1:ppn=1
#PBS -l walltime=1:00:00
#PBS -o peaks.out
#PBS -e peaks.err
macs2 callpeak -t ~/projects/ATAC/dendritic.4_hour.nodup.bam -n dendritic.4_hour --outdir ~/projects/ATAC/ -g mm --nomodel --shift -100 --extsize 200
```
MACS2 calls peaks using the default false discovery rate (FDR) of 0.05. If you don't know what a false discovery rate is, don't worry, you'll learn more about it in the statistics class in Winter/Spring quarter. Basically, instead of applying a hard p-value cutoff of 0.05 regardless of the p-value distribution, an FDR procedure considers the shape of the p-value distribution and adjusts the threshold accordingly.
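To make the FDR idea concrete, here is a toy Benjamini-Hochberg adjustment in Python (a sketch for intuition only; MACS2 computes its own q-values internally and you do not need to run this).
```
import numpy as np

def bh_adjust(pvalues):
    """Benjamini-Hochberg adjusted p-values (q-values) for a 1D array."""
    p = np.asarray(pvalues, dtype=float)
    n = len(p)
    order = np.argsort(p)
    scaled = p[order] * n / np.arange(1, n + 1)
    # each adjusted value is the minimum of all scaled values at or after it
    q = np.minimum.accumulate(scaled[::-1])[::-1]
    adjusted = np.empty(n)
    adjusted[order] = np.clip(q, 0, 1)
    return adjusted

print(bh_adjust([0.001, 0.01, 0.02, 0.04, 0.2]))
```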
We used a couple of argument flags for `macs2 callpeak`. Here is what some of them mean:
* `-t`: the "treatment" file -- the input, which is the BAM file from the previous step
* `-n`: the name of the experiment, which is used to name files
* `-g`: the genome's mappable size; 'mm' is an alias for the mouse genome's mappable size
* `--nomodel, --shift, and --extsize`: MACS2 was originally designed for ChIP-seq data, so we're telling it not to use its built-in model, but to extend and shift reads in a way appropriate for ATAC-seq.
When the program finishes running, there should be an output file called `dendritic.4_hour_peaks.narrowPeak`.
These are our final ATAC-seq peaks in [BED format](https://genome.ucsc.edu/FAQ/FAQformat.html), which is a file format that contains genomic coordinates.
Open the peaks file for viewing
```
less -S ~/projects/ATAC/dendritic.4_hour_peaks.narrowPeak
```
This should be a tab-separated BED file - each row in this file contains information about a single ATAC-seq peak. The first three columns will contain information about where the peak is located within the genome, specifically these are chromosome, start position, and end position.
>Here is an example of what you should see.

**Now run the same command on the other dataset.**
___
If you've reached this point, try processing the other dataset.
*HINT*: You can put everything together (trimming adapters, aligning, calling peaks) in a script and submit it as a job. Make sure you change the filenames!
## <a name="analysis"></a>Data analysis
### bedtools
[BEDtools](https://bedtools.readthedocs.io/en/latest/) (bedtools) is a command-line program that was designed around manipulating BED files. Try typing in `bedtools` on the command line and you will see that it has a wide range of functions.
We will use bedtools to see how many peak calls overlap and differ between our two timepoints.
Let's check the concordance between the peak calls.
We can first check how many lines are in each BED file with a simple `wc -l` (line count) command. **How many peaks were called for each timepoint?** Modify this command to count the lines for both BED files.
```
wc -l ~/projects/ATAC/dendritic.4_hour_peaks.narrowPeak
```
Next, we will use the `bedtools intersect` command to figure out how many peaks are common to both datasets.
You can find the documentation for it [here](https://bedtools.readthedocs.io/en/latest/content/tools/intersect.html).
**How would you use this command to find out the number of common peaks between the two timepoints?**
*HINT*: pipe your outputs to `wc -l`
Now that we know how many peaks are similar between the two timepoints, let's find out the number of peaks that differ between them.
**How would you use the bedtools intersect command to do this?**
*HINT*: you want the inverse of intersect
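If you want to sanity-check your command-line answer from Python, the same intersections can be reproduced with the pybedtools wrapper (an assumption here: pybedtools is installed, e.g. via `conda install -c bioconda pybedtools`, and bedtools is on your PATH); one possible sketch:
```
from pybedtools import BedTool

h4 = BedTool("dendritic.4_hour_peaks.narrowPeak")
h0 = BedTool("dendritic.0_hour_peaks.narrowPeak")

shared = h4.intersect(h0, u=True)    # 4h peaks overlapping at least one 0h peak
specific = h4.intersect(h0, v=True)  # 4h peaks with no 0h overlap (the inverse)

print("4h peaks shared with 0h:", sum(1 for _ in shared))
print("4h-specific peaks:", sum(1 for _ in specific))
```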
___
### GREAT (gene ontology for non-coding regions)
[GREAT](http://great.stanford.edu/public/html/) stands for Genomic Regions Enrichment of Annotations Tool, which is basically a fancy way to say gene ontology without using genes as input. Instead, the input is user-defined genomic regions, which could come from a wide range of genomic assays (e.g. ATAC-seq, ChIP-seq, WGBS). GREAT assigns genomic regions to genes using a combination of methods. Based on these gene assignments, it then runs standard gene ontology, which can provide a high-level summary of the data.
Here, we will use GREAT to analyze the broad changes between the 4 hour and 0 hour timepoints. Specifically, we will see what gene ontology terms are enriched in the peaks specific to the 4 hour timepoint as compared to all 4 hour timepoint peaks.
First, we will want to format our `narrowPeak` files into test and background regions for input into GREAT.
GREAT expects a BED file as input with 4 columns: chromosome, start, end, and peak name.
Use the `bedtools intersect` command that we used earlier to find peaks specific to the 4 hour timepoint.
Our `narrowPeak` file contains some extraneous information that we don't need for GREAT, so we will use the `cut` utility to extract the columns we need (`-f 1-4` means columns 1-4).
```
bedtools intersect -a ~/projects/ATAC/dendritic.4_hour_peaks.narrowPeak \
-b ~/projects/ATAC/dendritic.0_hour_peaks.narrowPeak -v | cut -f 1-4 \
> ~/projects/ATAC/dendritic.4h_specific.bed
cut -f 1-4 ~/projects/ATAC/dendritic.4_hour_peaks.narrowPeak > ~/projects/ATAC/dendritic.4h_background.bed
```
Next, transfer the GREAT input files to a location on your local computer.
You can use `scp` (or drag + drop on the sidebar for MobaXterm users).
Navigate to the [GREAT web tool](http://great.stanford.edu/public/html/).
We will use these options:
**Species Assembly**: `Mouse: NCBI build 38 (UCSC mm10, Dec/2011)`
**Test regions**: `dendritic.4h_specific.bed`
**Background regions**: `dendritic.4h_background.bed`
It will take a few minutes, but the results should return in the same web browser.
Scroll down to MSigDB Pathway results. Do the results make sense?
> You can visualize the results as a bar graph. It should look something like this.

___
### Motif enrichment
One of the powerful uses of ATAC-seq is to find the transcriptional drivers that mediate changes in expression. Through a sequence motif enrichment analysis, we can use the DNA sequences underlying the accessible chromatin regions to discover which transcription factors (TFs) are likely driving these changes.
<img align='right' src='http://homer.ucsd.edu/homer/pic2.gif'>
We will use the program [HOMER](http://homer.ucsd.edu/homer/motif/), which was developed by [Chris Benner](https://profiles.ucsd.edu/christopher.benner) at UCSD. HOMER works by identifying sequences of various lengths that are enriched in the test set relative to the background. For the test set, we will use the 4h specific peaks, and we will use all 4h peaks as the background. HOMER can automatically create a GC-content matched background, but it's better to specify a background in this case because we want to know what TF motifs are enriched after treatment.
We will be using the `findMotifsGenome.pl` script from the HOMER program. You can find the documentation for this tool [here](http://homer.ucsd.edu/homer/ngs/peakMotifs.html).
```
#!/bin/bash
#PBS -q hotel
#PBS -N motifs
#PBS -l nodes=1:ppn=8
#PBS -l walltime=1:00:00
#PBS -o motifs.out
#PBS -e motifs.err
findMotifsGenome.pl ~/projects/ATAC/dendritic.4h_specific.bed mm10 homer_motifs -bg ~/projects/ATAC/dendritic.4h_background.bed -size 200 -nomotif -bits -mset vertebrates -p 8
```
The first three positional arguments to `findMotifsGenome.pl` are:
1. input BED file
2. HOMER reference genome
3. output directory
We used a couple of argument flags for `findMotifsGenome.pl`. Here are what some of them mean:
* `-bg`: specifies that we want to use our own background regions file instead of having HOMER automatically match random background regions
* `-size`: fragment size to use for motif finding
* `-nomotif`: skips the *de novo* motif scanning part of the HOMER routine (for speed)
* `-bits`: output the motif sequence logos scaled for information content
* `-mset`: use the vertebrates motif collection for known motif enrichment
This may take a while - HOMER will output a progress log.
When HOMER finishes running, there will be an output directory (`homer_motifs` which we specified earlier) containing all of the motif enrichment results. The file that contains the summary of the analysis is `knownResults.html`, which stores results for the top enriched motifs. Transfer this file to your local computer with `scp` (or drag + drop on the sidebar for MobaXterm users) and open it with your default web browser.
> You should see something that looks like this (this screenshot shows a truncated view of the top 5 results).

By far, the most enriched motif is NFKB, which makes sense given that LPS treatment should signal through Toll-like receptors and activate the NFKB pathway. However, among the top results we see examples of repeated known motif matches. This happens because the DNA binding motifs of TFs in the same family may be extremely similar, such as the OCT (POU) family (core sequence motif: ATGCAAAT).
## <a name="genomebrowser"></a>Genome browser
### Using pre-made genome browser tracks
I've compiled a UCSC genome browser session with some of the data we've been working with this bootcamp.
It often helps to visualize the data you're working with, instead of looking at numbers all the time.
https://genome.ucsc.edu/cgi-bin/hgTracks?hgS_doOtherUser=submit&hgS_otherUserName=jchiou42&hgS_otherUserSessionName=BMS_bootcamp_2018
In this session, I have included the two replicates of RNA-seq data that Ryan used for differential expression, as well as the ATAC-seq data that I used.
You can use this session to visualize peaks in one dataset, but not the other.
For example, look up the region surrounding the Cd40 gene `chr2:165039617-165087673`, which has highly upregulated expression when dendritic cells are treated with LPS.

Although there are dramatic differences in expression, the chromatin profile only changes slightly (at the promoter and generally across the gene body).
___
### Making your own genome browser tracks (in the future...)
Usually ENCODE data will have `.bigWig` files that we can visualize in the UCSC genome browser. However, in special cases where the tracks aren't provided (such as the data that we've been using for this bootcamp), we will need to make the tracks ourselves.
First install [deeptools](https://deeptools.readthedocs.io/en/develop/), which is a program suite that has a lot of utilities for working with NGS data.
```conda install -c bioconda deeptools```
We will be using the `bamCoverage` script. This takes a `.bam` file as input and can output a `.bigWig` file, which is your genome browser signal track in binary format. Let's try this out on one of the RNA-seq bam files. We will need to first index the reads with `samtools`.
```
mkdir -p ~/projects/mouse_LPS/genome_browser
ln -s /oasis/tscc/scratch/biom200/bms_2018/rna_seq/analysis/star_alignment/bam_files/mouse_0hr_rep1_Aligned.sorted.bam ~/projects/mouse_LPS/genome_browser
samtools index ~/projects/mouse_LPS/genome_browser/mouse_0hr_rep1_Aligned.sorted.bam
bamCoverage --bam ~/projects/mouse_LPS/genome_browser/mouse_0hr_rep1_Aligned.sorted.bam --outFileName ~/projects/mouse_LPS/genome_browser/mouse_0hr_rep1.RNAseq.bw --binSize 50 --numberOfProcessors 1 --normalizeUsing RPKM --effectiveGenomeSize 1870000000 --skipNonCoveredRegions
```
Here the number 1870000000 refers to 1.87 gigabases, the approximate mappable genome size of the mouse genome mm10.
This should output the file `mouse_0hr_rep1.RNAseq.bw` in the `~/projects/mouse_LPS/genome_browser/` directory. We will need to upload this to a publicly accessible internet server (here I'm using Amazon Web Services (AWS); TSCC should also have an outward-facing ftp directory).
However you choose to upload your file, you can now add it as a custom track in the UCSC Genome Browser using the bigDataUrl option.
```track type=bigWig name="Dend. 0hr rep1 LPS RNA" description="Dendritic 0hr rep1 LPS RNA signal" visibility=2 maxHeightPixels=64 db=mm10 color=128,128,128 bigDataUrl=https://s3-us-west-2.amazonaws.com/gaulton-lab-ucsc/public/BMS_bioinf_bootcamp/mouse_0hr_rep1.RNAseq.bw```
## <a name="scatac"></a>Single cell ATAC-seq
Single cell methods enable researchers to capture finer resolution than ever before, and allow pinpointing disease mechanisms and regulatory programs to specific cell types within a bulk tissue sample. The analysis of single cell ATAC-seq is many times more challenging than analyzing single cell RNA-seq, because accessible chromatin is essentially binary at the single-cell level.
___
For those interested in learning more about single cell ATAC-seq or those who have finished everything early, there is a well-documented tutorial for [analyzing a mouse atlas dataset](http://atlas.gs.washington.edu/mouse-atac/).

___
Papers that demonstrate use cases of single cell ATAC-seq include:
1. [Mouse Atlas](https://doi.org/10.1016/j.cell.2018.06.052)
2. [Fly Embryo](https://www.nature.com/articles/nature25981)
3. [Mouse Forebrain](https://www.nature.com/articles/s41593-018-0079-3)
4. [Hematopoietic Landscape](https://doi.org/10.1016/j.cell.2018.03.074)
| {
"filename": "bioinformatics_bootcamp_2018_ATAC-seq-checkpoint_2.ipynb",
"repository": "ryanmarina/BMS",
"query": "transformed_from_existing",
"size": 28407,
"sha": ""
} |
# thesis_homer_genome_annotation_1.ipynb
Repository: liouhy/2022-Charite-master
# HOMER - genome annotation
Here, we used HOMER to annotate genomic regions from scATAC-seq datasets. First, we created bed files of genomic regions.
<code>
import pandas as pd
import anndata as ad
</code>
<code>
# Granja et al.
ft = pd.read_csv('../raw/scATAC-Healthy-Hematopoiesis-191120-rows.csv')
ft_homer = pd.DataFrame()
ft_homer[['chr','start','end']] = ft.iloc[:,0].str.split('_',expand=True)
ft_homer['id'] = ft['name']
ft_homer['score'] = ft['score']
ft_homer['strand'] = '*'
ft_homer.to_csv('../raw/scATAC-Healthy-Hematopoiesis-191120-ft-homer.txt', sep='\t', header=None, index=None)
</code>
<code>
# Luecken et al.
r_adata = ad.read_h5ad("../processed/Luecken_multiome_BMMC-r_adata.h5ad")
index=r_adata.var.loc[r_adata.var['feature_types'] == 'ATAC',:].index
bed=index.str.split('-', expand=True).to_frame(index=False, name=['chr','start','end'])
bed.to_csv('../processed/Luecken_ATAC.bed', sep='\t', header=None, index=None)
</code>
<code>
# 10x
adata_atac = ad.read_h5ad('../processed/10X_multiome/pbmc_atac.h5ad')
bed = pd.DataFrame(adata_atac.var.index).iloc[:,0].str.split('-',expand=True)
bed.to_csv('../processed/10X_multiome/ATAC.bed', sep='\t', header=None, index=None)
</code>
Run the lines in homer_annotation.txt to perform annotation.
| {
"filename": "thesis_homer_genome_annotation_1.ipynb",
"repository": "liouhy/2022-Charite-master",
"query": "transformed_from_existing",
"size": 3038,
"sha": ""
} |
# table_model_1_1.ipynb
Repository: DongjoonLim/EvoLSTM
<code>
import numpy as np
from tqdm.notebook import tqdm
!nvidia-smi
</code>
<code>
k = 7
des = str(np.load('prepData/insert2Des__HPGPNRMPC_hg38_chr2.npy'))
anc = str(np.load('prepData/insert2Anc__HPGPNRMPC_hg38_chr2.npy'))
print(len(anc), len(des))
def buildTable(anc, des):
table = {}
freq = {}
for i in tqdm(range(len(anc)-k*2-1)):
try:
table[anc[i:i+k*2+1], des[i+k]] += 1
except KeyError:
table[anc[i:i+k*2+1], des[i+k]] = 1
try:
freq[anc[i:i+k*2+1]] += 1
except KeyError:
freq[anc[i:i+k*2+1]] = 1
for key in tqdm(table.keys()):
table[key] = table[key]/freq[key[0]]
return table, freq
table, freq = buildTable(anc,des)
print(table)
np.save('table_{}'.format(k*2+1), table)
</code>
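As a quick sanity check on what `buildTable` produces: since each entry is a count divided by the frequency of its ancestral context, the conditional probabilities for any observed context should sum to roughly 1. A small (slow, illustrative-only) sketch:
<code>
# Sanity check: probabilities over descendant bases should sum to ~1 per context
import random
for ctx in random.sample(list(freq.keys()), 5):
    total = sum(p for (c, base), p in table.items() if c == ctx)
    print(ctx, round(total, 6))
</code>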
<code>
ancName = '_HPGPNRMPC'
def load_seq(chromList):
inputAll = ''
predAll = ''
outputAll = ''
for chromosome in chromList:
try:
inputAll += str(np.load('prepData/insert2Anc_{}_hg38_chr{}.npy'.format(ancName,chromosome)))#[:10000000]
outputAll += str(np.load('prepData/insert2Des_{}_hg38_chr{}.npy'.format(ancName,chromosome)))#[:10000000]
predAll += str(np.load('prepData/simulated_{}_-1_chr{}.npy'.format(ancName, chromosome)))#[:10000000]
except FileNotFoundError:
print(chromosome)
continue
print(len(inputAll), len(outputAll), len(predAll))
print(inputAll[-10:], outputAll[-10:], predAll[-10:])
return [inputAll], [outputAll], [predAll]
inputAll, outputAll, predAll = load_seq([12,16,17,19,20,21,22])
</code>
<code>
np.set_printoptions(precision=15)
def model_simulate(alphabet, k, inputAll):
inputAll = inputAll[0]
table15 = np.load('table_15.npy',allow_pickle=True).item()
table11 = np.load('table_11.npy',allow_pickle=True).item()
table5 = np.load('table_5.npy',allow_pickle=True).item()
table1 = np.load('table_1.npy',allow_pickle=True).item()
pred_table = ''
for i in tqdm(range(len(inputAll))):
if i <k:
pred_table += inputAll[i]
elif k<=i<len(inputAll)-k:
mut_prob = []
for item in alphabet:
try:
mut_prob.append(table15[inputAll[i-7:i+7+1], item])
except KeyError:
try:
mut_prob.append(table11[inputAll[i-5:i+5+1], item])
except KeyError:
try:
mut_prob.append(table5[inputAll[i-2:i+2+1], item])
except KeyError:
try:
mut_prob.append(table1[inputAll[i], item])
except KeyError:
mut_prob.append(0)
# print(mut_prob)
# print(np.random.choice(len(mut_prob), 1, p=mut_prob))
# print(sum(mut_prob))
# mut_prob = [float(i)/sum(mut_prob) for i in mut_prob]
mut_prob = np.array(mut_prob)
mut_prob /= mut_prob.sum()
pred_table += alphabet[np.random.multinomial(1, mut_prob).argmax()]
# pred_table += alphabet[np.random.choice(len(mut_prob), 1, p=mut_prob)[0]]
else :
pred_table += inputAll[i]
return pred_table
</code>
<code>
# k = 7
mut_dict = np.load('mut_dict_insert2.npy',allow_pickle=True).item()
inv_dict = {v: k for k, v in mut_dict.items()}
print(list(inv_dict.keys()))
alphabet = list(inv_dict.keys())
# pred_table = model_simulate(alphabet, k, inputAll)
# np.save('predTable_{}'.format(k*2+1), pred_table)
# print(pred_table[:1000])
</code>
<code>
import math
def cross_entropy(alphabet, k, inputAll, outputAll):
inputAll = inputAll[0]
outputAll = outputAll[0]
table = np.load('table_{}.npy'.format(k*2+1),allow_pickle=True).item()
result = 0
count = 0
for i in tqdm(range(len(inputAll))):
if k<=i<len(inputAll)-k:
try:
result += -math.log(table[inputAll[i-k:i+k+1], outputAll[i]])
count +=1
except KeyError:
continue
# print('keyError')
return result/count
def cross_entropy39(alphabet,inputAll, outputAll):
inputAll = inputAll[0]
outputAll = outputAll[0]
result = 0
count = 0
for i in tqdm(range(len(inputAll))):
if 19<=i<len(inputAll)-19:
try:
result += -math.log(table39[inputAll[i-19:i+19+1], outputAll[i]])
count +=1
except KeyError:
try:
result += -math.log(table21[inputAll[i-10:i+10+1], outputAll[i]])
count +=1
except KeyError:
try:
result += -math.log(table15[inputAll[i-7:i+7+1], outputAll[i]])
count +=1
except KeyError:
try:
result += -math.log(table11[inputAll[i-5:i+5+1], outputAll[i]])
count +=1
except KeyError:
try:
result += -math.log(table5[inputAll[i-2:i+2+1], outputAll[i]])
count +=1
except KeyError:
try:
result += -math.log(table1[inputAll[i], outputAll[i]])
count +=1
except KeyError:
result += -math.log(0.01020408163265306)
count += 1
print(result/count)
# print('keyError')
return result/count
def cross_entropy21(alphabet,inputAll, outputAll):
inputAll = inputAll[0]
outputAll = outputAll[0]
result = 0
count = 0
for i in tqdm(range(len(inputAll))):
if 15<=i<len(inputAll)-10:
try:
result += -math.log(table21[inputAll[i-10:i+10+1], outputAll[i]])
count +=1
except KeyError:
try:
result += -math.log(table15[inputAll[i-7:i+7+1], outputAll[i]])
count +=1
except KeyError:
try:
result += -math.log(table11[inputAll[i-5:i+5+1], outputAll[i]])
count +=1
except KeyError:
try:
result += -math.log(table5[inputAll[i-2:i+2+1], outputAll[i]])
count +=1
except KeyError:
try:
result += -math.log(table1[inputAll[i], outputAll[i]])
count +=1
except KeyError:
result += -math.log(0.01020408163265306)
count += 1
print(result/count)
# print('keyError')
return result/count
def cross_entropy15(alphabet,inputAll, outputAll):
inputAll = inputAll[0]
outputAll = outputAll[0]
result = 0
count = 0
for i in tqdm(range(len(inputAll))):
if 15<=i<len(inputAll)-7:
try:
result += -math.log(table15[inputAll[i-7:i+7+1], outputAll[i]])
count +=1
except KeyError:
try:
result += -math.log(table11[inputAll[i-5:i+5+1], outputAll[i]])
count +=1
except KeyError:
try:
result += -math.log(table5[inputAll[i-2:i+2+1], outputAll[i]])
count +=1
except KeyError:
try:
result += -math.log(table1[inputAll[i], outputAll[i]])
count +=1
except KeyError:
result += -math.log(0.01020408163265306)
count += 1
print(result/count)
# print('keyError')
return result/count
def cross_entropy11(alphabet,inputAll, outputAll):
inputAll = inputAll[0]
outputAll = outputAll[0]
result = 0
count = 0
for i in tqdm(range(len(inputAll))):
if 15<=i<len(inputAll)-5:
try:
result += -math.log(table11[inputAll[i-5:i+5+1], outputAll[i]])
count +=1
except KeyError:
try:
result += -math.log(table5[inputAll[i-2:i+2+1], outputAll[i]])
count +=1
except KeyError:
try:
result += -math.log(table1[inputAll[i], outputAll[i]])
count +=1
except KeyError:
result += -math.log(0.01020408163265306)
count += 1
print(result/count)
# print('keyError')
return result/count
def cross_entropy5(alphabet,inputAll, outputAll):
inputAll = inputAll[0]
outputAll = outputAll[0]
result = 0
count = 0
for i in tqdm(range(len(inputAll))):
if 15<=i<len(inputAll)-2:
try:
result += -math.log(table5[inputAll[i-2:i+2+1], outputAll[i]])
count +=1
except KeyError:
try:
result += -math.log(table1[inputAll[i], outputAll[i]])
count +=1
except KeyError:
result += -math.log(0.01020408163265306)
count += 1
print(result/count)
# print('keyError')
return result/count
def cross_entropy1(alphabet,inputAll, outputAll):
inputAll = inputAll[0]
outputAll = outputAll[0]
result = 0
count = 0
for i in tqdm(range(len(inputAll))):
if 15<=i<len(inputAll):
try:
result += -math.log(table1[inputAll[i], outputAll[i]])
count +=1
except KeyError:
result += -math.log(0.01020408163265306)
count += 1
print(result/count)
# print('keyError')
return result/count
</code>
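The `cross_entropyN` functions above all implement the same back-off idea with different starting context widths; a compact generalized version could look like the sketch below (an assumption-laden rewrite, not the code used for the reported numbers: it expects the same `table_{width}.npy` dictionaries keyed by `(ancestral context, descendant base)` and the same 1/98 fallback probability).
<code>
import math
import numpy as np

def cross_entropy_backoff(inputAll, outputAll, ks=(19, 10, 7, 5, 2, 0), fallback=1/98):
    """Average per-site -log P(descendant base | ancestral context), backing off
    from wide to narrow contexts (k flanking bases per side) when a k-mer is unseen."""
    inputAll, outputAll = inputAll[0], outputAll[0]
    tables = {k: np.load('table_{}.npy'.format(2 * k + 1), allow_pickle=True).item() for k in ks}
    kmax = max(ks)
    result, count = 0.0, 0
    for i in range(kmax, len(inputAll) - kmax):
        prob = None
        for k in ks:
            prob = tables[k].get((inputAll[i - k:i + k + 1], outputAll[i]))
            if prob is not None:
                break
        result += -math.log(prob if prob is not None else fallback)
        count += 1
    return result / count
</code>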
<code>
table39 = np.load('table_39.npy',allow_pickle=True).item()
table21 = np.load('table_21.npy',allow_pickle=True).item()
table15 = np.load('table_15.npy',allow_pickle=True).item()
table11 = np.load('table_11.npy',allow_pickle=True).item()
table5 = np.load('table_5.npy',allow_pickle=True).item()
table1 = np.load('table_1.npy',allow_pickle=True).item()
print(len(table1.keys()))
print(1/98)
</code>
<code>
cross_entropy39(alphabet,inputAll, outputAll)
</code>
<code>
cross_entropy21(alphabet,inputAll, outputAll)
</code>
<code>
cross_entropy15(alphabet, inputAll, outputAll)
</code>
<code>
cross_entropy11(alphabet,inputAll, outputAll)
</code>
<code>
cross_entropy5(alphabet, inputAll, outputAll)
</code>
<code>
cross_entropy1(alphabet, inputAll, outputAll)
</code>
<code>
# Recorded cross-entropy results: 0.22000798713506123 0.20882172435559532 0.07722072727808955 0.098972098412289 0.11081867999770367 0.06958409766111845
</code>
<code>
# NOTE: cross_entropy2 (a context-width-parameterized variant) is not defined in this notebook
cross_entropy2(alphabet, 0, inputAll, outputAll)
cross_entropy2(alphabet, 2, inputAll, outputAll)
cross_entropy2(alphabet, 5, inputAll, outputAll)
cross_entropy2(alphabet, 7, inputAll, outputAll)
cross_entropy2(alphabet, 10, inputAll, outputAll)
cross_entropy2(alphabet, 19, inputAll, outputAll)
</code>
| {
"filename": "table_model_1_1.ipynb",
"repository": "DongjoonLim/EvoLSTM",
"query": "transformed_from_existing",
"size": 32967,
"sha": ""
} |
# Evaluate_Integration_LISI.ipynb
Repository: pughlab/cancer-scrna-integration
---
# Evaluate data integration using LISI
*L.Richards*
*2021-06-14*
*/cluster/projects/pughlab/projects/cancer_scrna_integration/evalutation/lisi*
---
https://github.com/immunogenomics/LISI
<code>
# install.packages("devtools")
# devtools::install_github("immunogenomics/lisi")
library(lisi) # v1.0
library(Seurat)
library(rlist)
</code>
<code>
# list metadata files with embeddings (used for Fig 1 plotting)
embeddings.path <- "/cluster/projects/pughlab/projects/cancer_scrna_integration/figures"
embeddings <- list.files(embeddings.path, pattern = ".csv")
results <- list()
for (i in 1:length(embeddings)){
# load data
print(embeddings[i])
dat <- read.csv(paste0(embeddings.path, "/", embeddings[i]))
rownames(dat) <- dat$X
#####################
# calculate lisi for each method
methods <- unique(dat$Method)
for (j in 1:length(methods)){
print(methods[j])
# set up lisi input files
sub <- dat[dat$Method == methods[j], ] # subset methods
X <- sub[ ,c("Coords_1", "Coords_2")] # subset out embeddings
# calculate and normalize lisi for samples and patients
lisi <- compute_lisi(X, sub, c("SampleID", "PatientID"))
lisi$SampleID_Norm <- lisi$SampleID / length(unique(sub$SampleID))
lisi$PatientID_Norm <- lisi$PatientID / length(unique(sub$PatientID))
#####################
# calculate lisi for each cell type
# have to subset the dataframe by each cell type and calc
celltypes <- unique(sub$CellType)
lisi.celltypes <- list()
for (k in 1:length(celltypes)){
print(celltypes[k])
# subset embeddings by cell type
sub.cell <- sub[sub$CellType == celltypes[k], ]
X.cell <- X[rownames(sub.cell), ]
# calculate and normalize lisi within cell type
if(nrow(X.cell) < 40){
per <- 10
} else { per <- 30 }
print(per)
cell.lisi <- compute_lisi(X.cell, sub.cell, perplexity = per, c("SampleID", "PatientID"))
cell.lisi$SampleID_Norm <- cell.lisi$SampleID / length(unique(sub.cell$SampleID))
cell.lisi$PatientID_Norm <- cell.lisi$PatientID / length(unique(sub.cell$PatientID))
colnames(cell.lisi) <- paste0("CellType_", colnames(cell.lisi))
cell.lisi$CellType <- celltypes[k]
lisi.celltypes[[k]] <- cell.lisi
}
lisi.celltypes <- do.call(rbind, lisi.celltypes) # combine
lisi.celltypes <- lisi.celltypes[rownames(lisi), ] # reorder
identical(rownames(lisi.celltypes), rownames(lisi)) # sanity check
lisi <- cbind(lisi, lisi.celltypes) # combine cell and batch lisi
lisi$Method <- methods[j]
lisi$Study <- gsub("_MergedMeta.csv", "", embeddings[i])
colnames(lisi)[grep("_", colnames(lisi))] <- paste0("LISI_", colnames(lisi)[grep("_", colnames(lisi))])
results <- list.append(results, lisi)
}
}
results <- do.call(rbind, results) # combine across studies
dim(results) # 974206 rows
colnames(results)[1:2] <- paste0("LISI_", colnames(results)[1:2])
results$CellBarcode <- rownames(results)
</code>
<code>
# save results
write.csv(results, file = "LISI_calculations.csv")
</code>
| {
"filename": "Evaluate_Integration_LISI.ipynb",
"repository": "pughlab/cancer-scrna-integration",
"query": "transformed_from_existing",
"size": 5486,
"sha": ""
} |
# MCAsubset-checkpoint.ipynb
Repository: CSUBioGroup/scNCL-release
<code>
%load_ext autoreload
%autoreload 2
import os
import h5py
import seaborn as sns
import numpy as np
import pandas as pd
import scanpy as sc
import anndata
import csv
import gzip
import scipy.io
import scipy.sparse as sps
import matplotlib.pyplot as plt
from os.path import join
from sklearn.decomposition import PCA, IncrementalPCA
from sklearn.preprocessing import normalize
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
np.random.seed(1234)
sc.settings.verbosity = 3
sc.logging.print_header()
</code>
## experiment id
<code>
exp_id = 'MCA-subset' + '-1301' # dataset_name + timestamp
</code>
## loading data
<code>
data_root = '/home/yxh/data/MCA/scjoint/atlas_subset'
adata_atac = sc.read_h5ad(join(data_root, 'adata_atac_cache.h5ad'))
adata_rna = sc.read_h5ad('/home/yxh/gitrepo/multi-omics/Portal-main/cache/adata_rna_facs.h5ad')
adata_rna.obs['domain'] = 'rna'
adata_atac.obs['domain'] = 'atac'
meta_rna = adata_rna.obs.copy()
meta_atac = adata_atac.obs.copy()
meta = pd.concat([meta_rna, meta_atac], axis=0)
</code>
<code>
adata_rna, adata_atac
</code>
<code>
# low-dimension representations of raw scATAC-seq data
atac_raw_emb = np.load('../cache/MCAsubset_atac_tsne.npy')
atac_raw_emb.shape
</code>
<code>
# params dict of preprocessing
ppd = {'binz': True,
'hvg_num':adata_atac.shape[1],
'lognorm':False,
'scale_per_batch':False,
'batch_label': 'domain',
'type_label': 'cell_type',
'knn': 10,
'knn_by_tissue':False
} # default settings
# outputs folder
output_dir = join(f'../outputs/{exp_id}')
os.makedirs(output_dir, exist_ok=True)
</code>
## model
<code>
import sys
sys.path = ["../"] + sys.path
import src.scNCL as scNCL
import src.utils as utls
</code>
<code>
model = scNCL.scNCL(
'non_linear', n_latent=64, bn=False, dr=0.2,
cont_w=0.05, cont_tau=0.4,
)
model.preprocess(
[adata_rna, adata_atac], # list of 'anndata' object
atac_raw_emb,
adata_adt_inputs=None,
pp_dict = ppd
)
if 1:
model.train(
opt='adam',
batch_size=500, training_steps=1000,
lr=0.001, weight_decay=5e-4,
log_step=50, eval_atac=False, #eval_top_k=1, eval_open=True,
)
else:
# loading checkpoints
ckpt_path = join(output_dir, 'ckpt_1000.pth')
model.load_ckpt(ckpt_path)
</code>
<code>
model.eval(inplace=True)
atac_pred_type = model.annotate()
# saving model
scNCL.save_ckpts(output_dir, model, step=1000)
</code>
<code>
ad_atac = sc.AnnData(model.feat_B)
ad_atac.obs = meta_atac.copy()
ad_atac.obs['pred_type'] = atac_pred_type
ad_atac.obs['pred_conf'] = np.max(model.head_B, axis=1)
</code>
<code>
ad_atac = utls.umap_for_adata(ad_atac)
</code>
<code>
sc.pl.umap(ad_atac, color=['cell_type', 'pred_type', 'pred_conf'])
</code>
# Evaluation
<code>
from src.metrics import osr_evaluator
share_mask = meta_atac.cell_type.isin(meta_rna.cell_type.unique()).to_numpy()
open_score = 1 - np.max(model.head_B, axis=1) # pb_max, logit_max_B
kn_data_pr = atac_pred_type[share_mask]
kn_data_gt = meta_atac.cell_type[share_mask].to_numpy()
kn_data_open_score = open_score[share_mask]
unk_data_open_score = open_score[np.logical_not(share_mask)]
closed_acc, os_auroc, os_aupr, oscr = osr_evaluator(kn_data_pr, kn_data_gt, kn_data_open_score, unk_data_open_score)
</code>
<code>
from sklearn.metrics import confusion_matrix
import matplotlib.pyplot as plt
import seaborn as sns
cm = confusion_matrix(meta_atac.cell_type.to_numpy(), atac_pred_type)
cm = cm/cm.sum(axis=1, keepdims=True)
df_cm = pd.DataFrame(cm, index = meta_atac.cell_type.unique(),
columns = meta_atac.cell_type.unique())
plt.figure(figsize = (10,7))
sns.heatmap(df_cm, )
</code>
| {
"filename": "MCAsubset-checkpoint.ipynb",
"repository": "CSUBioGroup/scNCL-release",
"query": "transformed_from_existing",
"size": 199827,
"sha": ""
} |
# GPT_1.ipynb
Repository: ZubairQazi/NDE-GPT
# GPT for Topic Categorization
<code>
import json
import pandas as pd
import numpy as np
import ast
import os
import re
from bs4 import BeautifulSoup
import csv
from tqdm.notebook import tqdm
import openai
from langchain.llms import OpenAI
from langchain.chat_models import ChatOpenAI
from langchain.callbacks import get_openai_callback
from langchain.schema.messages import HumanMessage, SystemMessage
</code>
## Load Data
EDAM topics, prompt, etc
<code>
with open("config.json", "r") as config_file:
config = json.load(config_file)
</code>
<code>
openai_api_key = config["api_keys"]["openai"]
</code>
<code>
dataset = pd.read_csv(input("Enter testing dataset path: "))
</code>
<code>
dataset['MeSH Terms'] = dataset['MeSH Terms'].apply(lambda mesh_list: np.unique(ast.literal_eval(mesh_list)))
dataset['EDAM Topics'] = dataset['EDAM Topics'].apply(lambda edam_list: np.unique(ast.literal_eval(edam_list)))
</code>
<code>
with open('templates/prompt_template.txt', 'r') as template_file:
template = template_file.read()
</code>
<code>
with open(input("Enter EDAM topics file:"), 'r') as edam_file:
full_edam_topics = edam_file.readlines()
full_edam_topics = [topic.strip() for topic in full_edam_topics]
</code>
<code>
# Add EDAM topics to prompt template
formatted_topics = "\n".join(full_edam_topics)
template = template.replace("<topics>", formatted_topics)
</code>
### Plots
Duplicate terms were already removed from each row's EDAM topic and MeSH term lists above (via `np.unique`); here we plot the distribution of list lengths.
<code>
import matplotlib.pyplot as plt
# Create a histogram
plt.hist(dataset['EDAM Topics'].apply(len), bins='auto', edgecolor='black', alpha=0.7)
plt.hist(dataset['MeSH Terms'].apply(len), bins='auto', edgecolor='black', alpha=0.7)
plt.xlabel('Number of Topics / Terms')
plt.ylabel('Frequency')
plt.legend(['EDAM', 'MeSH'])
</code>
<code>
print("Total MeSH Terms:", len(list(dataset.iloc[0]['MeSH Terms'])))
print("Unique MeSH Terms:", len(np.unique(list(dataset.iloc[0]['MeSH Terms']))))
print()
print("Total EDAM Topics:", len(list(dataset.iloc[0]['EDAM Topics'])))
print("Unique EDAM Topics:", len(np.unique(list(dataset.iloc[0]['EDAM Topics']))))
</code>
### Remove any topics not in the EDAM Topics list
<code>
dataset['Filtered EDAM'] = dataset['EDAM Topics'].apply(lambda x: [item for item in x if item in full_edam_topics])
</code>
<code>
import matplotlib.pyplot as plt
# Create a histogram
plt.hist(dataset['EDAM Topics'].apply(len), bins='auto', edgecolor='black', alpha=0.7)
plt.hist(dataset['Filtered EDAM'].apply(len), bins='auto', edgecolor='black', alpha=0.7)
plt.xlabel('Number of Topics / Terms')
plt.ylabel('Frequency')
plt.legend(['EDAM', 'Filtered EDAM'])
</code>
<code>
# Check for any issues during filtering (missed topics, etc)
indices_true = dataset.loc[dataset['Filtered EDAM'].apply(lambda edam_list: not all(term in full_edam_topics for term in edam_list))].index
for index in indices_true:
edam_list = dataset.loc[index, 'Filtered EDAM']
terms_not_in_edam_topics = [term for term in edam_list if term not in full_edam_topics]
print(f"Index {index}: Terms not in edam_topics: {terms_not_in_edam_topics}")
</code>
## OpenAI API
Let's start with a proof of concept:
<code>
random_sample = dataset.sample(n=1)
index = random_sample.index[0]
description, abstract, paper_edam_topics = random_sample[['Description', 'Abstract', 'Filtered EDAM']].values[0]
</code>
<code>
prompt = template.replace('<abstract>', abstract).replace('<num_terms>', str(len(paper_edam_topics)))
# prompt = template.replace('<description>', description).replace('<num_terms>', str(len(paper_edam_topics)))
</code>
<code>
print(prompt)
</code>
<code>
messages = [
SystemMessage(content=f"You're a helpful assistant."),
HumanMessage(content=prompt)
]
</code>
<code>
chat = ChatOpenAI(
model_name='gpt-3.5-turbo',
openai_api_key = openai_api_key
)
gpt_output = ''
with get_openai_callback() as cb:
chat.invoke(messages)
for chunk in chat.stream(messages):
print(chunk.content, end="", flush=True)
gpt_output += chunk.content
print("\n===========CALLBACK: ==========\n")
print(cb)
print("\n=============\n")
# chat = OpenAI(
# model_name='text-davinci-003',
# openai_api_key = openai_api_key,
# temperature=0.75
# )
# gpt_output = ''
# with get_openai_callback() as cb:
# chat.invoke(messages)
# for chunk in chat.stream(messages):
# print(chunk, end="", flush=True)
# gpt_output += chunk
# print("\n===========CALLBACK: ==========\n")
# print(cb)
# print("\n=============\n")
</code>
<code>
gpt_output
</code>
<code>
not_in_edam = 0
true_topics = dataset.iloc[index]['Filtered EDAM']
num_correct = 0
for topic in gpt_output.strip().split(', '):
if topic not in full_edam_topics:
print(topic)
not_in_edam += 1
continue
if topic in true_topics:
num_correct += 1
</code>
<code>
print("GPT-outputted topics not in EDAM:", not_in_edam)
print("# Correct topics from GPT:", num_correct)
print("# Incorrect topics from GPT:", len(true_topics) - num_correct)
</code>
<code>
print(gpt_output, '\n')
print(', '.join(true_topics))
</code>
### Note: It seems as though GPT is not able to capture relevant topics given the entire list.
## Use GPT to capture major subtopics (Biology, Medicine, etc.)
We will see if GPT can capture the general topics of each data entry. Then we can pass the relevant subtopics.
https://bioportal.bioontology.org/ontologies/EDAM/?p=classes&conceptid=http%3A%2F%2Fedamontology.org%2Ftopic_0003
Biosciences - 4019
Chemistry - 3314
Computer science - 3316
Data management - 3071
Environmental Sciences - 3855
Informatics - 0605
Open science - 4010
Physics - 3318
<code>
import pandas as pd
edam_data = pd.read_csv("EDAM/EDAM.csv")
edam_data = edam_data[edam_data['Parents'].str.contains("http://edamontology.org/topic_")]
edam_data['Parents #'] = edam_data['Parents'].str.extractall(r'topic_(\d+)').groupby(level=0).agg(lambda parents: parents.tolist())
edam_data['Topic #'] = edam_data['Class ID'].apply(lambda url: url.split('topic_')[1])
</code>
<code>
from collections import defaultdict
topics = ['4019', '3314', '3316', '3071', '3855', '0605', '4010', '3318', '3361', '3068', '3678', '3315']
subtopics = defaultdict(lambda: [])
def get_children_topics(parent_id):
# children_ids = edam_data[edam_data['Parents'].str.contains(parent_id)]['Class ID'].apply(lambda url: url.split('topic_')[1]).to_list()
children_ids = edam_data[edam_data['Parents #'].apply(\
lambda parent_ids: parent_id in parent_ids)]['Topic #'].to_list()
if not len(children_ids):
return []
# print(parent_id, children_ids)
grandchildren = []
for child_id in children_ids:
grandchildren.append(get_children_topics(child_id))
children_ids.append(grandchildren)
return children_ids
for parent_topic in topics:
subtopics[parent_topic] = get_children_topics(parent_topic)
</code>
<code>
def flatten_list(nested_list):
flattened = []
for item in nested_list:
if isinstance(item, list):
flattened.extend(flatten_list(item))
else:
flattened.append(item)
return flattened
# Flatten each value in the dictionary
subtopics = {key: flatten_list(value) for key, value in subtopics.items()}
print()
for key, value in subtopics.items():
print(f"{key}: {value}")
</code>
<code>
topics_to_remove = ['3361', '3068', '3678', '3315']
topics_to_keep = ['4019', '3314', '3316', '3071', '3855', '0605', '4010', '3318']
for key in ['3361', '3068', '3678', '3315']:
topics_to_remove.extend(subtopics[key])
for key in ['4019', '3314', '3316', '3071', '3855', '0605', '4010', '3318']:
topics_to_keep.extend(subtopics[key])
topics_to_remove, topics_to_keep = set(topics_to_remove), set(topics_to_keep)
print('Number of Topics to remove: ', len([item for item in topics_to_remove if item not in topics_to_keep]))
edam_data = edam_data[~edam_data['Topic #'].apply(lambda topic: topic in topics_to_remove and topic not in topics_to_keep)]
# Remove the unnecessary topics
for topic in ['3361', '3068', '3678', '3315']:
del subtopics[topic]
print()
for key, value in subtopics.items():
print(f"{key}: {value}")
</code>
<code>
parent_topics = defaultdict(lambda: set())
for key, values in subtopics.items():
for value in values:
parent_topics[value].add(key)
for key, value in parent_topics.items():
print(f"{key}: {value}")
</code>
<code>
main_topics = {}
for topic in ['4019', '3314', '3316', '3071', '3855', '0605', '4010', '3318']:
main_topics[topic] = edam_data[edam_data['Topic #'] == topic]['Preferred Label'].values[0]
main_topics
</code>
<code>
edam_data['Parent Topics'] = edam_data['Topic #'].apply(lambda topic:[main_topics[parent_topic] for parent_topic in parent_topics[topic]])
edam_data['Parent Topics']
</code>
<code>
dataset['Topic Category'] = dataset['Filtered EDAM'].apply(lambda edam_list: [edam_data[edam_data['Preferred Label'] == topic]['Parent Topics'].values[0] for topic in edam_list])\
.apply(lambda parent_list: set([item for sublist in parent_list for item in sublist]))
dataset['Topic Category']
</code>
## GPT for topic categories
<code>
with open('templates/prompt_template.txt', 'r') as template_file:
template = template_file.read()
formatted_topics = "\n".join(main_topics.values())
template = template.replace("<topics>", formatted_topics)
</code>
<code>
def get_accuracy(generated_topics, true_topics):
num_correct = 0
for topic in generated_topics.strip().split(', '):
if topic in true_topics:
num_correct += 1
return num_correct / len(true_topics)
</code>
<code>
def test(chat, dataset, truth_column='Topic Category', iterations=10, seed=54):
cost, accuracies = 0, []
io_pairs = []
random_samples = dataset.sample(n=iterations, random_state=seed)
for idx, random_sample in tqdm(random_samples.iterrows()):
_, abstract, paper_edam_topics = random_sample.loc[['Description', 'Abstract', truth_column]]
prompt = template.replace('<abstract>', abstract).replace('<num_terms>', str(len(paper_edam_topics)))
messages = [
SystemMessage(content=f"Generate a comma-separated list of relevant EDAM topics based on the provided abstract and topic categories."),
HumanMessage(content=prompt)
]
gpt_output = ''
with get_openai_callback() as cb:
chat.invoke(messages)
for chunk in chat.stream(messages):
if type(chat) == ChatOpenAI:
gpt_output += chunk.content
elif type(chat) == OpenAI:
gpt_output += chunk
cost += float(str(cb).split('$')[1])
try:
true_topics = dataset.loc[idx][truth_column]
accuracies.append(get_accuracy(gpt_output, true_topics))
except:
print('Error encountered at index', idx)
io_pairs.append([abstract, ', '.join(true_topics), gpt_output])
print('Average Accuracy:', np.mean(accuracies))
print('Total Cost ($):', cost)
return accuracies, cost, io_pairs
</code>
<code>
chat = ChatOpenAI(
model_name='gpt-3.5-turbo',
openai_api_key = openai_api_key
)
accuracies, cost, _ = test(chat, dataset, iterations=50)
</code>
<code>
chat = OpenAI(
model_name='text-davinci-003',
openai_api_key = openai_api_key
)
accuracies, cost, _ = test(chat, dataset, iterations=50)
</code>
<code>
chat = ChatOpenAI(
model_name='gpt-4',
openai_api_key = openai_api_key
)
accuracies, cost, _ = test(chat, dataset, iterations=50)
</code>
## Double num_topics
Attempting to see if GPT will eventually get the correct topics
<code>
random_sample = dataset.sample(n=1)
index = random_sample.index[0]
description, abstract, paper_edam_topics = random_sample[['Description', 'Abstract', 'Filtered EDAM']].values[0]
</code>
<code>
prompt = template.replace('<abstract>', abstract).replace('<num_terms>', str(len(paper_edam_topics) * 2))
# prompt = template.replace('<description>', description).replace('<num_terms>', str(len(paper_edam_topics)))
</code>
<code>
print(prompt)
</code>
<code>
messages = [
SystemMessage(content=f"You're a helpful assistant."),
HumanMessage(content=prompt)
]
</code>
<code>
chat = ChatOpenAI(
model_name='gpt-3.5-turbo',
openai_api_key = openai_api_key
)
gpt_output = ''
with get_openai_callback() as cb:
chat.invoke(messages)
for chunk in chat.stream(messages):
print(chunk.content, end="", flush=True)
gpt_output += chunk.content
print("\n===========CALLBACK: ==========\n")
print(cb)
print("\n=============\n")
</code>
<code>
get_accuracy(gpt_output, paper_edam_topics)
</code>
<code>
print(abstract, '\n')
print(gpt_output)
print(', '.join(random_sample['Filtered EDAM'].values[0]))
</code>
There seems to be some potential, but it requires further exploration. The main question to focus on is how to define success, i.e. how to measure whether the predicted topics are valid.
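One way to make "success" more precise is to score predictions with set-based precision and recall instead of the recall-style accuracy used above; a hedged sketch (treating both the GPT output and the ground truth as sets of topic strings):
<code>
def topic_precision_recall(generated_topics, true_topics):
    """Set-based precision, recall and F1 for a comma-separated GPT output
    against the ground-truth topic list (one possible metric, not a settled choice)."""
    pred = {t.strip() for t in generated_topics.split(',') if t.strip()}
    truth = set(true_topics)
    tp = len(pred & truth)
    precision = tp / len(pred) if pred else 0.0
    recall = tp / len(truth) if truth else 0.0
    f1 = 2 * precision * recall / (precision + recall) if (precision + recall) else 0.0
    return precision, recall, f1

topic_precision_recall(gpt_output, paper_edam_topics)
</code>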
## Fine-Tuning GPT
We will start with 50 training samples for fine-tuning. (https://platform.openai.com/docs/guides/fine-tuning/preparing-your-dataset)
<code>
n = 100
training_data = dataset.sample(n=n, replace=False, random_state=50)
training_data.shape
</code>
<code>
with open(f"datasets/finetune-data-{n}.jsonl", 'w') as file:
for idx, row in training_data.iterrows():
description, abstract, paper_edam_topics = row[['Description', 'Abstract', 'Filtered EDAM']]
prompt = template.replace('<abstract>', abstract).replace('<num_terms>', str(len(paper_edam_topics)))
json_data = {
"messages": [
{"role": "system", "content": "Generate a comma-separated list of relevant EDAM topics based on the provided abstract and topic categories."},
{"role": "user", "content": prompt},
{"role": "assistant", "content": ', '.join(paper_edam_topics)}
]
}
file.write(json.dumps(json_data))
file.write('\n')
</code>
Use ```Chat_finetuning_data_prep.ipynb``` to check for any errors in the data and to get the cost estimate.
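For a quick structural sanity check before uploading (in addition to the data-prep notebook above), a minimal sketch over the generated JSONL file could look like this:
<code>
# Lightweight check only: every line must parse as JSON and contain a
# "messages" list whose entries have "role" and "content" keys.
with open(f"datasets/finetune-data-{n}.jsonl") as f:
    for line_no, line in enumerate(f, 1):
        record = json.loads(line)
        assert "messages" in record, f"line {line_no}: missing 'messages'"
        for msg in record["messages"]:
            assert {"role", "content"} <= msg.keys(), f"line {line_no}: bad message keys"
print("Fine-tuning JSONL looks structurally OK")
</code>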
<code>
client = openai.OpenAI(api_key=openai_api_key)
fileobj = client.files.create(
file=open(f"datasets/finetune-data-{n}.jsonl", "rb"),
purpose="fine-tune"
)
fileobj
</code>
<code>
ftjob = client.fine_tuning.jobs.create(
training_file=fileobj.id,
model="gpt-3.5-turbo-1106"
)
ftjob
</code>
<code>
# Finished once fine_tuned_model is not None
client.fine_tuning.jobs.list(limit=10).data
</code>
<code>
# Most recent job
ftjob = client.fine_tuning.jobs.list(limit=10).data[0]
</code>
<code>
from io import BytesIO
training_results = pd.read_csv(BytesIO(client.files.content(ftjob.result_files[0]).content))[['step', 'train_loss', 'train_accuracy']]
training_results
</code>
<code>
training_results['train_loss'].plot()
plt.title('Loss Plot')
plt.xlabel('Step')
plt.ylabel('Loss')
</code>
<code>
training_results['train_accuracy'].plot()
plt.title('Accuracy Plot')
plt.xlabel('Step')
plt.ylabel('Accuracy')
</code>
## Testing All Models
Gather data for all the models
<code>
def test(chat, dataset, truth_column='Filtered EDAM', iterations=10, seed=54):
cost, accuracies = 0, []
io_pairs = []
random_samples = dataset.sample(n=iterations, random_state=seed)
for idx, random_sample in tqdm(random_samples.iterrows(), total=random_samples.shape[0]):
abstract, paper_edam_topics = random_sample.loc[['Abstract', truth_column]]
prompt = template.replace('<abstract>', abstract).replace('<num_terms>', str(len(paper_edam_topics)))
messages = [
SystemMessage(content=f"Generate a comma-separated list of relevant EDAM topics based on the provided abstract and topic categories."),
HumanMessage(content=prompt)
]
gpt_output = ''
try:
with get_openai_callback() as cb:
chat.invoke(messages)
for chunk in chat.stream(messages):
if type(chat) == ChatOpenAI:
gpt_output += chunk.content
elif type(chat) == OpenAI:
gpt_output += chunk
cost += float(str(cb).split('$')[1])
true_topics = dataset.loc[idx][truth_column]
accuracies.append(get_accuracy(gpt_output, true_topics))
except Exception as e:
print('Error encountered at index', idx)
print(e)
return accuracies, cost, io_pairs
io_pairs.append([abstract, ', '.join(true_topics), gpt_output])
print('Average Accuracy:', np.mean(accuracies))
print('Total Cost ($):', cost)
return accuracies, cost, io_pairs
</code>
<code>
def get_accuracy(generated_topics, true_topics):
num_correct = 0
for topic in generated_topics.strip().split(', '):
if topic in true_topics:
num_correct += 1
return num_correct / len(true_topics)
</code>
<code>
n = 100
training_data = dataset.sample(n=n, replace=False, random_state=50)
training_data.shape
</code>
<code>
testing_data = dataset.drop(training_data.index)
testing_data['Abstract'] = testing_data['Abstract'].apply(lambda text: BeautifulSoup(text, "html.parser").get_text())
</code>
<code>
results = pd.DataFrame(columns=['Model', 'Abstract', 'Ground Truth', 'Predictions'])
</code>
<code>
# 100 training samples (11/06 version)
chat = ChatOpenAI(
model_name='ft:gpt-3.5-turbo-1106:personal::8SDAGTmv',
openai_api_key = openai_api_key,
request_timeout=120,
max_retries=12
)
accuracies, cost, io_pairs = test(chat, testing_data, truth_column='Filtered EDAM', iterations=25)
for abstract, ground_truth, predictions in io_pairs:
row = {'Model': chat.model_name, 'Abstract': abstract, 'Ground Truth': ground_truth, 'Predictions': predictions}
results = pd.concat([results, pd.DataFrame([row])], ignore_index=True)
</code>
<code>
# 100 training samples (06/13 version)
chat = ChatOpenAI(
model_name='ft:gpt-3.5-turbo-0613:personal::8SD8i1on',
openai_api_key = openai_api_key,
request_timeout=120,
max_retries=12
)
accuracies, cost, io_pairs = test(chat, testing_data, truth_column='Filtered EDAM', iterations=25)
for abstract, ground_truth, predictions in io_pairs:
row = {'Model': chat.model_name, 'Abstract': abstract, 'Ground Truth': ground_truth, 'Predictions': predictions}
results = pd.concat([results, pd.DataFrame([row])], ignore_index=True)
</code>
<code>
# 50 training samples (06/13 version)
chat = ChatOpenAI(
model_name='ft:gpt-3.5-turbo-0613:personal::8SAHvdnS',
openai_api_key = openai_api_key,
request_timeout=120,
max_retries=12
)
accuracies, cost, io_pairs = test(chat, testing_data, truth_column='Filtered EDAM', iterations=25)
for abstract, ground_truth, predictions in io_pairs:
row = {'Model': chat.model_name, 'Abstract': abstract, 'Ground Truth': ground_truth, 'Predictions': predictions}
results = pd.concat([results, pd.DataFrame([row])], ignore_index=True)
</code>
<code>
# Non-fine tuned version. Default dated model
chat = ChatOpenAI(
model_name='gpt-3.5-turbo',
openai_api_key = openai_api_key,
request_timeout=120,
max_retries=12
)
accuracies, cost, io_pairs = test(chat, testing_data, truth_column='Filtered EDAM', iterations=25)
for abstract, ground_truth, predictions in io_pairs:
row = {'Model': chat.model_name, 'Abstract': abstract, 'Ground Truth': ground_truth, 'Predictions': predictions}
results = pd.concat([results, pd.DataFrame([row])], ignore_index=True)
</code>
<code>
# Non fine-tuned GPT 4
chat = ChatOpenAI(
model_name='gpt-4',
openai_api_key = openai_api_key,
request_timeout=120,
max_retries=12
)
accuracies, cost, io_pairs = test(chat, testing_data, truth_column='Filtered EDAM', iterations=25)
for abstract, ground_truth, predictions in io_pairs:
row = {'Model': chat.model_name, 'Abstract': abstract, 'Ground Truth': ground_truth, 'Predictions': predictions}
results = pd.concat([results, pd.DataFrame([row])], ignore_index=True)
</code>
<code>
# import csv
## Convert Predictions column to a set
# results['Predictions'] = results['Predictions'].apply(lambda x: set(map(str.strip, next(csv.reader([x])))))
</code>
<code>
results.to_csv('raw_model_outputs.csv', index=False)
</code>
<code>
def print_dynamically(string, max_line_length=80):
words = string.split()
lines = []
current_line = ""
for word in words:
if len(current_line) + len(word) + 1 <= max_line_length:
current_line += word + " "
else:
lines.append(current_line)
current_line = word + " "
lines.append(current_line)
for line in lines:
print(line)
</code>
<code>
model, abstract, ground_truth, pred = results.sample(n=1).values[0]
print_dynamically(abstract)
print('\nModel:', model)
print('\nGround Truth EDAM Topics:')
print(ground_truth)
print('\nGPT Predicted EDAM Topics:')
print(pred)
</code>
<code>
## Compare old template and new template in terms of hallucinations
halluc_scores = []
for idx, row in results.iterrows():
preds, upd_preds = list(ast.literal_eval(row['Predictions'])), row['New Predictions'].split(', ')
# print(preds)
# print(upd_preds, '\n')
pred_scores = [1 if pred not in full_edam_topics else 0 for pred in preds]
pred_score = sum(pred_scores) / len(pred_scores)
upd_pred_scores = [1 if pred not in full_edam_topics else 0 for pred in upd_preds]
upd_pred_score = sum(upd_pred_scores) / len(upd_pred_scores)
halluc_score = pred_score - upd_pred_score
halluc_scores.append(halluc_score)
</code>
<code>
# Positive means old preds were worse, negative means they were better
plt.hist(halluc_scores, bins='auto', edgecolor='black', alpha=0.7)
plt.title('Hallucination Scores')
plt.xlabel('Hallucination Score')
plt.ylabel('Frequency')
</code>
| {
"filename": "GPT_1.ipynb",
"repository": "ZubairQazi/NDE-GPT",
"query": "transformed_from_existing",
"size": 117418,
"sha": ""
} |
# New_eng_academic_research_2.ipynb
Repository: kdj0712/teamKim1
<code>
import pandas as pd
import numpy as np
</code>
<code>
df_Riss_research = pd.read_csv("./csv/Seleniums.eng_academic_research.csv")
df_Riss_research.drop(labels='_id', axis=1, inplace=True)
df_Riss_research['research_subject']
</code>
## Data Preprocessing
### Remove duplicate academic records from the dataframe
<code>
df_Riss_research['research_title'].value_counts()
# Check for duplicated research entries, e.g.:
# 1) 진행성 화골성 근염 -증례 보고- = Myositis Ossificans Progressive -A Case Report-
# 2) 비장적출로 치유된 희귀 비장 질환 치험 = Clinical Experience of Rare Splenic Disease Healed by Splenectomy
# 3) 상급종합병원과 희귀난치성질환 전문병원의 희귀의약품 사용현황
</code>
<code>
df_Riss_research.drop_duplicates(subset="research_title", keep='first', inplace=True)
df_Riss_research['research_title'].value_counts()
</code>
<code>
df_Riss_research['research_title'].value_counts()
# Confirmed that no duplicate values remain
</code>
<code>
df_Riss_research.reset_index(drop=True, inplace=True)
</code>
### Keep only the entries that contain subject keywords
<code>
drop_index = df_Riss_research[df_Riss_research['research_subject'].str.contains(';')==True].index
</code>
<code>
df_Riss_research_subject = df_Riss_research[df_Riss_research['research_subject'].str.contains(';')==True]
df_Riss_research_subject.reset_index(drop=True, inplace=True)
df_Riss_research_subject
</code>
<code>
condition = "research_language != 'KCI등재후보'"
df_Riss_research_subject01 = df_Riss_research_subject.query(condition)
df_Riss_research_subject01.reset_index(drop=True, inplace=True)
df_Riss_research_subject01
</code>
<code>
type(df_Riss_research_subject01['research_type'][3])
</code>
<code>
int(df_Riss_research_subject01['research_type'][3])
</code>
<code>
for i in range(len(df_Riss_research_subject01['research_type'].index)):
try:
        if type(int(df_Riss_research_subject01['research_type'][i])) == int:
            # '학술저널' means 'academic journal'; drop rows where research_page holds that label
            condition03 = "research_page != '학술저널'"
            df_Riss_research_subject02 = df_Riss_research_subject01.query(condition03)
except:
pass
df_Riss_research_subject02.reset_index(drop=True, inplace=True)
</code>
<code>
df_new = df_Riss_research_subject02[['research_title', 'research_subject']]
df_new.to_csv("eng_research_subject.csv", sep='\t', encoding='utf-8')
</code>
### Keep only the English text (strip Korean and Chinese characters, digits and semicolons)
<code>
import re
def no_korean(text):
patterns = '([가-힣]|[一-龥]|[0-9]|[;])'
text_regex = re.sub(pattern=patterns, repl=' ', string=text)
return text_regex
df_Riss_research_subject['research_subject'] = df_Riss_research_subject['research_subject'].apply(no_korean)
</code>
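To see what the regex actually does, here is a throwaway example (the input string is invented for illustration): Hangul and CJK characters, digits and semicolons are each replaced by a space, so only the Latin-alphabet text survives.
<code>
# Illustrative only: the input string is made up
sample = '희귀질환 연구;Rare Disease;2023'
print(repr(no_korean(sample)))  # Korean characters, digits and ';' become spaces
</code>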
<code>
df_Riss_research_subject['research_subject']
</code>
<code>
df_new =pd.DataFrame(df_Riss_research_subject['research_subject'])
df_new
</code>
<code>
df_new.to_csv("eng_research_subject.csv", sep='\t', encoding='utf-8')
</code>
<code>
eng_subject = df_Riss_research_subject['research_subject'].tolist()
eng_subject
</code>
### Morphological analysis
#### Build the stopword list
<code>
f=open('./csv/eng_academic_research_stopwords.txt')
stopwords=[]
lines = f.readlines()
for line in lines:
line = line.strip()
stopwords.append(line)
f.close()
</code>
<code>
df_Riss_research_subject['research_subject'] = df_Riss_research_subject['research_subject'].str.lower()
</code>
<code>
from sklearn.feature_extraction.text import TfidfVectorizer
tfidfVectorizer = TfidfVectorizer(stop_words=stopwords
, ngram_range=(1,2)
, max_df=0.90
                                  , min_df=1) # stop_words removes unneeded words from the vocabulary; ngram_range keeps two-word combinations whose meaning only exists when the words appear together.
result_vectors = tfidfVectorizer.fit_transform(eng_subject) # fit and transform are two different steps (fit builds the vocabulary of terms).
result_vectors.toarray()[:2]
</code>
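If you want to sanity-check the vectorization, a small optional sketch (using only the `tfidfVectorizer` and `result_vectors` objects fitted above) prints the highest-weighted terms of the first document.
<code>
import numpy as np

# Optional sketch: top TF-IDF terms for the first subject-keyword document
terms = tfidfVectorizer.get_feature_names_out()
row = result_vectors[0].toarray().ravel()
top = np.argsort(row)[::-1][:5]
for i in top:
    if row[i] > 0:
        print(terms[i], round(row[i], 3))
</code>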
<code>
tfidfVectorizer.vocabulary_
</code>
<code>
from sklearn.decomposition import LatentDirichletAllocation
lda_model = LatentDirichletAllocation(n_components=3, n_jobs=-1) # instantiate; n_components is the number of topics
lda_model.fit(result_vectors) # train the model
</code>
<code>
dictionary_list = tfidfVectorizer.get_feature_names_out()
dictionary_list
</code>
<code>
lda_model.components_
</code>
<code>
topics_output = lda_model.transform(result_vectors)
df_topics_score = pd.DataFrame(data=topics_output)
df_topics_score
</code>
<code>
df_topics_score['dominant_topic_number'] = np.argmax(topics_output, axis=1)
df_topics_score['sentences'] = df_Riss_research_subject['research_subject']
df_topics_score
</code>
### Extract the top words per topic
<code>
## Extract the top words
## row 0 holds the topic-word weights, row 1 the dictionary terms
topics_list = list()
for topic in lda_model.components_:
df_datas = [topic, dictionary_list]
df_topics = pd.DataFrame(data=df_datas)
df_topics= df_topics.T
df_topics = df_topics.sort_values(0, ascending=False)
# print(df_topics[:3])
    topics_text = ' '.join(df_topics[1].values[:3])  # join the top-3 terms; .values pulls them out of the Series / index
print(topics_text)
topics_list.append(topics_text)
topics_list_add = [topics_list, ['Topic0', 'Topic1', 'Topic2']]
df_topics_keywords = pd.DataFrame(topics_list_add)
</code>
<code>
df_topics_keywords
</code>
<code>
import pyLDAvis
import pyLDAvis.lda_model
</code>
<code>
vis = pyLDAvis.lda_model.prepare(lda_model, result_vectors, tfidfVectorizer) # topic model, fitted document-term matrix, fitted vectorizer
</code>
<code>
pyLDAvis.enable_notebook()
pyLDAvis.display(vis) # PCA - dimensionality reduction
</code>
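If the interactive view needs to be shared outside the notebook, pyLDAvis can also write it to a standalone HTML file; the output filename below is just an example.
<code>
# Optional: save the interactive LDA view to an HTML file (filename is illustrative)
pyLDAvis.save_html(vis, 'eng_research_lda_topics.html')
</code>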
| {
"filename": "New_eng_academic_research_2.ipynb",
"repository": "kdj0712/teamKim1",
"query": "transformed_from_existing",
"size": 277407,
"sha": ""
} |
# analyses_3.SCENIC-V10-V2_1.ipynb
Repository: aertslab/scenicplus
### 1. Create SCENIC+ object
<code>
# Load functions
from scenicplus.scenicplus_class import SCENICPLUS, create_SCENICPLUS_object
from scenicplus.preprocessing.filtering import *
</code>
First we will load the scRNA-seq and the scATAC-seq data, making sure that the cell names match between the two data sets.
<code>
# Load data
## ATAC - cisTopic object
outDir = '/staging/leuven/stg_00002/lcb/cbravo/Multiomics_pipeline/analysis/DPCL/pycisTopic/'
import pickle
infile = open(outDir + 'DPCL_cisTopicObject.pkl', 'rb')
cistopic_obj = pickle.load(infile)
infile.close()
## Precomputed imputed data
import pickle
infile = open(outDir + 'DARs/Imputed_accessibility.pkl', 'rb')
imputed_acc_obj = pickle.load(infile)
infile.close()
## RNA - Create Anndata
from loomxpy.loomxpy import SCopeLoom
from pycisTopic.loom import *
import itertools
import anndata
path_to_loom = '/staging/leuven/stg_00002/lcb/cbravo/Multiomics_pipeline/analysis/DPCL/vsn/grnboost/out/data/scRNA_count_matrix.SINGLE_SAMPLE_SCENIC.loom'
loom = SCopeLoom.read_loom(path_to_loom)
metadata = get_metadata(loom)
# Fix names
metadata.index = metadata.index + '___DPLC'
expr_mat = loom.ex_mtx
expr_mat.index = expr_mat.index + '___DPLC'
rna_anndata = anndata.AnnData(X=expr_mat)
rna_anndata.obs = metadata
</code>
Next we load the motif enrichment results into a dictionary. Results can come from the different pycistarget methods (e.g. cisTarget, DEM) and from different region sets (e.g. topics, DARs, MACS bdgdiff peaks). In this tutorial we will use the cisTarget and DEM results on topics and DARs.
<code>
## Precomputed motif enrichment results
import pickle
infile = open('/staging/leuven/stg_00002/lcb/cbravo/Multiomics_pipeline/analysis/DPCL/pycistarget/cluster_V10_V2/menr.pkl', 'rb')
menr = pickle.load(infile)
infile.close()
</code>
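A quick way to check which motif-enrichment runs ended up in the dictionary (a trivial inspection of the `menr` object loaded above) is to print its keys; each key corresponds to a pycistarget method and region-set combination.
<code>
# List the motif enrichment result sets available in the dictionary
for key in menr.keys():
    print(key)
</code>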
Now we can create the SCENIC+ object:
<code>
scplus_obj = create_SCENICPLUS_object(
GEX_anndata = rna_anndata,
cisTopic_obj = cistopic_obj,
imputed_acc_obj = imputed_acc_obj,
menr = menr,
ACC_prefix = 'ACC_',
GEX_prefix = 'GEX_',
bc_transform_func = lambda x: x,
normalize_imputed_acc = False)
</code>
<code>
type(scplus_obj.X_EXP)
</code>
<code>
print(scplus_obj)
</code>
You can also filter out lowly accessible regions and lowly expressed genes. This is recommended to avoid inferring false relationships involving these regions and genes.
<code>
filter_genes(scplus_obj, min_pct = 0.5)
filter_regions(scplus_obj, min_pct = 0.5)
</code>
<code>
# Save
outDir = '/staging/leuven/stg_00002/lcb/cbravo/Multiomics_pipeline/analysis/DPCL/scenicplus_final/'
import pickle
with open(outDir+'scplus_obj.pkl', 'wb') as f:
pickle.dump(scplus_obj, f)
</code>
### GRNBoost
<code>
# Shell command (run outside Python): start ipython3 inside the SCENIC+ Singularity container
singularity exec -B /lustre1,/staging,/data,/vsc-hard-mounts,/scratch scenicplus.sif ipython3
</code>
<code>
# For the downstream analyses
outDir = '/staging/leuven/stg_00002/lcb/cbravo/Multiomics_pipeline/analysis/DPCL/scenicplus_final/'
import pickle
infile = open(outDir+'scplus_obj.pkl', 'rb')
scplus_obj = pickle.load(infile)
infile.close()
import pickle
infile = open('/staging/leuven/stg_00002/lcb/cbravo/Multiomics_pipeline/analysis/DPCL/scenicplus_V2_grnboost/region_ranking.pkl', 'rb')
region_ranking = pickle.load(infile)
infile.close()
import pickle
infile = open('/staging/leuven/stg_00002/lcb/cbravo/Multiomics_pipeline/analysis/DPCL/scenicplus_V2_grnboost/gene_ranking.pkl', 'rb')
gene_ranking = pickle.load(infile)
infile.close()
from scenicplus.wrappers.run_scenicplus import *
run_scenicplus(scplus_obj,
variable = ['ACC_Cell_type'],
species = 'hsapiens',
assembly = 'hg38',
tf_file = '/staging/leuven/stg_00002/lcb/cflerin/resources/allTFs_hg38.txt',
save_path = '/staging/leuven/stg_00002/lcb/cbravo/Multiomics_pipeline/analysis/DPCL/scenicplus_final_autoreg/grnboost/',
biomart_host = 'http://oct2016.archive.ensembl.org/',
upstream = [1000, 150000],
downstream = [1000, 150000],
calculate_TF_eGRN_correlation = False,
calculate_DEGs_DARs = True,
export_to_loom_file = True,
export_to_UCSC_file = True,
region_ranking=region_ranking,
gene_ranking=gene_ranking,
tree_structure = ('DPCL', 'SCENIC+', 'grnboost'),
path_bedToBigBed = '/data/leuven/software/biomed/haswell_centos7/2018a/software/Kent_tools/20190730-linux.x86_64/bin/',
n_cpu = 20,
_temp_dir = '/scratch/leuven/313/vsc31305/ray_spill'
)
</code>
<code>
# For the downstream analyses
outDir = '/staging/leuven/stg_00002/lcb/cbravo/Multiomics_pipeline/analysis/DPCL/scenicplus_final_autoreg/grnboost/'
import dill
infile = open(outDir+'scplus_obj.pkl', 'rb')
scplus_obj = dill.load(infile)
infile.close()
</code>
<code>
import pandas as pd
def format_egrns_time(scplus_obj,
eregulons_key: str = 'eRegulons',
TF2G_key: str = 'TF2G_adj',
key_added: str = 'eRegulon_metadata'):
"""
A function to format eRegulons to a pandas dataframe
"""
egrn_list = scplus_obj.uns[eregulons_key]
TF = [egrn_list[x].transcription_factor for x in range(len(egrn_list))]
is_extended = [str(egrn_list[x].is_extended)
for x in range(len(egrn_list))]
r2g_data = [pd.DataFrame.from_records(egrn_list[x].regions2genes, columns=[
'Region', 'Gene', 'R2G_importance', 'R2G_rho', 'R2G_importance_x_rho', 'R2G_importance_x_abs_rho']) for x in range(len(egrn_list))]
egrn_name = [TF[x] + '_extended' if is_extended[x] ==
'True' else TF[x] for x in range(len(egrn_list))]
egrn_name = [egrn_name[x] + '_+' if 'positive tf2g' in egrn_list[x]
.context else egrn_name[x] + '_-' for x in range(len(egrn_list))]
egrn_name = [egrn_name[x] + '_+' if 'positive r2g' in egrn_list[x]
.context else egrn_name[x] + '_-' for x in range(len(egrn_list))]
region_signature_name = [
egrn_name[x] + '_(' + str(len(set(r2g_data[x].Region))) + 'r)' for x in range(len(egrn_list))]
gene_signature_name = [
egrn_name[x] + '_(' + str(len(set(r2g_data[x].Gene))) + 'g)' for x in range(len(egrn_list))]
for x in range(len(egrn_list)):
r2g_data[x].insert(0, "TF", TF[x])
r2g_data[x].insert(1, "is_extended", is_extended[x])
r2g_data[x].insert(0, "Gene_signature_name", gene_signature_name[x])
r2g_data[x].insert(0, "Region_signature_name",
region_signature_name[x])
tf2g_data = scplus_obj.uns[TF2G_key].copy()
tf2g_data.columns = ['TF', 'Gene', 'TF2G_importance', 'TF2G_regulation',
'TF2G_rho', 'TF2G_importance_x_abs_rho', 'TF2G_importance_x_rho']
egrn_metadata = pd.concat([pd.merge(r2g_data[x], tf2g_data[tf2g_data.TF == r2g_data[x].TF[0]], on=[
'TF', 'Gene']) for x in range(len(egrn_list)) if tf2g_data[tf2g_data.TF == r2g_data[x].TF[0]].shape[0] != 0 and r2g_data[x].shape[0] != 0])
scplus_obj.uns[key_added] = egrn_metadata
</code>
<code>
format_egrns_time(scplus_obj,
eregulons_key = 'eRegulons',
TF2G_key = 'TF2G_adj',
key_added = 'eRegulon_metadata')
</code>
<code>
scplus_obj.uns['region_to_gene'].to_csv('/staging/leuven/stg_00002/lcb/cbravo/Multiomics_pipeline/analysis/DPCL/scenicplus_V2_grnboost_2/region_to_gene.tsv', sep='\t')
</code>
<code>
scplus_obj.uns['region_to_gene'][scplus_obj.uns['region_to_gene']['rho'] >0].to_csv('/staging/leuven/stg_00002/lcb/cbravo/Multiomics_pipeline/analysis/DPCL/scenicplus_V2_grnboost_2/region_to_gene_pos.tsv', sep='\t')
</code>
<code>
select = list(set(scplus_obj.uns['eRegulon_metadata']['Region']))
</code>
<code>
scplus_obj.uns['region_to_gene']
</code>
<code>
scplus_obj.uns['region_to_gene'][scplus_obj.uns['region_to_gene']['region'].isin(select)]
</code>
<code>
scplus_obj.uns['region_to_gene'][scplus_obj.uns['region_to_gene']['region'].isin(select)].to_csv('/staging/leuven/stg_00002/lcb/cbravo/Multiomics_pipeline/analysis/DPCL/scenicplus_V2_grnboost_2/region_to_gene_in_eGRN.tsv', sep='\t')
</code>
<code>
import pandas as pd
hic_data = pd.read_csv('/staging/leuven/stg_00002/lcb/cbravo/Multiomics_pipeline/analysis/DPCL/data/HiC/formatted/HepG2_ENCFF020DPP_5Kb_SCALE.txt', sep='\t')
hic_data.columns = ['Chromosome', 'Start', 'End', 'target', 'importance', 'TSS', 'Strand', 'Distance', 'region']
hic_data['rho'] = hic_data['importance']
hic_data = hic_data[['target', 'region', 'importance', 'rho', 'Distance']]
hic_data = hic_data[hic_data['region'].isin(scplus_obj.region_names)]
hic_data
</code>
<code>
import pandas as pd
import numpy as np
import ray
import logging
import time
import sys
import os
import subprocess
import pyranges as pr
from sklearn.ensemble import GradientBoostingRegressor, RandomForestRegressor, ExtraTreesRegressor
from scipy.stats import pearsonr, spearmanr
from tqdm import tqdm
from matplotlib import cm
from matplotlib.colors import Normalize
from typing import List
from scenicplus.utils import extend_pyranges, extend_pyranges_with_limits, reduce_pyranges_with_limits_b
from scenicplus.utils import calculate_distance_with_limits_join, reduce_pyranges_b, calculate_distance_join
from scenicplus.utils import coord_to_region_names, region_names_to_coordinates, ASM_SYNONYMS, Groupby, flatten_list
from scenicplus.scenicplus_class import SCENICPLUS
from scenicplus.enhancer_to_gene import INTERACT_AS
def export_to_UCSC_interact_hic(SCENICPLUS_obj: SCENICPLUS,
species: str,
outfile: str,
                                 region_to_gene_key: str = 'region_to_gene',
pbm_host:str = 'http://www.ensembl.org',
bigbed_outfile:str = None,
path_bedToBigBed: str= None,
assembly: str = None,
ucsc_track_name: str = 'region_to_gene',
ucsc_description: str = 'interaction file for region to gene',
cmap_neg: str = 'Reds',
cmap_pos: str = 'Greens',
key_for_color: str = 'importance',
vmin: int = 0,
vmax: int = 1,
scale_by_gene: bool = True,
subset_for_eRegulons_regions: bool = True,
eRegulons_key: str = 'eRegulons') -> pd.DataFrame:
# Create logger
level = logging.INFO
format = '%(asctime)s %(name)-12s %(levelname)-8s %(message)s'
handlers = [logging.StreamHandler(stream=sys.stdout)]
logging.basicConfig(level=level, format=format, handlers=handlers)
log = logging.getLogger('R2G')
if region_to_gene_key not in SCENICPLUS_obj.uns.keys():
raise Exception(
f'key {region_to_gene_key} not found in SCENICPLUS_obj.uns, first calculate region to gene relationships using function: "calculate_regions_to_genes_relationships"')
region_to_gene_df = SCENICPLUS_obj.uns[region_to_gene_key].copy()
if subset_for_eRegulons_regions:
if eRegulons_key not in SCENICPLUS_obj.uns.keys():
raise ValueError(
f'key {eRegulons_key} not found in SCENICPLUS_obj.uns.keys()')
eRegulon_regions = list(set(flatten_list(
[ereg.target_regions for ereg in SCENICPLUS_obj.uns[eRegulons_key]])))
region_to_gene_df.index = region_to_gene_df['region']
region_to_gene_df = region_to_gene_df.loc[eRegulon_regions].reset_index(
drop=True)
# Rename columns to be in line with biomart annotation
region_to_gene_df.rename(columns={'target': 'Gene'}, inplace=True)
# Get TSS annotation (end-point for links)
log.info('Downloading gene annotation from biomart, using dataset: {}'.format(
species+'_gene_ensembl'))
import pybiomart as pbm
dataset = pbm.Dataset(name=species+'_gene_ensembl', host=pbm_host)
annot = dataset.query(attributes=['chromosome_name', 'start_position', 'end_position',
'strand', 'external_gene_name', 'transcription_start_site', 'transcript_biotype'])
annot.columns = ['Chromosome', 'Start', 'End', 'Strand',
'Gene', 'Transcription_Start_Site', 'Transcript_type']
annot['Chromosome'] = 'chr' + \
annot['Chromosome'].astype(str)
annot = annot[annot.Transcript_type == 'protein_coding']
annot.Strand[annot.Strand == 1] = '+'
annot.Strand[annot.Strand == -1] = '-'
log.info('Formatting data ...')
# get gene to tss mapping, take the one equal to the gene start/end location if possible otherwise take the first one
annot['TSSeqStartEnd'] = np.logical_or(
annot['Transcription_Start_Site'] == annot['Start'], annot['Transcription_Start_Site'] == annot['End'])
gene_to_tss = annot[['Gene', 'Transcription_Start_Site']].groupby(
'Gene').agg(lambda x: list(map(str, x)))
startEndEq = annot[['Gene', 'TSSeqStartEnd']
].groupby('Gene').agg(lambda x: list(x))
gene_to_tss['Transcription_Start_Site'] = [np.array(tss[0])[eq[0]][0] if sum(
eq[0]) >= 1 else tss[0][0] for eq, tss in zip(startEndEq.values, gene_to_tss.values)]
gene_to_tss.columns = ['TSS_Gene']
# get gene to strand mapping
gene_to_strand = annot[['Gene', 'Strand']].groupby(
'Gene').agg(lambda x: list(map(str, x))[0])
# get gene to chromosome mapping (should be the same as the regions mapped to the gene)
gene_to_chrom = annot[['Gene', 'Chromosome']].groupby(
'Gene').agg(lambda x: list(map(str, x))[0])
# add TSS for each gene to region_to_gene_df
region_to_gene_df = region_to_gene_df.join(gene_to_tss, on='Gene')
# add strand for each gene to region_to_gene_df
region_to_gene_df = region_to_gene_df.join(gene_to_strand, on='Gene')
# add chromosome for each gene to region_to_gene_df
region_to_gene_df = region_to_gene_df.join(gene_to_chrom, on='Gene')
# get chrom, chromStart, chromEnd
region_to_gene_df.dropna(axis=0, how='any', inplace=True)
arr = region_names_to_coordinates(region_to_gene_df['region']).to_numpy()
chrom, chromStart, chromEnd = np.split(arr, 3, 1)
chrom = chrom[:, 0]
chromStart = chromStart[:, 0]
chromEnd = chromEnd[:, 0]
# get source chrom, chromStart, chromEnd (i.e. middle of regions)
sourceChrom = chrom
sourceStart = np.array(
list(map(int, chromStart + (chromEnd - chromStart)/2 - 1)))
sourceEnd = np.array(
list(map(int, chromStart + (chromEnd - chromStart)/2)))
# get target chrom, chromStart, chromEnd (i.e. TSS)
targetChrom = region_to_gene_df['Chromosome']
targetStart = region_to_gene_df['TSS_Gene'].values
targetEnd = list(map(str, np.array(list(map(int, targetStart))) + np.array(
[1 if strand == '+' else -1 for strand in region_to_gene_df['Strand'].values])))
# get color
norm = Normalize(vmin=vmin, vmax=vmax)
if scale_by_gene:
grouper = Groupby(
region_to_gene_df.loc[:, 'Gene'].to_numpy())
scores = region_to_gene_df.loc[:, key_for_color].to_numpy()
mapper = cm.ScalarMappable(norm=norm, cmap=cmap_pos)
def _value_to_color(scores):
S = (scores - scores.min()) / (scores.max() - scores.min())
return [','.join([str(x) for x in mapper.to_rgba(s, bytes=True)][0:3]) for s in S]
colors_pos = np.zeros(len(scores), dtype='object')
for idx in grouper.indices:
colors_pos[idx] = _value_to_color(scores[idx])
def _value_to_color(scores):
S = (scores - scores.min()) / (scores.max() - scores.min())
return [','.join([str(x) for x in mapper.to_rgba(s, bytes=True)][0:3]) for s in S]
colors_neg = np.zeros(len(scores), dtype='object')
for idx in grouper.indices:
colors_neg[idx] = _value_to_color(scores[idx])
else:
scores = region_to_gene_df.loc[:, key_for_color].to_numpy()
mapper = cm.ScalarMappable(norm=norm, cmap=cmap_pos)
colors_pos = [
','.join([str(x) for x in mapper.to_rgba(s, bytes=True)][0:3]) for s in scores]
region_to_gene_df.loc[:, 'color'] = colors_pos
region_to_gene_df['color'] = region_to_gene_df['color'].fillna('55,55,55')
# get name for regions (add incremental number to gene in range of regions linked to gene)
counter = 1
previous_gene = region_to_gene_df['Gene'].values[0]
names = []
for gene in region_to_gene_df['Gene'].values:
if gene != previous_gene:
counter = 1
else:
counter += 1
names.append(gene + '_' + str(counter))
previous_gene = gene
# format final interact dataframe
df_interact = pd.DataFrame(
data={
'chrom': chrom,
'chromStart': chromStart,
'chromEnd': chromEnd,
'name': names,
'score': (1000*(region_to_gene_df['importance'].values - np.min(region_to_gene_df['importance'].values))/np.ptp(region_to_gene_df['importance'].values)).astype(int) ,
'value': region_to_gene_df['importance'].values,
'exp': np.repeat('.', len(region_to_gene_df)),
'color': region_to_gene_df['color'].values,
'sourceChrom': sourceChrom,
'sourceStart': sourceStart,
'sourceEnd': sourceEnd,
'sourceName': names,
'sourceStrand': np.repeat('.', len(region_to_gene_df)),
'targetChrom': targetChrom,
'targetStart': targetStart,
'targetEnd': targetEnd,
'targetName': region_to_gene_df['Gene'].values,
'targetStrand': region_to_gene_df['Strand'].values
}
)
# sort dataframe
df_interact = df_interact.sort_values(by=['chrom', 'chromStart'])
# Write interact file
log.info('Writing data to: {}'.format(outfile))
with open(outfile, 'w') as f:
f.write('track type=interact name="{}" description="{}" useScore=0 maxHeightPixels=200:100:50 visibility=full\n'.format(
ucsc_track_name, ucsc_description))
df_interact.to_csv(f, header=False, index=False, sep='\t')
# write bigInteract file
if bigbed_outfile != None:
log.info('Writing data to: {}'.format(bigbed_outfile))
outfolder = bigbed_outfile.rsplit('/', 1)[0]
# write bed file without header to tmp file
df_interact.to_csv(os.path.join(
outfolder, 'interact.bed.tmp'), header=False, index=False, sep='\t')
# check if auto sql definition for interaction file exists in outfolder, otherwise create it
if not os.path.exists(os.path.join(outfolder, 'interact.as')):
with open(os.path.join(outfolder, 'interact.as'), 'w') as f:
f.write(INTERACT_AS)
# convert interact.bed.tmp to bigBed format
# bedToBigBed -as=interact.as -type=bed5+13 region_to_gene_no_head.interact https://genome.ucsc.edu/goldenPath/help/hg38.chrom.sizes region_to_gene.inter.bb
cmds = [
os.path.join(path_bedToBigBed, 'bedToBigBed'),
'-as={}'.format(os.path.join(os.path.join(outfolder, 'interact.as'))),
'-type=bed5+13',
os.path.join(outfolder, 'interact.bed.tmp'),
'https://hgdownload.cse.ucsc.edu/goldenpath/' + assembly + '/bigZips/' + assembly + '.chrom.sizes',
bigbed_outfile
]
p = subprocess.Popen(cmds, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
if p.returncode:
raise ValueError(
"cmds: %s\nstderr:%s\nstdout:%s" % (
" ".join(cmds), stderr, stdout)
)
return df_interact
</code>
# Generate Hi-C files
<code>
# For the downstream analyses
outDir = '/staging/leuven/stg_00002/lcb/cbravo/Multiomics_pipeline/analysis/DPCL/scenicplus_final_autoreg/grnboost/'
import dill
infile = open(outDir+'scplus_obj.pkl', 'rb')
scplus_obj = dill.load(infile)
infile.close()
</code>
<code>
r2g = scplus_obj.uns['region_to_gene'].copy()
r2g = r2g[r2g.rho > 0.03]
r2g
</code>
<code>
scplus_obj.uns['region_to_gene'].to_csv('/staging/leuven/stg_00002/lcb/cbravo/Multiomics_pipeline/analysis/DPCL/scenicplus_final_autoreg/grnboost/region_to_gene_pos.tsv', sep='\t')
</code>
<code>
import os
from os import listdir
from os.path import isfile, join
import pandas as pd
cell_lines=['GM12878', 'HepG2', 'IMR90', 'HCT116', 'K562']
save_path = '/staging/leuven/stg_00002/lcb/cbravo/Multiomics_pipeline/analysis/DPCL/data/HiC/bb_files_final/'
species = 'hsapiens'
assembly = 'hg38'
path_bedToBigBed = '/data/leuven/software/biomed/haswell_centos7/2018a/software/Kent_tools/20190730-linux.x86_64/bin/'
biomart_host = 'http://oct2016.archive.ensembl.org/'
path='/staging/leuven/stg_00002/lcb/cbravo/Multiomics_pipeline/analysis/DPCL/data/HiC/formatted/'
for cell_line in cell_lines:
files = [f for f in listdir(path) if isfile(join(path, f))]
file = [f for f in files if cell_line in f]
hic_data = pd.read_csv(path+file[0], sep='\t')
hic_data.columns = ['Chromosome', 'Start', 'End', 'target', 'importance', 'TSS', 'Strand', 'Distance', 'region']
hic_data['rho'] = hic_data['importance']
hic_data = hic_data[['target', 'region', 'importance', 'rho', 'Distance']]
hic_data = hic_data[hic_data['region'].isin(scplus_obj.region_names)]
scplus_obj.uns['region_to_gene'] = hic_data
r2g_data = export_to_UCSC_interact_hic(scplus_obj,
species,
os.path.join(save_path,cell_line+'.hic.all.bed'),
path_bedToBigBed=path_bedToBigBed,
bigbed_outfile=os.path.join(save_path,cell_line+'.hic.all.bb'),
region_to_gene_key='region_to_gene',
pbm_host=biomart_host,
assembly=assembly,
ucsc_track_name='R2G',
ucsc_description=cell_line+' HiC links',
cmap_neg='Reds',
cmap_pos='Greys',
key_for_color='importance',
scale_by_gene=True,
subset_for_eRegulons_regions=False,
eRegulons_key='eRegulons')
</code>
<code>
import os
from os import listdir
from os.path import isfile, join
import pandas as pd
cell_lines=['CellOracle', 'FigR', 'GRaNIE', 'Scenicplus-importance', 'Scenicplus-rho']
color_dict={'CellOracle':'Greens', 'FigR':'Purples', 'GRaNIE':'Oranges', 'Scenicplus-importance':'Blues', 'Scenicplus-rho':'Reds'}
save_path = '/staging/leuven/stg_00002/lcb/cbravo/Multiomics_pipeline/analysis/DPCL/data/HiC/bb_files_final/'
species = 'hsapiens'
assembly = 'hg38'
path_bedToBigBed = '/data/leuven/software/biomed/haswell_centos7/2018a/software/Kent_tools/20190730-linux.x86_64/bin/'
biomart_host = 'http://oct2016.archive.ensembl.org/'
path='/staging/leuven/stg_00002/lcb/cbravo/Multiomics_pipeline/analysis/DPCL/data/HiC/formatted/'
for cell_line in cell_lines:
files = [f for f in listdir(path) if isfile(join(path, f))]
file = [f for f in files if cell_line in f]
hic_data = pd.read_csv(path+file[0], sep='\t')
hic_data.columns = ['Chromosome', 'Start', 'End', 'target', 'importance', 'TSS', 'Strand', 'Distance', 'region']
hic_data['rho'] = hic_data['importance']
hic_data = hic_data[['target', 'region', 'importance', 'rho', 'Distance']]
hic_data = hic_data[hic_data['region'].isin(scplus_obj.region_names)]
scplus_obj.uns['region_to_gene'] = hic_data
r2g_data = export_to_UCSC_interact_hic(scplus_obj,
species,
os.path.join(save_path,cell_line+'.links.all.bed'),
path_bedToBigBed=path_bedToBigBed,
bigbed_outfile=os.path.join(save_path,cell_line+'.links.all.bb'),
region_to_gene_key='region_to_gene',
pbm_host=biomart_host,
assembly=assembly,
ucsc_track_name='R2G',
ucsc_description='SCENIC+ region to gene links',
cmap_neg='Reds',
cmap_pos=color_dict[cell_line],
key_for_color='importance',
scale_by_gene=True,
subset_for_eRegulons_regions=False,
eRegulons_key='eRegulons')
</code>
<code>
import os
from os import listdir
from os.path import isfile, join
import pandas as pd
cell_lines=['Scenicplus-importance_links_all', 'Scenicplus-rho_links_all']
color_dict={'Scenicplus-importance_links_all':'Blues', 'Scenicplus-rho_links_all':'Reds'}
save_path = '/staging/leuven/stg_00002/lcb/cbravo/Multiomics_pipeline/analysis/DPCL/data/HiC/bb_files_final/'
species = 'hsapiens'
assembly = 'hg38'
path_bedToBigBed = '/data/leuven/software/biomed/haswell_centos7/2018a/software/Kent_tools/20190730-linux.x86_64/bin/'
biomart_host = 'http://oct2016.archive.ensembl.org/'
path='/staging/leuven/stg_00002/lcb/cbravo/Multiomics_pipeline/analysis/DPCL/data/HiC/formatted/'
for cell_line in cell_lines:
files = [f for f in listdir(path) if isfile(join(path, f))]
file = [f for f in files if cell_line in f]
hic_data = pd.read_csv(path+file[0], sep='\t')
hic_data.columns = ['Chromosome', 'Start', 'End', 'target', 'importance', 'TSS', 'Strand', 'Distance', 'region']
hic_data['rho'] = hic_data['importance']
hic_data = hic_data[['target', 'region', 'importance', 'rho', 'Distance']]
hic_data = hic_data[hic_data['region'].isin(scplus_obj.region_names)]
scplus_obj.uns['region_to_gene'] = hic_data
r2g_data = export_to_UCSC_interact_hic(scplus_obj,
species,
os.path.join(save_path,cell_line+'.links.all.bed'),
path_bedToBigBed=path_bedToBigBed,
bigbed_outfile=os.path.join(save_path,cell_line+'.links.all.bb'),
region_to_gene_key='region_to_gene',
pbm_host=biomart_host,
assembly=assembly,
ucsc_track_name='R2G',
ucsc_description='SCENIC+ region to gene links',
cmap_neg='Reds',
cmap_pos=color_dict[cell_line],
key_for_color='importance',
scale_by_gene=True,
subset_for_eRegulons_regions=False,
eRegulons_key='eRegulons')
</code>
<code>
#from scenicplus.enhancer_to_gene import export_to_UCSC_interact
</code>
<code>
hic_data
</code>
<code>
eRegulon_regions = list(set(flatten_list([ereg.target_regions for ereg in scplus_obj.uns['eRegulons']])))
hic_data = hic_data[hic_data['region'].isin(eRegulon_regions)]
scplus_obj.uns['region_to_gene'] = hic_data
r2g_data = export_to_UCSC_interact_hic(scplus_obj,
species,
os.path.join(save_path,'HepG2.hic.eGRN.bed'),
path_bedToBigBed=path_bedToBigBed,
bigbed_outfile=os.path.join(save_path,'HepG2.hic.eGRN.notscaled.bb'),
region_to_gene_key='region_to_gene',
pbm_host=biomart_host,
assembly=assembly,
ucsc_track_name='R2G',
ucsc_description='SCENIC+ region to gene links',
cmap_neg='Reds',
cmap_pos='Greens',
key_for_color='importance',
scale_by_gene=False,
subset_for_eRegulons_regions=False,
eRegulons_key='eRegulons')
</code>
<code>
# For the downstream analyses
#outDir = '/staging/leuven/stg_00002/lcb/cbravo/Multiomics_pipeline/analysis/DPCL/scenicplus_final/'
outDir = '/data/users/cbravo/DPCL/scenicplus_final_autoreg/'
import pickle
infile = open(outDir+'scplus_obj.pkl', 'rb')
scplus_obj = pickle.load(infile)
infile.close()
import pickle
infile = open('/data/users/cbravo/DPCL/scenicplus_final_autoreg/genie3/region_ranking.pkl', 'rb')
region_ranking = pickle.load(infile)
infile.close()
import pickle
infile = open('/data/users/cbravo/DPCL/scenicplus_final_autoreg/genie3/gene_ranking.pkl', 'rb')
gene_ranking = pickle.load(infile)
infile.close()
from scenicplus.wrappers.run_scenicplus import *
run_scenicplus_genie3(scplus_obj,
variable = ['ACC_Cell_type'],
species = 'hsapiens',
assembly = 'hg38',
tf_file = '/data/users/cbravo/resources/allTFs_hg38.txt',
save_path = '/data/users/cbravo/DPCL/scenicplus_final_autoreg/genie3/',
biomart_host = 'http://oct2016.archive.ensembl.org/',
upstream = [1000, 150000],
downstream = [1000, 150000],
calculate_TF_eGRN_correlation = False,
calculate_DEGs_DARs = True,
export_to_loom_file = True,
export_to_UCSC_file = True,
region_ranking=region_ranking,
gene_ranking=gene_ranking,
tree_structure = ('DPCL', 'SCENIC+', 'genie3'),
path_bedToBigBed = '/media/data/users/cbravo/software/KENT/',
n_cpu = 20,
_temp_dir = '/media/data/users/cbravo/ray_spill'
)
from scenicplus.wrappers.run_scenicplus import *
run_scenicplus_genie3(scplus_obj,
variable = ['ACC_Cell_type'],
species = 'hsapiens',
assembly = 'hg38',
tf_file = '/staging/leuven/stg_00002/lcb/cflerin/resources/allTFs_hg38.txt',
save_path = '/staging/leuven/stg_00002/lcb/cbravo/Multiomics_pipeline/analysis/DPCL/scenicplus_final_autoreg/genie3/',
biomart_host = 'http://oct2016.archive.ensembl.org/',
upstream = [1000, 150000],
downstream = [1000, 150000],
calculate_TF_eGRN_correlation = False,
calculate_DEGs_DARs = True,
export_to_loom_file = True,
export_to_UCSC_file = True,
tree_structure = ('DPCL', 'SCENIC+', 'genie3'),
path_bedToBigBed = '/data/leuven/software/biomed/haswell_centos7/2018a/software/Kent_tools/20190730-linux.x86_64/bin/',
n_cpu = 14,
_temp_dir = '/scratch/leuven/313/vsc31305/ray_spill'
)
</code>
<code>
from scenicplus.scenicplus_class import SCENICPLUS, create_SCENICPLUS_object
from scenicplus.preprocessing.filtering import *
from scenicplus.cistromes import *
from scenicplus.enhancer_to_gene import get_search_space, calculate_regions_to_genes_relationships, RF_KWARGS
from scenicplus.enhancer_to_gene import export_to_UCSC_interact
from scenicplus.utils import format_egrns, export_eRegulons
from scenicplus.eregulon_enrichment import *
from scenicplus.TF_to_gene import *
from scenicplus.grn_builder.gsea_approach import build_grn
from scenicplus.dimensionality_reduction import *
from scenicplus.RSS import *
from scenicplus.diff_features import *
from scenicplus.loom import *
from typing import Dict, List, Mapping, Optional, Sequence
import os
import dill
import time
def run_scenicplus_genie3(scplus_obj: 'SCENICPLUS',
variable: List[str],
species: str,
assembly: str,
tf_file: str,
save_path: str,
biomart_host: Optional[str] = 'http://www.ensembl.org',
upstream: Optional[List] = [1000, 150000],
downstream: Optional[List] = [1000, 150000],
region_ranking: Optional['CisTopicImputedFeatures'] = None,
gene_ranking: Optional['CisTopicImputedFeatures'] = None,
calculate_TF_eGRN_correlation: Optional[bool] = True,
calculate_DEGs_DARs: Optional[bool] = True,
export_to_loom_file: Optional[bool] = True,
export_to_UCSC_file: Optional[bool] = True,
tree_structure: Sequence[str] = (),
path_bedToBigBed: Optional[str] = None,
n_cpu: Optional[int] = 1,
_temp_dir: Optional[str] = '/scratch/leuven/313/vsc31305/ray_spill'
):
"""
Wrapper to run SCENIC+
Parameters
---------
scplus_obj: `class::SCENICPLUS`
A SCENICPLUS object.
    variable: List[str]
Variables to use for RSS, TF-eGRN correlation and markers.
species: str
Species from which data comes from. Possible values: 'hsapiens', 'mmusculus', 'dmelanogaster'
assembly: str
Genome assembly to which the data was mapped. Possible values: 'hg38'
tf_file: str
Path to file containing genes that are TFs
save_path: str
Folder in which results will be saved
biomart_host: str, optional
Path to biomart host. Make sure that the host matches your genome assembly
    upstream: List, optional
        Upstream search space to use for region to gene relationships
    downstream: List, optional
        Downstream search space to use for region to gene relationships
region_ranking: `class::CisTopicImputedFeatures`, optional
Precomputed region ranking
gene_ranking: `class::CisTopicImputedFeatures`, optional
Precomputed gene ranking
calculate_TF_eGRN_correlation: bool, optional
Whether to calculate the TF-eGRN correlation based on the variables
calculate_DEGs_DARs: bool, optional
Whether to calculate DARs/DEGs based on the variables
export_to_loom_file: bool, optional
Whether to export data to loom files (gene based/region based)
export_to_UCSC_file: bool, optional
Whether to export region-to-gene links and eregulons to bed files
tree_structure: sequence, optional
Tree structure for loom files
path_bedToBigBed: str, optional
Path to convert bed files to big bed when exporting to UCSC (required if files are meant to be
used in a hub)
n_cpu: int, optional
Number of cores to use
_temp_dir: str, optional
Temporary directory for ray
"""
# Create logger
level = logging.INFO
log_format = '%(asctime)s %(name)-12s %(levelname)-8s %(message)s'
handlers = [logging.StreamHandler(stream=sys.stdout)]
logging.basicConfig(level=level, format=log_format, handlers=handlers)
log = logging.getLogger('SCENIC+_wrapper')
start_time = time.time()
check_folder = os.path.isdir(save_path)
if not check_folder:
os.makedirs(save_path)
log.info("Created folder : "+ save_path)
else:
log.info(save_path + " folder already exists.")
if 'Cistromes' not in scplus_obj.uns.keys():
log.info('Merging cistromes')
merge_cistromes(scplus_obj)
if 'search_space' not in scplus_obj.uns.keys():
log.info('Getting search space')
get_search_space(scplus_obj,
biomart_host = biomart_host,
species = species,
assembly = assembly,
upstream = upstream,
downstream = downstream)
if 'region_to_gene' not in scplus_obj.uns.keys():
log.info('Inferring region to gene relationships')
calculate_regions_to_genes_relationships(scplus_obj,
ray_n_cpu = n_cpu,
_temp_dir = _temp_dir,
importance_scoring_method = 'RF',
importance_scoring_kwargs = RF_KWARGS)
if 'TF2G_adj' not in scplus_obj.uns.keys():
log.info('Inferring TF to gene relationships')
calculate_TFs_to_genes_relationships(scplus_obj,
tf_file = tf_file,
ray_n_cpu = n_cpu,
method = 'GBM',
_temp_dir = _temp_dir,
key= 'TF2G_adj')
if 'eRegulons' not in scplus_obj.uns.keys():
log.info('Build eGRN')
build_grn(scplus_obj,
min_target_genes = 10,
adj_pval_thr = 1,
min_regions_per_gene = 0,
quantiles = (0.85, 0.90, 0.95),
top_n_regionTogenes_per_gene = (5, 10, 15),
top_n_regionTogenes_per_region = (),
binarize_using_basc = True,
rho_dichotomize_tf2g = True,
rho_dichotomize_r2g = True,
rho_dichotomize_eregulon = True,
rho_threshold = 0.05,
keep_extended_motif_annot = True,
merge_eRegulons = True,
order_regions_to_genes_by = 'importance',
order_TFs_to_genes_by = 'importance',
key_added = 'eRegulons',
cistromes_key = 'Unfiltered',
disable_tqdm = True,
ray_n_cpu = n_cpu,
_temp_dir = _temp_dir)
if 'eRegulon_metadata' not in scplus_obj.uns.keys():
log.info('Formatting eGRNs')
format_egrns(scplus_obj,
eregulons_key = 'eRegulons',
TF2G_key = 'TF2G_adj',
key_added = 'eRegulon_metadata')
if 'eRegulon_signatures' not in scplus_obj.uns.keys():
log.info('Converting eGRNs to signatures')
get_eRegulons_as_signatures(scplus_obj,
eRegulon_metadata_key='eRegulon_metadata',
key_added='eRegulon_signatures')
#if 'eRegulon_AUC' not in scplus_obj.uns.keys():
log.info('Calculating eGRNs AUC')
if region_ranking is None:
log.info('Calculating region ranking')
region_ranking = make_rankings(scplus_obj, target='region')
with open(os.path.join(save_path,'region_ranking.pkl'), 'wb') as f:
dill.dump(region_ranking, f)
log.info('Calculating eGRNs region based AUC')
score_eRegulons(scplus_obj,
ranking = region_ranking,
eRegulon_signatures_key = 'eRegulon_signatures',
key_added = 'eRegulon_AUC',
enrichment_type= 'region',
auc_threshold = 0.05,
normalize = False,
n_cpu = n_cpu)
if gene_ranking is None:
log.info('Calculating gene ranking')
gene_ranking = make_rankings(scplus_obj, target='gene')
with open(os.path.join(save_path,'gene_ranking.pkl'), 'wb') as f:
dill.dump(gene_ranking, f)
log.info('Calculating eGRNs gene based AUC')
score_eRegulons(scplus_obj,
gene_ranking,
eRegulon_signatures_key = 'eRegulon_signatures',
key_added = 'eRegulon_AUC',
enrichment_type = 'gene',
auc_threshold = 0.05,
normalize= False,
n_cpu = n_cpu)
if calculate_TF_eGRN_correlation is True:
log.info('Calculating TF-eGRNs AUC correlation')
for var in variable:
generate_pseudobulks(scplus_obj,
variable = var,
auc_key = 'eRegulon_AUC',
signature_key = 'Gene_based',
nr_cells = 5,
nr_pseudobulks = 100,
seed=555)
generate_pseudobulks(scplus_obj,
variable = var,
auc_key = 'eRegulon_AUC',
signature_key = 'Region_based',
nr_cells = 5,
nr_pseudobulks = 100,
seed=555)
TF_cistrome_correlation(scplus_obj,
variable = var,
auc_key = 'eRegulon_AUC',
signature_key = 'Gene_based',
out_key = var+'_eGRN_gene_based')
TF_cistrome_correlation(scplus_obj,
variable = var,
auc_key = 'eRegulon_AUC',
signature_key = 'Region_based',
out_key = var+'_eGRN_region_based')
#if 'eRegulon_AUC_thresholds' not in scplus_obj.uns.keys():
log.info('Binarizing eGRNs AUC')
binarize_AUC(scplus_obj,
auc_key='eRegulon_AUC',
out_key='eRegulon_AUC_thresholds',
signature_keys=['Gene_based', 'Region_based'],
n_cpu=n_cpu)
#if 'eRegulons_UMAP' not in scplus_obj.dr_cell.keys():
log.info('Making eGRNs AUC UMAP')
run_eRegulons_umap(scplus_obj,
scale=True, signature_keys=['Gene_based', 'Region_based'])
#if 'eRegulons_tSNE' not in scplus_obj.dr_cell.keys():
log.info('Making eGRNs AUC tSNE')
run_eRegulons_tsne(scplus_obj,
scale=True, signature_keys=['Gene_based', 'Region_based'])
#if 'RSS' not in scplus_obj.uns.keys():
log.info('Calculating eRSS')
for var in variable:
regulon_specificity_scores(scplus_obj,
var,
signature_keys=['Gene_based'],
out_key_suffix='_gene_based',
scale=False)
regulon_specificity_scores(scplus_obj,
var,
signature_keys=['Region_based'],
out_key_suffix='_region_based',
scale=False)
if calculate_DEGs_DARs is True:
log.info('Calculating DEGs/DARs')
for var in variable:
get_differential_features(scplus_obj, var, use_hvg = True, contrast_type = ['DEGs', 'DARs'])
if export_to_loom_file is True:
log.info('Exporting to loom file')
export_to_loom(scplus_obj,
signature_key = 'Gene_based',
tree_structure = tree_structure,
title = 'Gene based eGRN',
nomenclature = assembly,
out_fname=os.path.join(save_path,'SCENIC+_gene_based.loom'))
export_to_loom(scplus_obj,
signature_key = 'Region_based',
tree_structure = tree_structure,
title = 'Region based eGRN',
nomenclature = assembly,
out_fname=os.path.join(save_path,'SCENIC+_region_based.loom'))
if export_to_UCSC_file is True:
log.info('Exporting to UCSC')
r2g_data = export_to_UCSC_interact(scplus_obj,
species,
os.path.join(save_path,'r2g.rho.bed'),
path_bedToBigBed=path_bedToBigBed,
bigbed_outfile=os.path.join(save_path,'r2g.rho.bb'),
region_to_gene_key='region_to_gene',
pbm_host=biomart_host,
assembly=assembly,
ucsc_track_name='R2G',
ucsc_description='SCENIC+ region to gene links',
cmap_neg='Reds',
cmap_pos='Greens',
key_for_color='rho',
scale_by_gene=False,
subset_for_eRegulons_regions=True,
eRegulons_key='eRegulons')
r2g_data = export_to_UCSC_interact(scplus_obj,
species,
os.path.join(save_path,'r2g.importance.bed'),
path_bedToBigBed=path_bedToBigBed,
bigbed_outfile=os.path.join(save_path,'r2g.importance.bb'),
region_to_gene_key='region_to_gene',
pbm_host=biomart_host,
assembly=assembly,
ucsc_track_name='R2G',
ucsc_description='SCENIC+ region to gene links',
cmap_neg='Reds',
cmap_pos='Greens',
key_for_color='importance',
scale_by_gene=True,
subset_for_eRegulons_regions=True,
eRegulons_key='eRegulons')
regions = export_eRegulons(scplus_obj,
os.path.join(save_path,'eRegulons.bed'),
assembly,
bigbed_outfile = os.path.join(save_path,'eRegulons.bb'),
eRegulon_metadata_key = 'eRegulon_metadata',
eRegulon_signature_key = 'eRegulon_signatures',
path_bedToBigBed=path_bedToBigBed)
log.info('Saving object')
with open(os.path.join(save_path,'scplus_obj_genie3.pkl'), 'wb') as f:
dill.dump(scplus_obj, f)
log.info('Finished! Took {} minutes'.format((time.time() - start_time)/60))
</code>
| {
"filename": "analyses_3.SCENIC-V10-V2_1.ipynb",
"repository": "aertslab/scenicplus",
"query": "transformed_from_existing",
"size": 123784,
"sha": ""
} |